@@ -188,12 +188,11 @@ static void init_tensor_causal(ggml_tensor * tensor, float min = -1.0f, float ma
188188 std::mt19937 gen (rd ());
189189 std::uniform_real_distribution<float > dis (min, max);
190190
191- for (int64_t i0 = 0 ; i0 < tensor->ne [0 ]; i0++) {
192- for (int64_t i1 = 0 ; i1 < tensor->ne [1 ]; i1++) {
193- for (int64_t i2 = 0 ; i2 < tensor->ne [2 ]; i2++) {
194- for (int64_t i3 = 0 ; i3 < tensor->ne [3 ]; i3++) {
195- int64_t idx = i0 * tensor->nb [0 ] / sizeof (float ) + i1 * tensor->nb [1 ] / sizeof (float ) +
196- i2 * tensor->nb [2 ] / sizeof (float ) + i3 * tensor->nb [3 ] / sizeof (float );
191+ for (int64_t i3 = 0 ; i3 < tensor->ne [3 ]; i3++) {
192+ for (int64_t i2 = 0 ; i2 < tensor->ne [2 ]; i2++) {
193+ for (int64_t i1 = 0 ; i1 < tensor->ne [1 ]; i1++) {
194+ for (int64_t i0 = 0 ; i0 < tensor->ne [0 ]; i0++) {
195+ int64_t idx = (i0 * tensor->nb [0 ] + i1 * tensor->nb [1 ] + i2 * tensor->nb [2 ] + i3 * tensor->nb [3 ]) / sizeof (float );
197196 if (i0 <= i1) {
198197 data_f32[idx] = dis (gen);
199198 } else {
@@ -4858,7 +4857,6 @@ struct test_argsort : public test_case {
48584857 }
48594858};
48604859
4861- // GGML_OP_TOPK_MOE
48624860struct test_topk_moe : public test_case {
48634861 const std::array<int64_t , 4 > ne;
48644862 const int n_expert_used;
@@ -5369,7 +5367,7 @@ struct test_pad : public test_case {
53695367 }
53705368};
53715369
5372- // GGML_OP_EXT
5370+ // GGML_OP_PAD (with extension)
53735371struct test_pad_ext : public test_case {
53745372 const ggml_type type;
53755373 const std::array<int64_t , 4 > ne_a;
@@ -5817,49 +5815,53 @@ struct test_opt_step_sgd : public test_case {
58175815 }
58185816};
58195817
5820- // GGML_OP_ADD
5821- // GGML_OP_SUB
5822- // GGML_OP_DIV
5823- // GGML_OP_MUL
5824- struct test_op_arith : public test_case {
5818+ // GGML_OP_CUMSUM
5819+ struct test_cumsum : public test_case {
58255820 const ggml_type type;
58265821 const std::array<int64_t , 4 > ne;
5827- const ggml_op op;
58285822
5829- std::string vars () override { return VARS_TO_STR3 (type, ne, op ); }
5823+ std::string vars () override { return VARS_TO_STR2 (type, ne); }
58305824
5831- test_op_arith (ggml_op op, ggml_type type = GGML_TYPE_F32,
5825+ test_cumsum ( ggml_type type = GGML_TYPE_F32,
58325826 std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
5833- : type(type), ne(ne), op(op) {
5834- GGML_ASSERT (op == GGML_OP_ADD || op == GGML_OP_SUB || op == GGML_OP_DIV || op == GGML_OP_MUL);
5835- }
5827+ : type(type), ne(ne) {}
58365828
5837- ggml_tensor * build_graph (ggml_context * ctx) override {
5829+ ggml_tensor * build_graph (ggml_context * ctx) override {
58385830 ggml_tensor * a = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
58395831 ggml_set_param (a);
58405832 ggml_set_name (a, " a" );
58415833
5842- ggml_tensor * b = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
5843- ggml_set_name (b, " b" );
5834+ ggml_tensor * out = ggml_cumsum (ctx, a);
58445835
5845- ggml_tensor * out;
5836+ ggml_set_name (out, " out" ) ;
58465837
5847- switch (op) {
5848- case GGML_OP_ADD:
5849- out = ggml_add (ctx, a, b);
5850- break ;
5851- case GGML_OP_SUB:
5852- out = ggml_sub (ctx, a, b);
5853- break ;
5854- case GGML_OP_DIV:
5855- out = ggml_div (ctx, a, b);
5856- break ;
5857- case GGML_OP_MUL:
5858- out = ggml_mul (ctx, a, b);
5859- break ;
5860- default :
5861- GGML_ABORT (" This test only supports ADD, SUB, DIV and MUL" );
5838+ return out;
5839+ }
5840+
5841+ void initialize_tensors (ggml_context * ctx) override {
5842+ for (ggml_tensor * t = ggml_get_first_tensor (ctx); t != NULL ; t = ggml_get_next_tensor (ctx, t)) {
5843+ init_tensor_uniform (t, -1 .0f , 1 .0f );
58625844 }
5845+ }
5846+ };
5847+
5848+ // GGML_OP_EXPM1
5849+ struct test_expm1 : public test_case {
5850+ const ggml_type type;
5851+ const std::array<int64_t , 4 > ne;
5852+
5853+ std::string vars () override { return VARS_TO_STR2 (type, ne); }
5854+
5855+ test_expm1 (ggml_type type = GGML_TYPE_F32,
5856+ std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
5857+ : type(type), ne(ne) {}
5858+
5859+ ggml_tensor * build_graph (ggml_context * ctx) override {
5860+ ggml_tensor * a = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
5861+ ggml_set_param (a);
5862+ ggml_set_name (a, " a" );
5863+
5864+ ggml_tensor * out = ggml_expm1 (ctx, a);
58635865
58645866 ggml_set_name (out, " out" );
58655867
@@ -5868,20 +5870,19 @@ struct test_op_arith : public test_case {
58685870
58695871 void initialize_tensors (ggml_context * ctx) override {
58705872 for (ggml_tensor * t = ggml_get_first_tensor (ctx); t != NULL ; t = ggml_get_next_tensor (ctx, t)) {
5871- init_tensor_uniform (t, 0 . 1f , 1 .0f ); // no zeroes because div might complain
5873+ init_tensor_uniform (t, - 1 . 0f , 1 .0f );
58725874 }
58735875 }
5874-
58755876};
58765877
5877- // GGML_OP_CUMSUM
5878- struct test_cumsum : public test_case {
5878+ // GGML_OP_SOFTPLUS
5879+ struct test_softplus : public test_case {
58795880 const ggml_type type;
58805881 const std::array<int64_t , 4 > ne;
58815882
58825883 std::string vars () override { return VARS_TO_STR2 (type, ne); }
58835884
5884- test_cumsum (ggml_type type = GGML_TYPE_F32,
5885+ test_softplus (ggml_type type = GGML_TYPE_F32,
58855886 std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
58865887 : type(type), ne(ne) {}
58875888
@@ -5890,7 +5891,7 @@ struct test_cumsum : public test_case {
58905891 ggml_set_param (a);
58915892 ggml_set_name (a, " a" );
58925893
5893- ggml_tensor * out = ggml_cumsum (ctx, a);
5894+ ggml_tensor * out = ggml_softplus (ctx, a);
58945895
58955896 ggml_set_name (out, " out" );
58965897
@@ -7293,6 +7294,8 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
72937294 test_cases.emplace_back (new test_ceil (type));
72947295 test_cases.emplace_back (new test_round (type));
72957296 test_cases.emplace_back (new test_trunc (type));
7297+ test_cases.emplace_back (new test_expm1 (type));
7298+ test_cases.emplace_back (new test_softplus (type));
72967299 test_cases.emplace_back (new test_sqr (type, {7 , 1 , 5 , 3 }));
72977300 test_cases.emplace_back (new test_sqrt (type, {7 , 1 , 5 , 3 }));
72987301 test_cases.emplace_back (new test_log (type, {7 , 1 , 5 , 3 }));
@@ -7306,12 +7309,6 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
73067309 test_cases.emplace_back (new test_trunc (type, {7 , 1 , 5 , 3 }));
73077310 }
73087311
7309- // basic arithmetic, have to do them manually now that fusion is not supported
7310- test_cases.emplace_back (new test_op_arith (GGML_OP_ADD, GGML_TYPE_F32));
7311- test_cases.emplace_back (new test_op_arith (GGML_OP_SUB, GGML_TYPE_F32));
7312- test_cases.emplace_back (new test_op_arith (GGML_OP_DIV, GGML_TYPE_F32));
7313- test_cases.emplace_back (new test_op_arith (GGML_OP_MUL, GGML_TYPE_F32));
7314-
73157312 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 1 , 1 }, 5 ));
73167313 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 3 , 1 }, 5 ));
73177314 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 3 , 2 }, 5 ));
0 commit comments