@@ -188,12 +188,11 @@ static void init_tensor_causal(ggml_tensor * tensor, float min = -1.0f, float ma
188188 std::mt19937 gen (rd ());
189189 std::uniform_real_distribution<float > dis (min, max);
190190
191- for (int64_t i0 = 0 ; i0 < tensor->ne [0 ]; i0++) {
192- for (int64_t i1 = 0 ; i1 < tensor->ne [1 ]; i1++) {
193- for (int64_t i2 = 0 ; i2 < tensor->ne [2 ]; i2++) {
194- for (int64_t i3 = 0 ; i3 < tensor->ne [3 ]; i3++) {
195- int64_t idx = i0 * tensor->nb [0 ] / sizeof (float ) + i1 * tensor->nb [1 ] / sizeof (float ) +
196- i2 * tensor->nb [2 ] / sizeof (float ) + i3 * tensor->nb [3 ] / sizeof (float );
191+ for (int64_t i3 = 0 ; i3 < tensor->ne [3 ]; i3++) {
192+ for (int64_t i2 = 0 ; i2 < tensor->ne [2 ]; i2++) {
192+ for (int64_t i1 = 0 ; i1 < tensor->ne [1 ]; i1++) {
194+ for (int64_t i0 = 0 ; i0 < tensor->ne [0 ]; i0++) {
195+ int64_t idx = (i0 * tensor->nb [0 ] + i1 * tensor->nb [1 ] + i2 * tensor->nb [2 ] + i3 * tensor->nb [3 ]) / sizeof (float );
197196 if (i0 <= i1) {
198197 data_f32[idx] = dis (gen);
199198 } else {
@@ -4785,7 +4784,6 @@ struct test_argsort : public test_case {
47854784 }
47864785};
47874786
4788- // GGML_OP_TOPK_MOE
47894787struct test_topk_moe : public test_case {
47904788 const std::array<int64_t , 4 > ne;
47914789 const int n_expert_used;
@@ -4843,7 +4841,6 @@ struct test_topk_moe: public test_case {
48434841 }
48444842};
48454843
4846- // GGML_MOE_EXPERT_REDUCE
48474844struct test_moe_expert_reduce : public test_case {
48484845 const int64_t n_embd;
48494846 const int64_t n_tokens;
@@ -5349,7 +5346,7 @@ struct test_pad : public test_case {
53495346 }
53505347};
53515348
5352- // GGML_OP_EXT
5349+ // GGML_OP_PAD (with extension)
53535350struct test_pad_ext : public test_case {
53545351 const ggml_type type;
53555352 const std::array<int64_t , 4 > ne_a;
@@ -5797,49 +5794,53 @@ struct test_opt_step_sgd : public test_case {
57975794 }
57985795};
57995796
5800- // GGML_OP_ADD
5801- // GGML_OP_SUB
5802- // GGML_OP_DIV
5803- // GGML_OP_MUL
5804- struct test_op_arith : public test_case {
5797+ // GGML_OP_CUMSUM
5798+ struct test_cumsum : public test_case {
58055799 const ggml_type type;
58065800 const std::array<int64_t , 4 > ne;
5807- const ggml_op op;
58085801
5809- std::string vars () override { return VARS_TO_STR3 (type, ne, op ); }
5802+ std::string vars () override { return VARS_TO_STR2 (type, ne); }
58105803
5811- test_op_arith (ggml_op op, ggml_type type = GGML_TYPE_F32,
5804+ test_cumsum ( ggml_type type = GGML_TYPE_F32,
58125805 std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
5813- : type(type), ne(ne), op(op) {
5814- GGML_ASSERT (op == GGML_OP_ADD || op == GGML_OP_SUB || op == GGML_OP_DIV || op == GGML_OP_MUL);
5815- }
5806+ : type(type), ne(ne) {}
58165807
5817- ggml_tensor * build_graph (ggml_context * ctx) override {
5808+ ggml_tensor * build_graph (ggml_context * ctx) override {
58185809 ggml_tensor * a = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
58195810 ggml_set_param (a);
58205811 ggml_set_name (a, " a" );
58215812
5822- ggml_tensor * b = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
5823- ggml_set_name (b, " b" );
5813+ ggml_tensor * out = ggml_cumsum (ctx, a);
58245814
5825- ggml_tensor * out;
5815+ ggml_set_name (out, " out" ) ;
58265816
5827- switch (op) {
5828- case GGML_OP_ADD:
5829- out = ggml_add (ctx, a, b);
5830- break ;
5831- case GGML_OP_SUB:
5832- out = ggml_sub (ctx, a, b);
5833- break ;
5834- case GGML_OP_DIV:
5835- out = ggml_div (ctx, a, b);
5836- break ;
5837- case GGML_OP_MUL:
5838- out = ggml_mul (ctx, a, b);
5839- break ;
5840- default :
5841- GGML_ABORT (" This test only supports ADD, SUB, DIV and MUL" );
5817+ return out;
5818+ }
5819+
5820+ void initialize_tensors (ggml_context * ctx) override {
5821+ for (ggml_tensor * t = ggml_get_first_tensor (ctx); t != NULL ; t = ggml_get_next_tensor (ctx, t)) {
5822+ init_tensor_uniform (t, -1 .0f , 1 .0f );
58425823 }
5824+ }
5825+ };
5826+
5827+ // GGML_OP_EXPM1
5828+ struct test_expm1 : public test_case {
5829+ const ggml_type type;
5830+ const std::array<int64_t , 4 > ne;
5831+
5832+ std::string vars () override { return VARS_TO_STR2 (type, ne); }
5833+
5834+ test_expm1 (ggml_type type = GGML_TYPE_F32,
5835+ std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
5836+ : type(type), ne(ne) {}
5837+
5838+ ggml_tensor * build_graph (ggml_context * ctx) override {
5839+ ggml_tensor * a = ggml_new_tensor_4d (ctx, type, ne[0 ], ne[1 ], ne[2 ], ne[3 ]);
5840+ ggml_set_param (a);
5841+ ggml_set_name (a, " a" );
5842+
5843+ ggml_tensor * out = ggml_expm1 (ctx, a);
58435844
58445845 ggml_set_name (out, " out" );
58455846
@@ -5848,20 +5849,19 @@ struct test_op_arith : public test_case {
58485849
58495850 void initialize_tensors (ggml_context * ctx) override {
58505851 for (ggml_tensor * t = ggml_get_first_tensor (ctx); t != NULL ; t = ggml_get_next_tensor (ctx, t)) {
5851- init_tensor_uniform (t, 0 . 1f , 1 .0f ); // no zeroes because div might complain
5852+ init_tensor_uniform (t, - 1 . 0f , 1 .0f );
58525853 }
58535854 }
5854-
58555855};
58565856
5857- // GGML_OP_CUMSUM
5858- struct test_cumsum : public test_case {
5857+ // GGML_OP_SOFTPLUS
5858+ struct test_softplus : public test_case {
58595859 const ggml_type type;
58605860 const std::array<int64_t , 4 > ne;
58615861
58625862 std::string vars () override { return VARS_TO_STR2 (type, ne); }
58635863
5864- test_cumsum (ggml_type type = GGML_TYPE_F32,
5864+ test_softplus (ggml_type type = GGML_TYPE_F32,
58655865 std::array<int64_t , 4 > ne = { 10 , 5 , 4 , 3 })
58665866 : type(type), ne(ne) {}
58675867
@@ -5870,7 +5870,7 @@ struct test_cumsum : public test_case {
58705870 ggml_set_param (a);
58715871 ggml_set_name (a, " a" );
58725872
5873- ggml_tensor * out = ggml_cumsum (ctx, a);
5873+ ggml_tensor * out = ggml_softplus (ctx, a);
58745874
58755875 ggml_set_name (out, " out" );
58765876
@@ -7256,6 +7256,8 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
72567256 test_cases.emplace_back (new test_ceil (type));
72577257 test_cases.emplace_back (new test_round (type));
72587258 test_cases.emplace_back (new test_trunc (type));
7259+ test_cases.emplace_back (new test_expm1 (type));
7260+ test_cases.emplace_back (new test_softplus (type));
72597261 test_cases.emplace_back (new test_sqr (type, {7 , 1 , 5 , 3 }));
72607262 test_cases.emplace_back (new test_sqrt (type, {7 , 1 , 5 , 3 }));
72617263 test_cases.emplace_back (new test_log (type, {7 , 1 , 5 , 3 }));
@@ -7269,12 +7271,6 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_eval() {
72697271 test_cases.emplace_back (new test_trunc (type, {7 , 1 , 5 , 3 }));
72707272 }
72717273
7272- // basic arithmetic, have to do them manually now that fusion is not supported
7273- test_cases.emplace_back (new test_op_arith (GGML_OP_ADD, GGML_TYPE_F32));
7274- test_cases.emplace_back (new test_op_arith (GGML_OP_SUB, GGML_TYPE_F32));
7275- test_cases.emplace_back (new test_op_arith (GGML_OP_DIV, GGML_TYPE_F32));
7276- test_cases.emplace_back (new test_op_arith (GGML_OP_MUL, GGML_TYPE_F32));
7277-
72787274 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 1 , 1 }, 5 ));
72797275 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 3 , 1 }, 5 ));
72807276 test_cases.emplace_back (new test_diag_mask_inf (GGML_TYPE_F32, {10 , 10 , 3 , 2 }, 5 ));
0 commit comments