Skip to content

Commit 89f2817

Browse files
kiyaevdzakhar
authored and committed
Change type of bias_scale_expected from int to long long to avoid overflow in the 32x32-bit multiplication
1 parent 48bf697 commit 89f2817

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

lib/src/private/src/mli_check.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -127,11 +127,11 @@ mli_status mli_chk_bias_scale_asym(const mli_tensor * in, const mli_tensor * wei
127127
const int scale_in = (int)in->el_params.asym.scale.i32;
128128
const int out_shift = mli_prv_calc_shift(in, weights, bias);
129129
for (int idx = 0; idx < num_scale_vals; idx++) {
130-
int bias_scale_expected = scale_in * w_scales[idx];
130+
long long bias_scale_expected = scale_in * w_scales[idx];
131131
bias_scale_expected = (out_shift > 0)
132132
? bias_scale_expected >> out_shift
133133
: bias_scale_expected << out_shift;
134-
const int scales_diff = bias_scale_expected - b_scales[idx];
134+
const long long scales_diff = bias_scale_expected - b_scales[idx];
135135
// Check that diff is about the rounding error
136136
if (MLI_CHECK(scales_diff <= 1 && scales_diff >= -1, "Bias scale must be the multiplication of input and weights scales for correct calculations in current quanization scheme"))
137137
return MLI_STATUS_INCOMPATEBLE_TENSORS;

0 commit comments

Comments
 (0)