@@ -59,7 +59,7 @@ constexpr size_t kTensorDimensionLimit = 16;
 // torch.ops.executorch.prim.add.int(iteration_index, 1, iteration_index)
 // done_bool = torch.ops.executorch.prim.eq.int(iteration_index,
 // sym_size, done_bool) # Emitter inserts a instruction here, if
-// done_bool == False jump to selcect_copy op # if not continue. return
+// done_bool == False jump to select_copy op # if not continue. return
 // add_tensor
 //
 // The output of each iteration (copy_from) is copied into the copy_to tensor at
@@ -79,12 +79,24 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
   auto copy_from = (*stack[1]).toTensor();
   auto index = (*stack[2]).toInt();
 
+  ET_KERNEL_CHECK_MSG(
+      context,
+      index >= 0,
+      InvalidArgument,
+      /* void */,
+      "Expected index to be non-negative.");
+
   // Number of bytes we need to copy over from copy_from tensor.
   size_t size_copy_from = (copy_from.element_size()) * (copy_from.numel());
 
-  ET_CHECK_MSG(
+  ET_KERNEL_CHECK_MSG(
+      context,
       (copy_to.sizes().size() - copy_from.sizes().size()) == 1,
-      "Ranks of copy_to and copy_from tensor should only differ by 1.");
+      InvalidArgument,
+      /* void */,
+      "Ranks of copy_to %zu and copy_from tensor %zu should only differ by 1.",
+      copy_to.sizes().size(),
+      copy_from.sizes().size());
 
   // Here we calculate the size of the out_tensor after copy_from has
   // been copied to it. This will be passed onto the resize call.
@@ -93,8 +105,11 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
     // If we're copying past the first index then the shape of
     // copy_from and copy_to without the leading dimension should be
     // the same. i.e. copy_to.size[1:] == copy_from.size[:].
-    ET_CHECK_MSG(
+    ET_KERNEL_CHECK_MSG(
+        context,
         copy_to.sizes()[i + 1] == copy_from.sizes()[i],
+        InvalidArgument,
+        /* void */,
         "Mismatch in shape between copy_to and copy_from tensors");
     expected_output_size[i + 1] = copy_from.sizes()[i];
   }
@@ -105,11 +120,22 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
     Error err =
         resize_tensor(copy_to, {expected_output_size, copy_to.sizes().size()});
     ET_CHECK(err == Error::Ok);
-    ET_CHECK_MSG(
+    ET_KERNEL_CHECK_MSG(
+        context,
         data_ptr == copy_to.const_data_ptr(),
+        InvalidState,
+        /* void */,
         "Data ptr of copy_to tensor changed after resize which isn't allowed for static/upper-bounded tensors");
   }
 
+  // After potential resize, verify that index is within bounds.
+  ET_KERNEL_CHECK_MSG(
+      context,
+      index < copy_to.sizes()[0],
+      InvalidArgument,
+      /* void */,
+      "Index out of bounds");
+
   auto copy_to_ptr = copy_to.const_data_ptr();
   auto copy_from_ptr = copy_from.const_data_ptr();
 
@@ -118,12 +144,22 @@ void et_copy_index(KernelRuntimeContext& context, Span<EValue*> stack) {
   // copy_from into the copy_to tensor.
 
   // Check that the destination has enough space for the copy.
+  ET_KERNEL_CHECK_MSG(
+      context,
+      size_copy_from == 0 ||
+          static_cast<size_t>(index) <= SIZE_MAX / size_copy_from,
+      InvalidArgument,
+      /* void */,
153+ " Offset multiplication ." );
   size_t offset = index * size_copy_from;
   size_t copy_to_size = copy_to.element_size() * copy_to.numel();
-  ET_CHECK_MSG(
-      offset + size_copy_from <= copy_to_size,
-      "Buffer overflow: copy_to tensor is smaller than copy_from tensor.");
-
+  ET_KERNEL_CHECK_MSG(
+      context,
+      (offset <= SIZE_MAX - size_copy_from) &&
+          (offset + size_copy_from <= copy_to_size),
+      InvalidArgument,
+      /* void */,
+      "Buffer overflow; offset overflow or copy_to tensor is smaller than copy_from tensor.");
   memcpy(
       // NOLINTNEXTLINE(performance-no-int-to-ptr)
       (void*)((uintptr_t)copy_to_ptr + offset),
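For context, the two new guards above follow a standard overflow-safe bounds-check pattern: confirm that index * size_copy_from cannot wrap around size_t before computing the offset, then confirm that offset + size_copy_from cannot wrap before comparing it against the destination size. Below is a minimal standalone C++ sketch of that pattern; the helper name offset_copy_fits and its plain-bool interface are illustrative only, not part of the ExecuTorch API.

#include <cstddef>
#include <cstdint>

// Returns true only if copying elem_bytes bytes into slot `index` of a
// destination holding dst_bytes bytes stays in range, with both the
// multiplication and the addition checked against size_t wraparound.
static bool offset_copy_fits(size_t index, size_t elem_bytes, size_t dst_bytes) {
  // Guard the multiplication: index * elem_bytes must not overflow.
  if (elem_bytes != 0 && index > SIZE_MAX / elem_bytes) {
    return false;
  }
  size_t offset = index * elem_bytes;
  // Guard the addition: offset + elem_bytes must not overflow.
  if (offset > SIZE_MAX - elem_bytes) {
    return false;
  }
  // Finally, the copy must end within the destination buffer.
  return offset + elem_bytes <= dst_bytes;
}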