
Commit d362526

Remove fit() comments; fix small typo in shape dimensions
Signed-off-by: Alfie Roddan <228966941+alfieroddanintel@users.noreply.github.com>
1 parent 91aac30 · commit d362526

2 files changed: +6 -9 lines


src/anomalib/models/image/anomaly_dino/lightning_model.py

Lines changed: 5 additions & 8 deletions
@@ -187,13 +187,13 @@ def __init__(
     @classmethod
     def configure_pre_processor(
         cls,
-        image_size: tuple[int, int] | None = None,
+        image_size: tuple[int, int] | int | None = None,
     ) -> PreProcessor:
         """Configure the default pre-processor for AnomalyDINO.
 
         Args:
-            image_size (tuple[int, int] | None, optional): Target size for resizing
-                input images. Defaults to ``(252, 252)``.
+            image_size (tuple[int, int] | int | None, optional): Target size for resizing
+                input images. Defaults to ``(252, 252)``. An int resizes the shortest side, keeping the aspect ratio.
 
         Returns:
             PreProcessor: Configured pre-processor instance.
@@ -243,11 +243,8 @@ def fit(self) -> None:
         """Optional fitting step.
 
         This method is a placeholder for potential post-training operations
-        such as coreset subsampling or feature normalization.
-
-        Note:
-            The current implementation is a no-op, as AnomalyDINO typically
-            performs inference directly after feature extraction.
+        such as coreset subsampling or feature normalization. The model
+        handles fitting if needed.
         """
         self.model.fit()
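
The widened image_size type follows torchvision's Resize semantics: a bare int scales the shorter edge and preserves the aspect ratio, while a tuple forces an exact output size. A minimal sketch of the two forms, assuming the pre-processor delegates to torchvision.transforms.Resize (the sample shapes below are illustrative, not taken from the commit):

import torch
from torchvision.transforms import Resize

image = torch.rand(3, 480, 640)  # illustrative (C, H, W) input

exact = Resize((252, 252))(image)  # tuple: force an exact 252x252 output
print(exact.shape)                 # torch.Size([3, 252, 252])

shorter = Resize(252)(image)       # int: shorter side -> 252, aspect ratio kept
print(shorter.shape)               # torch.Size([3, 252, 336])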

src/anomalib/models/image/anomaly_dino/torch_model.py

Lines changed: 1 addition & 1 deletion
@@ -224,7 +224,7 @@ def forward(self, input_tensor: torch.Tensor) -> torch.Tensor | InferenceBatch:
         input_tensor = input_tensor.type(self.memory_bank.dtype)
 
         # work out sizing
-        b, _, w, h = input_tensor.shape
+        b, _, h, w = input_tensor.shape
         cropped_width = w - w % self.feature_encoder.patch_size
         cropped_height = h - h % self.feature_encoder.patch_size
         grid_size = (
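
The one-character fix matters because PyTorch batches images as (batch, channels, height, width), so height must be unpacked third and width fourth; the old b, _, w, h order silently swapped the two sides on non-square inputs. A minimal sketch of the sizing logic, using a hypothetical 14-pixel patch size (the value DINOv2 backbones use) and a made-up non-square input:

import torch

patch_size = 14  # hypothetical stand-in for feature_encoder.patch_size
input_tensor = torch.randn(2, 3, 230, 300)  # NCHW: height=230, width=300

b, _, h, w = input_tensor.shape  # fixed order: height third, width fourth

# Crop each side down to the largest multiple of the patch size.
cropped_width = w - w % patch_size    # 300 -> 294
cropped_height = h - h % patch_size   # 230 -> 224

# One grid cell per patch; the old unpacking would transpose these counts.
print(cropped_height // patch_size, cropped_width // patch_size)  # 16 21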
