@@ -146,18 +146,18 @@ def test_model_inference(model_name, batch_size):
     rand_output = model(rand_tensors['input'])
     rand_features = model.forward_features(rand_tensors['input'])
     rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
-    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-5)
-    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-5)
-    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-5)
+    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
+    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
+    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

     def _test_owl(owl_input):
         owl_output = model(owl_input)
         owl_features = model.forward_features(owl_input)
         owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
         assert owl_output.softmax(1).argmax(1) == 24  # owl
-        assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-5)
-        assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-5)
-        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-5)
+        assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
+        assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
+        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

     _test_owl(owl_tensors['input'])  # test with original pp owl tensor
     _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg
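Not part of the diff above: a minimal standalone sketch (with a made-up helper name, allclose_by_hand) of the elementwise check torch.allclose performs, illustrating why relaxing atol from 1e-5 to 1e-4 matters mainly for reference values near zero, where the rtol term contributes almost nothing.

import torch

def allclose_by_hand(actual, expected, rtol=1e-3, atol=1e-4):
    # torch.allclose passes when |actual - expected| <= atol + rtol * |expected| elementwise
    return bool(torch.all(torch.abs(actual - expected) <= atol + rtol * torch.abs(expected)))

expected = torch.tensor([0.0, 1.0])
actual = expected + 5e-5                                   # small absolute drift
print(allclose_by_hand(actual, expected, atol=1e-5))       # False: 5e-5 exceeds 1e-5 at the zero entry
print(allclose_by_hand(actual, expected, atol=1e-4))       # True: covered by the looser atol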