
Commit 95907e6 (parent: fde6719)

Further relax atol for model comparison; move python 3.11 + torch 2.1 -> python 3.12 + torch 2.4.1
2 files changed: 9 additions, 9 deletions

.github/workflows/tests.yml (3 additions, 3 deletions)

@@ -16,11 +16,11 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python: ['3.10', '3.11']
-        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.1.0', vision: '0.16.0'}]
+        python: ['3.10', '3.12']
+        torch: [{base: '1.13.0', vision: '0.14.0'}, {base: '2.4.1', vision: '0.19.1'}]
         testmarker: ['-k "not test_models"', '-m base', '-m cfg', '-m torchscript', '-m features', '-m fxforward', '-m fxbackward']
         exclude:
-          - python: '3.11'
+          - python: '3.12'
             torch: {base: '1.13.0', vision: '0.14.0'}
     runs-on: ${{ matrix.os }}

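For reference (an editorial sketch, not part of the commit): the matrix above expands to every python/torch pairing minus the excluded one. The snippet below enumerates the surviving pairings, treating the exclude rule as a full-key equality check, which coincides with GitHub Actions' partial-object matching here only because the exclude entry names both keys.

# Hypothetical sketch of the effective CI matrix after this change.
from itertools import product

pythons = ['3.10', '3.12']
torches = [
    {'base': '1.13.0', 'vision': '0.14.0'},
    {'base': '2.4.1', 'vision': '0.19.1'},
]
excluded = [{'python': '3.12', 'torch': {'base': '1.13.0', 'vision': '0.14.0'}}]

jobs = [
    {'python': py, 'torch': t}
    for py, t in product(pythons, torches)
    if {'python': py, 'torch': t} not in excluded
]
for job in jobs:
    print(job)
# Three pairings remain (each crossed with the seven testmarker values):
# 3.10 + 1.13.0, 3.10 + 2.4.1, 3.12 + 2.4.1; python 3.12 never runs torch 1.13.0.
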
tests/test_models.py (6 additions, 6 deletions)

@@ -146,18 +146,18 @@ def test_model_inference(model_name, batch_size):
     rand_output = model(rand_tensors['input'])
     rand_features = model.forward_features(rand_tensors['input'])
     rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
-    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-5)
-    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-5)
-    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-5)
+    assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4)
+    assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4)
+    assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

     def _test_owl(owl_input):
         owl_output = model(owl_input)
         owl_features = model.forward_features(owl_input)
         owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
         assert owl_output.softmax(1).argmax(1) == 24  # owl
-        assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-5)
-        assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-5)
-        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-5)
+        assert torch.allclose(owl_output, owl_tensors['output'], rtol=1e-3, atol=1e-4)
+        assert torch.allclose(owl_features, owl_tensors['features'], rtol=1e-3, atol=1e-4)
+        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=1e-3, atol=1e-4)

     _test_owl(owl_tensors['input'])  # test with original pp owl tensor
     _test_owl(pp(test_owl).unsqueeze(0))  # re-process from original jpg

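For context on the tolerance change (an editorial note, not part of the commit): torch.allclose passes elementwise when |a - b| <= atol + rtol * |b|, so atol is the dominant budget wherever the reference values are near zero. A minimal sketch of what raising atol from 1e-5 to 1e-4 tolerates:

# Minimal sketch of the old vs. new absolute tolerance budget.
import torch

ref = torch.zeros(4)
new = ref + 5e-5  # small absolute drift, e.g. from a different torch/kernel version

print(torch.allclose(new, ref, rtol=1e-3, atol=1e-5))  # False: 5e-5 exceeds the old budget
print(torch.allclose(new, ref, rtol=1e-3, atol=1e-4))  # True: within the new budget

The rtol=1e-3 term is unchanged, so values with larger magnitudes are still compared relative to their size.
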