diff --git a/test/test_models.py b/test/test_models.py
index 5ab0640a70e..bc83874ee4f 100644
--- a/test/test_models.py
+++ b/test/test_models.py
@@ -614,7 +614,7 @@ def test_classification_model(model_fn, dev):
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
     out = model(x)
-    _assert_expected(out.cpu(), model_name, prec=0.1)
+    _assert_expected(out.cpu(), model_name, prec=1e-3)
     assert out.shape[-1] == num_classes
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)
@@ -841,7 +841,7 @@ def test_video_model(model_fn, dev):
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
     out = model(x)
-    _assert_expected(out.cpu(), model_name, prec=0.1)
+    _assert_expected(out.cpu(), model_name, prec=1e-5)
     assert out.shape[-1] == num_classes
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)
@@ -884,7 +884,7 @@ def test_quantized_classification_model(model_fn):
     out = model(x)

     if model_name not in quantized_flaky_models:
-        _assert_expected(out, model_name + "_quantized", prec=0.1)
+        _assert_expected(out, model_name + "_quantized", prec=2e-2)
         assert out.shape[-1] == 5
         _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
         _check_fx_compatible(model, x, eager_out=out)