|
4 | 4 | import random
|
5 | 5 | import shutil
|
6 | 6 | import tempfile
|
7 |
| -from distutils.util import strtobool |
8 | 7 |
|
9 | 8 | import numpy as np
|
10 |
| -import pytest |
11 | 9 | import torch
|
12 | 10 | from PIL import Image
|
13 | 11 | from torchvision import io
|
14 | 12 |
|
15 | 13 | import __main__ # noqa: 401
|
16 | 14 |
|
17 | 15 |
|
18 |
| -def get_bool_env_var(name, *, exist_ok=False, default=False): |
19 |
| - value = os.getenv(name) |
20 |
| - if value is None: |
21 |
| - return default |
22 |
| - if exist_ok: |
23 |
| - return True |
24 |
| - return bool(strtobool(value)) |
25 |
| - |
26 |
| - |
27 |
| -IN_CIRCLE_CI = get_bool_env_var("CIRCLECI") |
28 |
| -IN_RE_WORKER = get_bool_env_var("INSIDE_RE_WORKER", exist_ok=True) |
29 |
| -IN_FBCODE = get_bool_env_var("IN_FBCODE_TORCHVISION") |
| 16 | +IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == "true" |
| 17 | +IN_RE_WORKER = os.environ.get("INSIDE_RE_WORKER") is not None |
| 18 | +IN_FBCODE = os.environ.get("IN_FBCODE_TORCHVISION") == "1" |
30 | 19 | CUDA_NOT_AVAILABLE_MSG = "CUDA device not available"
|
31 | 20 | CIRCLECI_GPU_NO_CUDA_MSG = "We're in a CircleCI GPU machine, and this test doesn't need cuda."
|
32 | 21 |
|
@@ -213,7 +202,3 @@ def _test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwargs):
|
213 | 202 | # scriptable function test
|
214 | 203 | s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs)
|
215 | 204 | torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol)
|
216 |
| - |
217 |
| - |
218 |
| -def run_on_env_var(name, *, skip_reason=None, exist_ok=False, default=False): |
219 |
| - return pytest.mark.skipif(not get_bool_env_var(name, exist_ok=exist_ok, default=default), reason=skip_reason) |