Commit 36fe9fa

Device selection with pytest. (#1713)
* The device to execute each test is set in stone.
* Update CONTRIBUTING.md
* Removed device placement logging and added comment.
1 parent 8b1b4cd commit 36fe9fa

File tree: 11 files changed, +171 -178 lines changed

CONTRIBUTING.md

Lines changed: 36 additions & 8 deletions
@@ -297,6 +297,10 @@ Run selected tests:
 python3 -m pytest path/to/file/or/directory/to/test
 ```
 
+Run the GPU-only tests with `pytest -m needs_gpu ./tensorflow_addons`.
+Run the CPU-only tests with `pytest -m 'not needs_gpu' ./tensorflow_addons`.
+
+
 #### Testing with Bazel
 
 Testing with Bazel is still supported but not recommended unless you have prior experience
@@ -411,22 +415,46 @@ on Tensors, `if` or `for` for example. Or with `TensorArray`. In short, when the
 conversion to graph is not trivial. No need to use it on all
 your tests. Having fast tests is important.
 
-#### cpu_and_gpu
+#### Selecting the devices to run the test
+
+By default, each test is wrapped behind the scenes with a
+```python
+with tf.device("CPU:0"):
+    ...
+```
 
-Will run your test function twice, once with `with tf.device("/device:CPU:0")` and
-once with `with tf.device("/device:GPU:0")`. If a GPU is not present on the system,
-the second test is skipped. To use it:
+This is automatic, but it's also possible to ask the test runner to run
+a test twice, on CPU and on GPU, or only on GPU. Here is how to do it.
 
 ```python
-@pytest.mark.usefixtures("cpu_and_gpu")
+import pytest
+
+@pytest.mark.with_device(["cpu", "gpu"])
 def test_something():
-    assert ...== ...
+    # The code here will run twice: once on GPU, once on CPU.
+    ...
+
+@pytest.mark.with_device(["gpu"])
+def test_something_else():
+    # This test will only be run on GPU.
+    # The test runner will call `with tf.device("GPU:0")` behind the scenes.
+    ...
+
+@pytest.mark.with_device(["cpu"])
+def test_something_more():
+    # Don't do this; it's already the default behavior.
+    ...
 ```
 
+Note that if a GPU is not detected on the system, the test is
+skipped rather than marked as failed. Only the first GPU of the system is used,
+even when running pytest in multiprocessing mode (the `-n` argument).
+Beware of CUDA out-of-memory errors if the number of pytest workers is too high.
+
 ##### When to use it?
 
-When you test custom CUDA code. We can expect existing TensorFlow ops to behave the same
-on CPU and GPU.
+When you test custom CUDA code or float16 ops.
+We can expect other existing TensorFlow ops to behave the same on CPU and GPU.
 
 #### data_format
 
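The wrapper that CONTRIBUTING.md describes lives in `tensorflow_addons/utils/test_utils.py`, which is not part of this diff. As a rough sketch of the documented behavior (the fixture body below is an assumption for illustration, not the actual implementation):

```python
import pytest
import tensorflow as tf


@pytest.fixture(autouse=True)
def _device_placement(request):
    # Default to CPU unless pytest_generate_tests injected a device
    # from the with_device marker (see the hook sketch further down).
    device = getattr(request, "param", "cpu")
    if device == "gpu" and not tf.config.list_physical_devices("GPU"):
        # No GPU detected: report the test as skipped, not failed.
        pytest.skip("Test requires a GPU and none is available.")
    # Pin all ops created by the test body to the selected device.
    with tf.device(device.upper() + ":0"):
        yield
```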

tensorflow_addons/activations/tests/gelu_test.py

Lines changed: 1 addition & 0 deletions
@@ -36,6 +36,7 @@ def test_gelu(dtype):
     test_utils.assert_allclose_according_to_type(gelu(x, False), expected_result)
 
 
+@pytest.mark.with_device(["cpu", "gpu"])
 @pytest.mark.parametrize("dtype", [np.float32, np.float64])
 @pytest.mark.parametrize("approximate", [True, False])
 def test_same_as_py_func(dtype, approximate):

tensorflow_addons/conftest.py

Lines changed: 10 additions & 6 deletions
@@ -1,9 +1,13 @@
-from tensorflow_addons.utils.test_utils import maybe_run_functions_eagerly  # noqa: F401
-from tensorflow_addons.utils.test_utils import cpu_and_gpu  # noqa: F401
-from tensorflow_addons.utils.test_utils import data_format  # noqa: F401
-from tensorflow_addons.utils.test_utils import set_seeds  # noqa: F401
-from tensorflow_addons.utils.test_utils import pytest_addoption  # noqa: F401
-from tensorflow_addons.utils.test_utils import set_global_variables  # noqa: F401
+from tensorflow_addons.utils.test_utils import (  # noqa: F401
+    maybe_run_functions_eagerly,
+    data_format,
+    set_seeds,
+    pytest_addoption,
+    set_global_variables,
+    pytest_configure,
+    _device_placement,
+    pytest_generate_tests,
+)
 
 # fixtures present in this file will be available
 # when running tests and can be referenced with strings
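The `pytest_generate_tests` hook and `_device_placement` fixture imported here are the pieces that implement the `with_device` marker; their definitions sit in `test_utils.py`, outside this diff. A plausible minimal version of the wiring, under the same assumptions as the fixture sketch above:

```python
def pytest_generate_tests(metafunc):
    # Read the with_device marker; the default is CPU-only execution.
    marker = metafunc.definition.get_closest_marker("with_device")
    devices = marker.args[0] if marker else ["cpu"]
    # indirect=True routes each device name into the _device_placement
    # fixture as request.param, so the test runs once per listed device.
    metafunc.parametrize("_device_placement", devices, indirect=True)


def pytest_configure(config):
    # Register the custom markers so `pytest -m needs_gpu` selection and
    # --strict-markers both work without warnings.
    config.addinivalue_line(
        "markers", "with_device(devices): run the test on each named device"
    )
    config.addinivalue_line("markers", "needs_gpu: the test requires a GPU")
```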

tensorflow_addons/image/tests/resampler_ops_test.py

Lines changed: 2 additions & 2 deletions
@@ -92,7 +92,7 @@ def _make_warp(batch_size, warp_height, warp_width, dtype):
     return warp.astype(dtype)
 
 
-@pytest.mark.usefixtures("cpu_and_gpu")
+@pytest.mark.with_device(["cpu", "gpu"])
 @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
 def test_op_forward_pass(dtype):
     np.random.seed(0)
@@ -182,7 +182,7 @@ def test_op_errors():
         resampler_ops.resampler(data, warp)
 
 
-@pytest.mark.usefixtures("cpu_and_gpu")
+@pytest.mark.with_device(["cpu", "gpu"])
 @pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
 def test_op_backward_pass(dtype):
     np.random.seed(13)

tensorflow_addons/image/tests/transform_ops_test.py

Lines changed: 25 additions & 26 deletions
@@ -19,7 +19,6 @@
 import tensorflow as tf
 
 from tensorflow_addons.image import transform_ops
-from tensorflow_addons.utils import test_utils
 from skimage import transform
 
 _DTYPES = {
@@ -32,39 +31,39 @@
 }
 
 
+@pytest.mark.with_device(["cpu", "gpu"])
 @pytest.mark.usefixtures("maybe_run_functions_eagerly")
 @pytest.mark.parametrize("dtype", _DTYPES)
 def test_compose(dtype):
-    with test_utils.use_gpu():
-        image = tf.constant(
-            [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype,
-        )
-        # Rotate counter-clockwise by pi / 2.
-        rotation = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
-        # Translate right by 1 (the transformation matrix is always inverted,
-        # hence the -1).
-        translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.dtypes.float32)
-        composed = transform_ops.compose_transforms([rotation, translation])
-        image_transformed = transform_ops.transform(image, composed)
-        np.testing.assert_equal(
-            [[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
-            image_transformed.numpy(),
-        )
+    image = tf.constant(
+        [[1, 1, 1, 0], [1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 0, 0]], dtype=dtype,
+    )
+    # Rotate counter-clockwise by pi / 2.
+    rotation = transform_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
+    # Translate right by 1 (the transformation matrix is always inverted,
+    # hence the -1).
+    translation = tf.constant([1, 0, -1, 0, 1, 0, 0, 0], dtype=tf.dtypes.float32)
+    composed = transform_ops.compose_transforms([rotation, translation])
+    image_transformed = transform_ops.transform(image, composed)
+    np.testing.assert_equal(
+        [[0, 0, 0, 0], [0, 1, 0, 1], [0, 1, 0, 1], [0, 1, 1, 1]],
+        image_transformed.numpy(),
+    )
 
 
+@pytest.mark.with_device(["cpu", "gpu"])
 @pytest.mark.usefixtures("maybe_run_functions_eagerly")
 @pytest.mark.parametrize("dtype", _DTYPES)
 def test_extreme_projective_transform(dtype):
-    with test_utils.use_gpu():
-        image = tf.constant(
-            [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype,
-        )
-        transformation = tf.constant([1, 0, 0, 0, 1, 0, -1, 0], tf.dtypes.float32)
-        image_transformed = transform_ops.transform(image, transformation)
-        np.testing.assert_equal(
-            [[1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
-            image_transformed.numpy(),
-        )
+    image = tf.constant(
+        [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], dtype=dtype,
+    )
+    transformation = tf.constant([1, 0, 0, 0, 1, 0, -1, 0], tf.dtypes.float32)
+    image_transformed = transform_ops.transform(image, transformation)
+    np.testing.assert_equal(
+        [[1, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]],
+        image_transformed.numpy(),
+    )
 
 
 @pytest.mark.usefixtures("maybe_run_functions_eagerly")

tensorflow_addons/layers/tests/optical_flow_test.py

Lines changed: 4 additions & 3 deletions
@@ -72,7 +72,8 @@ def _create_test_data(data_format):
     return val_a, val_b
 
 
-@pytest.mark.usefixtures("cpu_and_gpu", "maybe_run_functions_eagerly")
+@pytest.mark.with_device(["cpu", "gpu"])
+@pytest.mark.usefixtures("maybe_run_functions_eagerly")
 def test_forward_simple(data_format):
     # We are just testing where the output has vanishing values.
     val_a, val_b = _create_test_data(data_format)
@@ -114,7 +115,7 @@ def test_forward_simple(data_format):
     assert actual.shape == (2, 9, 7, 8)
 
 
-@pytest.mark.usefixtures("cpu_and_gpu")
+@pytest.mark.with_device(["cpu", "gpu"])
 def test_gradients(data_format):
     batch, channels, height, width = 2, 3, 5, 6
     input_a = np.random.randn(batch, channels, height, width).astype(np.float32)
@@ -150,7 +151,7 @@ def correlation_fn(input_a, input_b):
     np.testing.assert_allclose(theoretical[0], numerical[0], atol=1e-3)
 
 
-@pytest.mark.usefixtures("cpu_and_gpu")
+@pytest.mark.with_device(["cpu", "gpu"])
 def test_keras(data_format):
     # Unable to use `layer_test` as this layer has multiple inputs.
     val_a, val_b = _create_test_data(data_format)

tensorflow_addons/metrics/tests/metrics_test.py

Lines changed: 4 additions & 6 deletions
@@ -12,18 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-import unittest
 import inspect
 
 from tensorflow.keras.metrics import Metric
 from tensorflow_addons import metrics
 
 
-class MetricsTests(unittest.TestCase):
-    def test_update_state_signature(self):
-        for name, obj in inspect.getmembers(metrics):
-            if inspect.isclass(obj) and issubclass(obj, Metric):
-                check_update_state_signature(obj)
+def test_update_state_signature():
+    for name, obj in inspect.getmembers(metrics):
+        if inspect.isclass(obj) and issubclass(obj, Metric):
+            check_update_state_signature(obj)
 
 
 def check_update_state_signature(metric_class):

tensorflow_addons/optimizers/tests/lazy_adam_test.py

Lines changed: 1 addition & 1 deletion
@@ -97,7 +97,7 @@ def _test_sparse(dtype):
 
 
 @pytest.mark.parametrize("dtype", [tf.int32, tf.int64])
-@pytest.mark.usefixtures("cpu_and_gpu")
+@pytest.mark.with_device(["cpu", "gpu"])
 def test_sparse_device_placement(dtype):
 
     # If a GPU is available, tests that all optimizer ops can be placed on

tensorflow_addons/seq2seq/tests/beam_search_ops_test.py

Lines changed: 16 additions & 20 deletions
@@ -56,21 +56,18 @@ def test_bad_parent_values_on_cpu():
         [[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]]
     )
     max_sequence_lengths = [3]
-    with tf.device("/cpu:0"):
-        with pytest.raises(tf.errors.InvalidArgumentError):
-            _ = gather_tree(
-                step_ids=step_ids,
-                parent_ids=parent_ids,
-                max_sequence_lengths=max_sequence_lengths,
-                end_token=end_token,
-            )
+
+    with pytest.raises(tf.errors.InvalidArgumentError):
+        _ = gather_tree(
+            step_ids=step_ids,
+            parent_ids=parent_ids,
+            max_sequence_lengths=max_sequence_lengths,
+            end_token=end_token,
+        )
 
 
+@pytest.mark.with_device(["gpu"])
 def test_bad_parent_values_on_gpu():
-    # Only want to run this test on CUDA devices, as gather_tree is not
-    # registered for SYCL devices.
-    if not tf.test.is_gpu_available(cuda_only=True):
-        return
     # (max_time = 4, batch_size = 1, beams = 3)
     # bad parent in beam 1 time 1; appears as a negative index at time 0
     end_token = 10
@@ -82,14 +79,13 @@ def test_bad_parent_values_on_gpu():
     expected_result = _transpose_batch_time(
         [[[2, -1, 2], [6, 5, 6], [7, 8, 9], [10, 10, 10]]]
    )
-    with tf.device("/device:GPU:0"):
-        beams = gather_tree(
-            step_ids=step_ids,
-            parent_ids=parent_ids,
-            max_sequence_lengths=max_sequence_lengths,
-            end_token=end_token,
-        )
-        np.testing.assert_equal(expected_result, beams.numpy())
+    beams = gather_tree(
+        step_ids=step_ids,
+        parent_ids=parent_ids,
+        max_sequence_lengths=max_sequence_lengths,
+        end_token=end_token,
+    )
+    np.testing.assert_equal(expected_result, beams.numpy())
 
 
 def test_gather_tree_batch():
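This file shows the pattern the commit retires everywhere: an imperative GPU guard inside the test body. The difference matters for reporting, since an early return counts as a pass while the marker produces a skip. A hypothetical before/after for illustration (test names are made up):

```python
import pytest
import tensorflow as tf


# Old style: on a CPU-only machine the early return makes the test
# *pass* even though nothing was exercised.
def test_needs_gpu_old_style():
    if not tf.test.is_gpu_available(cuda_only=True):
        return
    ...


# New style: the runner places the test on GPU:0 and reports a *skip*
# when no GPU is detected, which is visible in the test summary.
@pytest.mark.with_device(["gpu"])
def test_needs_gpu_new_style():
    ...
```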
