From 0035867e3c4d939dc743dcd03274a0040d5793a1 Mon Sep 17 00:00:00 2001
From: gabrieldemarmiesse
Date: Thu, 19 Mar 2020 16:37:01 +0000
Subject: [PATCH] Used pytest only to test tanhshrink in eager mode

---
 .../activations/tanhshrink_test.py    | 40 +++++++++----------
 tensorflow_addons/utils/test_utils.py | 33 +++++++++++++++
 2 files changed, 51 insertions(+), 22 deletions(-)

diff --git a/tensorflow_addons/activations/tanhshrink_test.py b/tensorflow_addons/activations/tanhshrink_test.py
index 1606461b0e..03682e1462 100644
--- a/tensorflow_addons/activations/tanhshrink_test.py
+++ b/tensorflow_addons/activations/tanhshrink_test.py
@@ -16,7 +16,6 @@
 import sys
 
 import pytest
-from absl.testing import parameterized
 
 import numpy as np
 import tensorflow as tf
@@ -25,27 +24,24 @@
 from tensorflow_addons.utils import test_utils
 
 
-@test_utils.run_all_in_graph_and_eager_modes
-class TanhshrinkTest(tf.test.TestCase, parameterized.TestCase):
-    @parameterized.named_parameters(
-        ("float16", np.float16), ("float32", np.float32), ("float64", np.float64)
-    )
-    def test_same_as_py_func(self, dtype):
-        np.random.seed(1234)
-        for _ in range(20):
-            self.verify_funcs_are_equivalent(dtype)
-
-    def verify_funcs_are_equivalent(self, dtype):
-        x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
-        x = tf.convert_to_tensor(x_np)
-        with tf.GradientTape(persistent=True) as t:
-            t.watch(x)
-            y_native = tanhshrink(x)
-            y_py = _tanhshrink_py(x)
-        self.assertAllCloseAccordingToType(y_native, y_py)
-        grad_native = t.gradient(y_native, x)
-        grad_py = t.gradient(y_py, x)
-        self.assertAllCloseAccordingToType(grad_native, grad_py)
+@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+def test_same_as_py_func(dtype):
+    np.random.seed(1234)
+    for _ in range(20):
+        verify_funcs_are_equivalent(dtype)
+
+
+def verify_funcs_are_equivalent(dtype):
+    x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
+    x = tf.convert_to_tensor(x_np)
+    with tf.GradientTape(persistent=True) as t:
+        t.watch(x)
+        y_native = tanhshrink(x)
+        y_py = _tanhshrink_py(x)
+    test_utils.assert_allclose_according_to_type(y_native, y_py)
+    grad_native = t.gradient(y_native, x)
+    grad_py = t.gradient(y_py, x)
+    test_utils.assert_allclose_according_to_type(grad_native, grad_py)
 
 
 if __name__ == "__main__":
diff --git a/tensorflow_addons/utils/test_utils.py b/tensorflow_addons/utils/test_utils.py
index ac5438ff1a..d946eb036d 100644
--- a/tensorflow_addons/utils/test_utils.py
+++ b/tensorflow_addons/utils/test_utils.py
@@ -18,6 +18,7 @@
 import inspect
 import unittest
 
+import numpy as np
 import pytest
 import tensorflow as tf
 
@@ -175,3 +176,35 @@ def maybe_run_functions_eagerly(request):
         tf.config.experimental_run_functions_eagerly(False)
 
     request.addfinalizer(finalizer)
+
+
+def assert_allclose_according_to_type(
+    a,
+    b,
+    rtol=1e-6,
+    atol=1e-6,
+    float_rtol=1e-6,
+    float_atol=1e-6,
+    half_rtol=1e-3,
+    half_atol=1e-3,
+):
+    """
+    Similar to tf.test.TestCase.assertAllCloseAccordingToType(),
+    but it does not require subclassing tf.test.TestCase to run.
+    """
+    a = np.array(a)
+    b = np.array(b)
+    # Lower-precision dtypes are checked later so their looser tolerances win.
+    if (
+        a.dtype == np.float32
+        or b.dtype == np.float32
+        or a.dtype == np.complex64
+        or b.dtype == np.complex64
+    ):
+        rtol = max(rtol, float_rtol)
+        atol = max(atol, float_atol)
+    if a.dtype == np.float16 or b.dtype == np.float16:
+        rtol = max(rtol, half_rtol)
+        atol = max(atol, half_atol)
+
+    np.testing.assert_allclose(a, b, rtol=rtol, atol=atol)
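
Below is a minimal usage sketch of the new helper, assuming it is called from a
plain pytest test against a tree with this patch applied; the test name and the
sample values are illustrative only, not part of the patch:

    import numpy as np

    from tensorflow_addons.utils import test_utils


    def test_allclose_tolerance_by_dtype():
        # float16 inputs pick up the looser half_rtol/half_atol (1e-3),
        # so a 5e-4 perturbation still compares as "close".
        a = np.array([1.0, 2.0, 3.0], dtype=np.float16)
        test_utils.assert_allclose_according_to_type(a, a + 5e-4)

        # float32 inputs keep the tighter 1e-6 defaults, so the same
        # perturbation would fail; an identical copy passes.
        b = np.array([1.0, 2.0, 3.0], dtype=np.float32)
        test_utils.assert_allclose_according_to_type(b, b.copy())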