diff --git a/tensorflow_addons/BUILD b/tensorflow_addons/BUILD
index 24033291e0..1578af7993 100644
--- a/tensorflow_addons/BUILD
+++ b/tensorflow_addons/BUILD
@@ -11,6 +11,7 @@ py_library(
     name = "tensorflow_addons",
     data = [
         "__init__.py",
+        "conftest.py",
         "options.py",
         "register.py",
         "version.py",
diff --git a/tensorflow_addons/conftest.py b/tensorflow_addons/conftest.py
new file mode 100644
index 0000000000..610965fe66
--- /dev/null
+++ b/tensorflow_addons/conftest.py
@@ -0,0 +1,5 @@
+from tensorflow_addons.utils.test_utils import maybe_run_functions_eagerly  # noqa: F401
+
+# fixtures present in this file will be available
+# when running tests and can be referenced with strings
+# https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
diff --git a/tensorflow_addons/losses/focal_loss_test.py b/tensorflow_addons/losses/focal_loss_test.py
index 646593224c..fef90f4542 100644
--- a/tensorflow_addons/losses/focal_loss_test.py
+++ b/tensorflow_addons/losses/focal_loss_test.py
@@ -40,11 +40,6 @@ def to_logit(self, prob):
         logit = np.log(prob / (1.0 - prob))
         return logit
 
-    def log10(self, x):
-        numerator = tf.math.log(x)
-        denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
-        return numerator / denominator
-
     # Test with logits
     def test_with_logits(self):
         # predictiions represented as logits
@@ -87,35 +82,7 @@ def test_with_logits(self):
         )
 
         # order_of_ratio = np.power(10, np.floor(np.log10(bce/FL)))
-        order_of_ratio = tf.pow(10.0, tf.math.floor(self.log10(bce / fl)))
-        pow_values = tf.constant([1000, 100, 10, 10, 100, 1000])
-        self.assertAllClose(order_of_ratio, pow_values)
-
-    # Test without logits
-    def test_without_logits(self):
-        # predictiions represented as logits
-        prediction_tensor = tf.constant(
-            [[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]], tf.float32
-        )
-        # Ground truth
-        target_tensor = tf.constant([[1], [1], [1], [0], [0], [0]], tf.float32)
-
-        fl = sigmoid_focal_crossentropy(
-            y_true=target_tensor, y_pred=prediction_tensor, alpha=None, gamma=None
-        )
-        bce = tf.reduce_sum(
-            K.binary_crossentropy(target_tensor, prediction_tensor), axis=-1
-        )
-
-        # When alpha and gamma are None, it should be equal to BCE
-        self.assertAllClose(fl, bce)
-
-        # When gamma==2.0
-        fl = sigmoid_focal_crossentropy(
-            y_true=target_tensor, y_pred=prediction_tensor, alpha=None, gamma=2.0
-        )
-
-        order_of_ratio = tf.pow(10.0, tf.math.floor(self.log10(bce / fl)))
+        order_of_ratio = tf.pow(10.0, tf.math.floor(log10(bce / fl)))
         pow_values = tf.constant([1000, 100, 10, 10, 100, 1000])
         self.assertAllClose(order_of_ratio, pow_values)
 
@@ -129,5 +96,41 @@ def test_keras_model_compile(self):
         model.compile(loss="Addons>sigmoid_focal_crossentropy")
 
 
+def log10(x):
+    numerator = tf.math.log(x)
+    denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
+    return numerator / denominator
+
+
+# Test without logits
+@pytest.mark.usefixtures("maybe_run_functions_eagerly")
+def test_without_logits():
+    # predictions represented as probabilities
+    prediction_tensor = tf.constant(
+        [[0.97], [0.91], [0.73], [0.27], [0.09], [0.03]], tf.float32
+    )
+    # Ground truth
+    target_tensor = tf.constant([[1], [1], [1], [0], [0], [0]], tf.float32)
+
+    fl = sigmoid_focal_crossentropy(
+        y_true=target_tensor, y_pred=prediction_tensor, alpha=None, gamma=None
+    )
+    bce = tf.reduce_sum(
+        K.binary_crossentropy(target_tensor, prediction_tensor), axis=-1
+    )
+
+    # When alpha and gamma are None, it should be equal to BCE
+    assert np.allclose(fl, bce)
+
+    # When gamma==2.0
+    fl = sigmoid_focal_crossentropy(
+        y_true=target_tensor, y_pred=prediction_tensor, alpha=None, gamma=2.0
+    )
+
+    order_of_ratio = tf.pow(10.0, tf.math.floor(log10(bce / fl)))
+    pow_values = tf.constant([1000, 100, 10, 10, 100, 1000])
+    assert np.allclose(order_of_ratio, pow_values)
+
+
 if __name__ == "__main__":
     sys.exit(pytest.main([__file__]))
diff --git a/tensorflow_addons/metrics/cohens_kappa_test.py b/tensorflow_addons/metrics/cohens_kappa_test.py
index 8b8d9efbd8..833be2cbea 100644
--- a/tensorflow_addons/metrics/cohens_kappa_test.py
+++ b/tensorflow_addons/metrics/cohens_kappa_test.py
@@ -170,17 +170,6 @@ def test_with_sparse_labels(self):
         self.evaluate(obj.update_state(y_true, y_pred))
         self.assertAllClose(0.19999999, obj.result())
 
-    def test_with_ohe_labels(self):
-        y_true = np.array([4, 4, 3, 4], dtype=np.int32)
-        y_true = tf.keras.utils.to_categorical(y_true, num_classes=5)
-        y_pred = np.array([4, 4, 1, 2], dtype=np.int32)
-
-        obj = CohenKappa(num_classes=5, sparse_labels=False)
-        self.evaluate(tf.compat.v1.variables_initializer(obj.variables))
-
-        self.evaluate(obj.update_state(y_true, y_pred))
-        self.assertAllClose(0.19999999, obj.result())
-
     def test_keras_binary_reg_model(self):
         kp = CohenKappa(num_classes=2)
         inputs = tf.keras.layers.Input(shape=(10,))
@@ -231,5 +220,17 @@ def test_keras_multiclass_classification_model(self):
         model.fit(x, y, epochs=1, verbose=0, batch_size=32)
 
 
+@pytest.mark.usefixtures("maybe_run_functions_eagerly")
+def test_with_ohe_labels():
+    y_true = np.array([4, 4, 3, 4], dtype=np.int32)
+    y_true = tf.keras.utils.to_categorical(y_true, num_classes=5)
+    y_pred = np.array([4, 4, 1, 2], dtype=np.int32)
+
+    obj = CohenKappa(num_classes=5, sparse_labels=False)
+
+    obj.update_state(y_true, y_pred)
+    np.testing.assert_allclose(0.19999999, obj.result().numpy())
+
+
 if __name__ == "__main__":
     sys.exit(pytest.main([__file__]))
diff --git a/tensorflow_addons/utils/BUILD b/tensorflow_addons/utils/BUILD
index 2cca7537af..90b98460f7 100644
--- a/tensorflow_addons/utils/BUILD
+++ b/tensorflow_addons/utils/BUILD
@@ -12,6 +12,9 @@ py_library(
         "resource_loader.py",
         "types.py",
     ]),
+    data = [
+        "//tensorflow_addons:conftest.py",
+    ],
 )
 
 py_test(
diff --git a/tensorflow_addons/utils/test_utils.py b/tensorflow_addons/utils/test_utils.py
index 2528a2c6d7..dab44fa5b2 100644
--- a/tensorflow_addons/utils/test_utils.py
+++ b/tensorflow_addons/utils/test_utils.py
@@ -19,6 +19,7 @@
 import time
 import unittest
 
+import pytest
 import tensorflow as tf
 
 # TODO: find public API alternative to these
@@ -182,3 +183,17 @@ def time_all_functions(cls):
     ):
         setattr(cls, name, time_function(method))
     return cls
+
+
+def finalizer():
+    tf.config.experimental_run_functions_eagerly(False)
+
+
+@pytest.fixture(scope="function", params=["eager_mode", "tf_function"])
+def maybe_run_functions_eagerly(request):
+    if request.param == "eager_mode":
+        tf.config.experimental_run_functions_eagerly(True)
+    elif request.param == "tf_function":
+        tf.config.experimental_run_functions_eagerly(False)
+
+    request.addfinalizer(finalizer)
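
Usage sketch (illustrative, not part of the patch above): once conftest.py is in place, any test module under tensorflow_addons/ can reference the fixture by its string name, and pytest runs the test once per fixture parameter, i.e. once with functions executed eagerly and once under tf.function. The module name and the toy test below are hypothetical; only the fixture name maybe_run_functions_eagerly and tf.config.experimental_run_functions_eagerly come from the diff.

    # hypothetical_example_test.py -- runs twice thanks to the parametrized fixture
    import numpy as np
    import pytest
    import tensorflow as tf


    @pytest.mark.usefixtures("maybe_run_functions_eagerly")
    def test_double():
        @tf.function
        def double(x):
            # with the "eager_mode" parameter this body executes eagerly;
            # with "tf_function" it is traced into a graph
            return 2 * x

        out = double(tf.constant([1.0, 2.0]))
        np.testing.assert_allclose(out.numpy(), [2.0, 4.0])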