Commit 79628a4
PR #18303: Refactor test cases to improve unit test quality
Imported from GitHub PR #18303

Hello, this is my first time submitting a PR here. I've read through the contributor guide, and since this is a minor change I'm sending it directly as a PR.

This PR improves the readability of the assert statements in some of the unit tests by using more expressive assert methods such as assertLen (provided by the absl testing base class that these tests already extend). These changes improve code quality and avoid test smells by:

- Making the assert statements more readable, more informative on failure, and consistent with other asserts in the same test file, i.e., assertLen (see the sketch below).
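
For illustration, here is a minimal, self-contained sketch of the pattern this PR applies; the test class and values are hypothetical, and assertLen comes from the absl test base class that tf.test.TestCase extends:

import tensorflow as tf


class ExampleTest(tf.test.TestCase):
    def test_losses_count(self):
        losses = [0.5, 1.5]
        # Before: compares two integers, so a failure reports only "1 != 2".
        self.assertEqual(len(losses), 2)
        # After: states the intent directly and, on failure, prints the
        # container's actual contents alongside the expected length.
        self.assertLen(losses, 2)


if __name__ == "__main__":
    tf.test.main()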

Happy to update more related issues in the test code if needed :)
Copybara import of the project:

--
d823be2 by freddiewanah <[email protected]>:

Refactor test cases to improve unit test quality

--
ddeff03 by freddiewanah <[email protected]>:

revert assertRaisesRegex

Merging this change closes #18303

FUTURE_COPYBARA_INTEGRATE_REVIEW=#18303 from freddiewanah:master ddeff03
PiperOrigin-RevId: 551893138
freddiewanah authored and tensorflower-gardener committed Jul 28, 2023
1 parent 21c25fd commit 79628a4
Showing 3 changed files with 18 additions and 22 deletions.
keras/benchmarks/eager_microbenchmarks_test.py (3 changes: 0 additions & 3 deletions)
@@ -135,7 +135,6 @@ def fn():
         self._run(fn, 20)
 
     def benchmark_layers_embeddings_embedding_overhead(self):
-
         layer = tf.keras.layers.Embedding(1, 1)
         x = tf.zeros((1, 1), dtype="int32")
 
@@ -148,7 +147,6 @@ def fn():
 class KerasLayerCallOverheadBenchmarks(
     MicroBenchmarksBase, metaclass=tf.__internal__.test.ParameterizedBenchmark
 ):
-
     # The set of layers for benchmarking. To add benchmarks for new layers,
     # please add the parameter configs to "_benchmark_paramters".
 
@@ -225,7 +223,6 @@ class KerasLayerCallOverheadBenchmarks(
     ]
 
     def benchmark_layer(self, layer, input_shape, kwargs=None):
-
         x = tf.ones(input_shape)
 
         def fn():
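
For context, each of these microbenchmarks follows the same shape: build the layer once outside the timed region, then time a closure that does nothing but call it. The sketch below illustrates that pattern; run() is a hypothetical stand-in for the harness's self._run, and the closure body is illustrative since the diff elides it:

import time

import tensorflow as tf


def run(fn, iters):
    # Hypothetical stand-in for the benchmark harness's timing helper.
    start = time.time()
    for _ in range(iters):
        fn()
    print("avg seconds per call:", (time.time() - start) / iters)


layer = tf.keras.layers.Embedding(1, 1)  # built once, outside the timed loop
x = tf.zeros((1, 1), dtype="int32")


def fn():
    layer(x)  # only the per-call overhead is measured


run(fn, 20)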
keras/tests/add_loss_correctness_test.py (32 changes: 16 additions & 16 deletions)
@@ -261,7 +261,7 @@ def call(self, inputs):
         layer = MyLayer()
         outputs = layer(inputs)
         model = Model(inputs, outputs)
-        self.assertEqual(len(model.losses), 1)
+        self.assertLen(model.losses, 1)
         model.compile("sgd", "mse", run_eagerly=test_utils.should_run_eagerly())
         loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
         self.assertEqual(loss, 2 * 3)
@@ -373,7 +373,7 @@ def call(self, inputs):
         m = Sequential([shared_layer])
         m2 = Sequential([shared_layer, m])
         m2(tf.constant([1, 2, 3]))
-        self.assertEqual(len(m2.losses), 2)
+        self.assertLen(m2.losses, 2)
         self.assertAllClose(m2.losses, [6, 12])
 
     @test_combinations.run_all_keras_modes
@@ -394,36 +394,36 @@ def call(self, x):
         x1 = tf.ones((1, 1))
         _ = l(x1)
         if not tf.executing_eagerly():
-            self.assertEqual(len(l.get_losses_for(x1)), 2)
-            self.assertEqual(len(l.get_losses_for(None)), 1)
+            self.assertLen(l.get_losses_for(x1), 2)
+            self.assertLen(l.get_losses_for(None), 1)
 
         x2 = tf.ones((1, 1))
         _ = l(x2)
         if not tf.executing_eagerly():
-            self.assertEqual(len(l.get_losses_for(x1)), 2)
-            self.assertEqual(len(l.get_losses_for(x2)), 2)
-            self.assertEqual(len(l.get_losses_for(None)), 1)
+            self.assertLen(l.get_losses_for(x1), 2)
+            self.assertLen(l.get_losses_for(x2), 2)
+            self.assertLen(l.get_losses_for(None), 1)
 
         outputs = l(inputs)
         model = Model(inputs, outputs)
         if not tf.executing_eagerly():
-            self.assertEqual(len(model.losses), 7)
-            self.assertEqual(len(l.get_losses_for(x1)), 2)
-            self.assertEqual(len(l.get_losses_for(x2)), 2)
-            self.assertEqual(len(l.get_losses_for(None)), 1)
+            self.assertLen(model.losses, 7)
+            self.assertLen(l.get_losses_for(x1), 2)
+            self.assertLen(l.get_losses_for(x2), 2)
+            self.assertLen(l.get_losses_for(None), 1)
 
         x3 = tf.ones((1, 1))
         model(x3)
         x4 = tf.ones((1, 1))
         model(x4)
         if tf.executing_eagerly():
             # Eager losses are cleared every `__call__`.
-            self.assertEqual(len(model.losses), 3)
+            self.assertLen(model.losses, 3)
         else:
-            self.assertEqual(len(model.losses), 11)
-            self.assertEqual(len(model.get_losses_for(x3)), 2)
-            self.assertEqual(len(model.get_losses_for(x4)), 2)
-            self.assertEqual(len(model.get_losses_for(None)), 1)
+            self.assertLen(model.losses, 11)
+            self.assertLen(model.get_losses_for(x3), 2)
+            self.assertLen(model.get_losses_for(x4), 2)
+            self.assertLen(model.get_losses_for(None), 1)
 
     @test_combinations.run_all_keras_modes(always_skip_v1=True)
     def test_invalid_constant_input(self):
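
As background for the counts asserted above: under eager execution, a layer's losses list is rebuilt on every call rather than accumulating. A minimal sketch of that behavior; the layer below is illustrative, not the one used in the test:

import tensorflow as tf


class SumLayer(tf.keras.layers.Layer):
    # Illustrative layer that registers one loss per forward pass.
    def call(self, inputs):
        self.add_loss(tf.reduce_sum(inputs))
        return inputs


layer = SumLayer()
_ = layer(tf.ones((1, 1)))
print(len(layer.losses))  # 1
_ = layer(tf.ones((1, 1)))
# Eager losses are cleared at the start of each __call__, so the count
# stays at 1 instead of growing to 2.
print(len(layer.losses))  # 1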
keras/tests/model_subclassing_test.py (5 changes: 2 additions & 3 deletions)
@@ -512,9 +512,9 @@ def call(self, inputs):
         model(x)
 
         if tf.executing_eagerly():
-            self.assertEqual(0, len(model.updates))
+            self.assertLen(model.updates, 0)
         else:
-            self.assertEqual(2, len(model.updates))
+            self.assertLen(model.updates, 2)
 
 
 class GraphSpecificModelSubclassingTests(tf.test.TestCase):
@@ -557,7 +557,6 @@ def test_multi_io_workflow_with_tensors(self):
         _ = model.evaluate(steps=10, verbose=0)
 
     def test_updates_and_losses_for_nested_models_in_subclassed_model(self):
-
         # Case 1: deferred-build sequential nested in subclass.
         class TestModel1(keras.Model):
             def __init__(self):
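
As background for the 0-versus-2 assertion above: model.updates is the legacy graph-mode mechanism through which layers such as BatchNormalization expose their moving-statistics update ops; under eager execution those updates run inline during the call, so the list is empty. A hedged sketch, assuming the test's model wraps a BatchNormalization layer (the actual model may differ):

import tensorflow as tf


class BNModel(tf.keras.Model):
    # Illustrative model; BatchNormalization maintains moving statistics.
    def __init__(self):
        super().__init__()
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, inputs):
        return self.bn(inputs, training=True)


model = BNModel()
_ = model(tf.ones((2, 4)))
# Eagerly, the moving mean/variance were already updated during the call,
# so no pending update ops are collected (graph mode would report 2).
print(len(model.updates))  # 0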
