From 4f1fbba665b29907e0de290dc2246ee53c624f45 Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 20 Jul 2022 17:20:59 -0400 Subject: [PATCH 01/20] feature/1124 Added Pytorch Lightning Test support for mmdet --- .../models/mmdet/lightning/model_adapter.py | 26 +++++++++++-------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/icevision/models/mmdet/lightning/model_adapter.py b/icevision/models/mmdet/lightning/model_adapter.py index ef28b64fe..2b5aa54be 100644 --- a/icevision/models/mmdet/lightning/model_adapter.py +++ b/icevision/models/mmdet/lightning/model_adapter.py @@ -15,7 +15,7 @@ class MMDetModelAdapter(LightningModelAdapter, ABC): - """Lightning module specialized for EfficientDet, with metrics support. + """Lightning module specialized for MMDet, with metrics support. The methods `forward`, `training_step`, `validation_step`, `validation_epoch_end` are already overriden. @@ -50,14 +50,15 @@ def training_step(self, batch, batch_idx): return outputs["loss"] def validation_step(self, batch, batch_idx): + self.__val_predict(batch, loss_log_key="valid/") + + def __val_predict(self, batch, loss_log_key): data, records = batch - self.model.eval() - with torch.no_grad(): - outputs = self.model.train_step(data=data, optimizer=None) - raw_preds = self.model.forward_test( - imgs=[data["img"]], img_metas=[data["img_metas"]] - ) + outputs = self.model.train_step(data=data, optimizer=None) + raw_preds = self.model.forward_test( + imgs=[data["img"]], img_metas=[data["img_metas"]] + ) preds = self.convert_raw_predictions( batch=data, raw_preds=raw_preds, records=records @@ -65,10 +66,13 @@ def validation_step(self, batch, batch_idx): self.accumulate_metrics(preds) for k, v in outputs["log_vars"].items(): - self.log(f"valid/{k}", v) - - # TODO: is train and eval model automatically set by lighnting? 
- self.model.train() + self.log(f"{loss_log_key}{k}", v) def validation_epoch_end(self, outs): self.finalize_metrics() + + def test_step(self, batch, batch_idx): + self.__val_predict(batch=batch, loss_log_key="test/") + + def test_epoch_end(self, outs): + self.finalize_metrics() From 76fcce5c309a6796360bcac0e8caa9b31fb40844 Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 20 Jul 2022 17:23:43 -0400 Subject: [PATCH 02/20] feature/1124 Added Pytorch Lightning Test support for efficientdet --- .../efficientdet/lightning/model_adapter.py | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/icevision/models/ross/efficientdet/lightning/model_adapter.py b/icevision/models/ross/efficientdet/lightning/model_adapter.py index 0b1d5037d..3b7dc1e12 100644 --- a/icevision/models/ross/efficientdet/lightning/model_adapter.py +++ b/icevision/models/ross/efficientdet/lightning/model_adapter.py @@ -39,23 +39,31 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): + self.__val_predict(batch, loss_log_key="valid/") + + def __val_predict(self, batch, loss_log_key): (xb, yb), records = batch - with torch.no_grad(): - raw_preds = self(xb, yb) - preds = efficientdet.convert_raw_predictions( - batch=(xb, yb), - raw_preds=raw_preds["detections"], - records=records, - detection_threshold=0.0, - ) - loss = efficientdet.loss_fn(raw_preds, yb) + raw_preds = self(xb, yb) + preds = efficientdet.convert_raw_predictions( + batch=(xb, yb), + raw_preds=raw_preds["detections"], + records=records, + detection_threshold=0.0, + ) + loss = efficientdet.loss_fn(raw_preds, yb) self.accumulate_metrics(preds) for k, v in raw_preds.items(): if "loss" in k: - self.log(f"valid/{k}", v) + self.log(f"{loss_log_key}/{k}", v) def validation_epoch_end(self, outs): self.finalize_metrics() + + def test_step(self, batch, batch_idx): + self.__val_predict(batch=batch, loss_log_key="test/") + + def test_epoch_end(self, outs): + self.finalize_metrics() From c406ddc6ecac90daa1392cb6033ec121eee6a2c9 Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 20 Jul 2022 17:26:54 -0400 Subject: [PATCH 03/20] feature/1124 Added Pytorch Lightning Test support for torchvision --- .../torchvision/lightning_model_adapter.py | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/icevision/models/torchvision/lightning_model_adapter.py b/icevision/models/torchvision/lightning_model_adapter.py index 1e14b98db..115d64e80 100644 --- a/icevision/models/torchvision/lightning_model_adapter.py +++ b/icevision/models/torchvision/lightning_model_adapter.py @@ -35,20 +35,29 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): + self.__val_predict(batch, loss_log_key="val_loss") + + def __val_predict(self, batch, loss_log_key): (xb, yb), records = batch - with torch.no_grad(): - self.train() - train_preds = self(xb, yb) - loss = loss_fn(train_preds, yb) - self.eval() - raw_preds = self(xb) - preds = self.convert_raw_predictions( - batch=batch, raw_preds=raw_preds, records=records - ) - self.accumulate_metrics(preds=preds) + self.train() + train_preds = self(xb, yb) + loss = loss_fn(train_preds, yb) + + self.eval() + raw_preds = self(xb) + preds = self.convert_raw_predictions( + batch=batch, raw_preds=raw_preds, records=records + ) + self.accumulate_metrics(preds=preds) - self.log("val_loss", loss) + self.log(loss_log_key, loss) def validation_epoch_end(self, outs): self.finalize_metrics() + + def test_step(self, batch, 
batch_idx): + self.__val_predict(batch=batch, loss_log_key="test_loss") + + def test_epoch_end(self, outs): + self.finalize_metrics() From 407b730b9b5cce4732a31b29d89df57629ed0764 Mon Sep 17 00:00:00 2001 From: brown Date: Wed, 20 Jul 2022 17:30:50 -0400 Subject: [PATCH 04/20] feature/1124 Added Pytorch Lightning Test support for yolov5 --- .../yolov5/lightning/model_adapter.py | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py index f967f097e..9bc212ea3 100644 --- a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py +++ b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py @@ -40,22 +40,30 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): + self.__val_predict(batch, loss_log_key="val_loss") + + def __val_predict(self, batch, loss_log_key): (xb, yb), records = batch - with torch.no_grad(): - inference_out, training_out = self(xb) - preds = yolov5.convert_raw_predictions( - batch=xb, - raw_preds=inference_out, - records=records, - detection_threshold=0.001, - nms_iou_threshold=0.6, - ) - loss = self.compute_loss(training_out, yb)[0] + inference_out, training_out = self(xb) + preds = yolov5.convert_raw_predictions( + batch=xb, + raw_preds=inference_out, + records=records, + detection_threshold=0.001, + nms_iou_threshold=0.6, + ) + loss = self.compute_loss(training_out, yb)[0] self.accumulate_metrics(preds) - self.log("val_loss", loss) + self.log(loss_log_key, loss) def validation_epoch_end(self, outs): self.finalize_metrics() + + def test_step(self, batch, batch_idx): + self.__val_predict(batch=batch, loss_log_key="test_loss") + + def test_epoch_end(self, outs): + self.finalize_metrics() From 5cb1d0f6172355e8411e0db37fe1e6b5dc88b73f Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 08:09:52 -0400 Subject: [PATCH 05/20] feature/1124 Renamed shared evaluation method for mmdet --- icevision/models/mmdet/lightning/model_adapter.py | 8 ++++---- tests/engines/lightning/test_lightning_model_adapter.py | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/icevision/models/mmdet/lightning/model_adapter.py b/icevision/models/mmdet/lightning/model_adapter.py index 2b5aa54be..6329fa9b0 100644 --- a/icevision/models/mmdet/lightning/model_adapter.py +++ b/icevision/models/mmdet/lightning/model_adapter.py @@ -45,14 +45,14 @@ def training_step(self, batch, batch_idx): outputs = self.model.train_step(data=data, optimizer=None) for k, v in outputs["log_vars"].items(): - self.log(f"train/{k}", v) + self.log(f"train_{k}", v) return outputs["loss"] def validation_step(self, batch, batch_idx): - self.__val_predict(batch, loss_log_key="valid/") + self._shared_eval(batch, loss_log_key="val_") - def __val_predict(self, batch, loss_log_key): + def _shared_eval(self, batch, loss_log_key): data, records = batch outputs = self.model.train_step(data=data, optimizer=None) @@ -72,7 +72,7 @@ def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self.__val_predict(batch=batch, loss_log_key="test/") + self._shared_eval(batch=batch, loss_log_key="test_") def test_epoch_end(self, outs): self.finalize_metrics() diff --git a/tests/engines/lightning/test_lightning_model_adapter.py b/tests/engines/lightning/test_lightning_model_adapter.py index fbe6107de..c7ecbd88e 100644 --- 
a/tests/engines/lightning/test_lightning_model_adapter.py +++ b/tests/engines/lightning/test_lightning_model_adapter.py @@ -16,7 +16,6 @@ class DummLightningModelAdapter(LightningModelAdapter): pass -# test if finalize metrics reports metrics correctly def test_finalze_metrics_reports_metrics_correctly(mocker): mocker.patch( "icevision.engines.lightning.lightning_model_adapter.LightningModelAdapter.log" From 4e807b50b75fa519977463a68e52efe323ef8363 Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 08:10:39 -0400 Subject: [PATCH 06/20] feature/1124 Renamed shared evaluation method for efficientdet --- .../ross/efficientdet/lightning/model_adapter.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/icevision/models/ross/efficientdet/lightning/model_adapter.py b/icevision/models/ross/efficientdet/lightning/model_adapter.py index 3b7dc1e12..7f4f9e9d9 100644 --- a/icevision/models/ross/efficientdet/lightning/model_adapter.py +++ b/icevision/models/ross/efficientdet/lightning/model_adapter.py @@ -34,14 +34,14 @@ def training_step(self, batch, batch_idx): loss = efficientdet.loss_fn(preds, yb) for k, v in preds.items(): - self.log(f"train/{k}", v) + self.log(f"train_{k}", v) return loss def validation_step(self, batch, batch_idx): - self.__val_predict(batch, loss_log_key="valid/") + self._shared_eval(batch, loss_log_key="val_") - def __val_predict(self, batch, loss_log_key): + def _shared_eval(self, batch, loss_log_key): (xb, yb), records = batch raw_preds = self(xb, yb) @@ -57,13 +57,13 @@ def __val_predict(self, batch, loss_log_key): for k, v in raw_preds.items(): if "loss" in k: - self.log(f"{loss_log_key}/{k}", v) + self.log(f"{loss_log_key}{k}", v) def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self.__val_predict(batch=batch, loss_log_key="test/") + self._shared_eval(batch=batch, loss_log_key="test_") def test_epoch_end(self, outs): self.finalize_metrics() From b0dfa9ce029631b0f57e10c7242d01f7f6c61f57 Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 08:11:03 -0400 Subject: [PATCH 07/20] feature/1124 Renamed shared evaluation method for torchvision --- icevision/models/torchvision/lightning_model_adapter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/icevision/models/torchvision/lightning_model_adapter.py b/icevision/models/torchvision/lightning_model_adapter.py index 115d64e80..f8ea31a76 100644 --- a/icevision/models/torchvision/lightning_model_adapter.py +++ b/icevision/models/torchvision/lightning_model_adapter.py @@ -35,9 +35,9 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): - self.__val_predict(batch, loss_log_key="val_loss") + self._shared_eval(batch, loss_log_key="val_loss") - def __val_predict(self, batch, loss_log_key): + def _shared_eval(self, batch, loss_log_key): (xb, yb), records = batch self.train() @@ -57,7 +57,7 @@ def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self.__val_predict(batch=batch, loss_log_key="test_loss") + self._shared_eval(batch=batch, loss_log_key="test_loss") def test_epoch_end(self, outs): self.finalize_metrics() From 4ae2091ec8057a103e8ac048eb2112585f5fc79d Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 08:11:26 -0400 Subject: [PATCH 08/20] feature/1124 Renamed shared evaluation method for yolov5 --- .../models/ultralytics/yolov5/lightning/model_adapter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 
deletions(-) diff --git a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py index 9bc212ea3..59cdb0abb 100644 --- a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py +++ b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py @@ -40,9 +40,9 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): - self.__val_predict(batch, loss_log_key="val_loss") + self._shared_eval(batch, loss_log_key="val_loss") - def __val_predict(self, batch, loss_log_key): + def _shared_eval(self, batch, loss_log_key): (xb, yb), records = batch inference_out, training_out = self(xb) @@ -63,7 +63,7 @@ def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self.__val_predict(batch=batch, loss_log_key="test_loss") + self._shared_eval(batch=batch, loss_log_key="test_loss") def test_epoch_end(self, outs): self.finalize_metrics() From 732fd39859db49c4ed7ca6816bf137a2ddbd5aed Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 08:46:55 -0400 Subject: [PATCH 09/20] feature/1124 Added test for PL test step of efficientdet --- .../efficient_det/lightning/test_test.py | 50 +++++++++++++++++++ .../efficient_det/lightning/test_validate.py | 50 +++++++++++++++++++ 2 files changed, 100 insertions(+) create mode 100644 tests/models/efficient_det/lightning/test_test.py create mode 100644 tests/models/efficient_det/lightning/test_validate.py diff --git a/tests/models/efficient_det/lightning/test_test.py b/tests/models/efficient_det/lightning/test_test.py new file mode 100644 index 000000000..681f20966 --- /dev/null +++ b/tests/models/efficient_det/lightning/test_test.py @@ -0,0 +1,50 @@ +import pytest +from icevision.all import * + + +@pytest.fixture +def light_model_cls(): + class LightModel(models.ross.efficientdet.lightning.ModelAdapter): + def __init__(self, model, metrics): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +# WARNING: Only works with cuda: https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014 +@pytest.mark.cuda +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_efficientdet_test( + fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls, metrics +): + _, valid_dl = fridge_efficientdet_dls + light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.test(light_model, valid_dl) + + +# WARNING: Only works with cuda: https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014 +@pytest.mark.cuda +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_efficientdet_finalizes_metrics_on_test_epoch_end( + fridge_efficientdet_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/efficient_det/lightning/test_validate.py b/tests/models/efficient_det/lightning/test_validate.py new file mode 100644 index 000000000..1deb30e00 --- /dev/null +++ 
b/tests/models/efficient_det/lightning/test_validate.py @@ -0,0 +1,50 @@ +import pytest +from icevision.all import * + + +@pytest.fixture +def light_model_cls(): + class LightModel(models.ross.efficientdet.lightning.ModelAdapter): + def __init__(self, model, metrics): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +# WARNING: Only works with cuda: https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014 +@pytest.mark.cuda +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_efficientdet_validate( + fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls, metrics +): + _, valid_dl = fridge_efficientdet_dls + light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.validate(light_model, valid_dl) + + +# WARNING: Only works with cuda: https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014 +@pytest.mark.cuda +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_efficientdet_finalizes_metrics_on_validation_epoch_end( + fridge_efficientdet_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + + light_model.validation_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True From e010af2f674101eb04e181044020509b86879704 Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 09:48:52 -0400 Subject: [PATCH 10/20] feature/1124 Added test for PL test step of mmdet --- tests/models/mmdet/test_model.py | 95 +++++++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 1 deletion(-) diff --git a/tests/models/mmdet/test_model.py b/tests/models/mmdet/test_model.py index 98c51a9ff..d563baae2 100644 --- a/tests/models/mmdet/test_model.py +++ b/tests/models/mmdet/test_model.py @@ -49,7 +49,7 @@ def test_mmdet_bbox_models_fastai( ) learn.fine_tune(1, 3e-4) - def test_mmdet_bbox_models_light( + def test_mmdet_bbox_models_light_train( self, ds, model_type, pretrained, cfg_options, samples_source, request ): train_dl, valid_dl, model = self.dls_model( @@ -68,8 +68,101 @@ def configure_optimizers(self): logger=False, checkpoint_callback=False, ) + trainer.fit(light_model, train_dl, valid_dl) + def test_mmdet_bbox_models_light_validate( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + light_model = LitModel(model) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.validate(light_model, valid_dl) + + def test_mmdet_bbox_models_light_finalizes_metrics_on_validation_epoch_end( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + 
super(LitModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + light_model = LitModel(model) + + light_model.validation_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True + + def test_mmdet_bbox_models_light_test( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + light_model = LitModel(model) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.test(light_model, valid_dl) + + def test_mmdet_bbox_models_light_finalizes_metrics_on_test_epoch_end( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + light_model = LitModel(model) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True + @pytest.fixture() def mask_dls_model(coco_mask_records, samples_source): From 0b05f506cc7bb4feb4ecbfa2f8b47d49782997b3 Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 10:08:28 -0400 Subject: [PATCH 11/20] feature/1124 Added test for PL test step of torchvision models --- .../faster_rcnn/lightning/test_test.py | 47 +++++++++++++++++++ .../faster_rcnn/lightning/test_validate.py | 47 +++++++++++++++++++ .../keypoints_rcnn/lightning/test_test.py | 43 +++++++++++++++++ .../keypoints_rcnn/lightning/test_validate.py | 45 ++++++++++++++++++ .../retinanet/lightning/test_test.py | 47 +++++++++++++++++++ .../retinanet/lightning/test_train.py | 17 +++++++ .../retinanet/lightning/test_validate.py | 47 +++++++++++++++++++ 7 files changed, 293 insertions(+) create mode 100644 tests/models/torchvision_models/faster_rcnn/lightning/test_test.py create mode 100644 tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py create mode 100644 tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py create mode 100644 tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py create mode 100644 tests/models/torchvision_models/retinanet/lightning/test_test.py create mode 100644 tests/models/torchvision_models/retinanet/lightning/test_validate.py diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py new file mode 100644 index 000000000..547a44e70 --- /dev/null +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py @@ -0,0 +1,47 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import faster_rcnn + + +@pytest.fixture +def light_model_cls(): + class LightModel(faster_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, 
self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_faster_rcnn_test( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls, metrics +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.test(light_model, valid_dl) + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_faster_rcnn_finalizes_metrics_on_test_epoch_end( + fridge_faster_rcnn_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py new file mode 100644 index 000000000..f12cf17a2 --- /dev/null +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py @@ -0,0 +1,47 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import faster_rcnn + + +@pytest.fixture +def light_model_cls(): + class LightModel(faster_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_faster_rcnn_validate( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls, metrics +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.validate(light_model, valid_dl) + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_faster_rcnn_finalizes_metrics_on_validation_epoch_end( + fridge_faster_rcnn_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + + light_model.validation_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py new file mode 100644 index 000000000..17cd154fb --- /dev/null +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py @@ -0,0 +1,43 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import keypoint_rcnn + + +@pytest.fixture +def light_model_cls(): + class LightModel(keypoint_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + 
self.was_finalize_metrics_called = True + + return LightModel + + +def test_lightining_keypoints_rcnn_test(ochuman_keypoints_dls, light_model_cls): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.test(light_model, valid_dl) + + +def test_lightining_keypoints_finalizes_metrics_on_test_epoch_end(light_model_cls): + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py new file mode 100644 index 000000000..637de9256 --- /dev/null +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py @@ -0,0 +1,45 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import keypoint_rcnn + + +@pytest.fixture +def light_model_cls(): + class LightModel(keypoint_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +def test_lightining_keypoints_rcnn_validate(ochuman_keypoints_dls, light_model_cls): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.validate(light_model, valid_dl) + + +def test_lightining_keypoints_finalizes_metrics_on_validation_epoch_end( + light_model_cls, +): + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + + light_model.validation_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/torchvision_models/retinanet/lightning/test_test.py b/tests/models/torchvision_models/retinanet/lightning/test_test.py new file mode 100644 index 000000000..7a7e2f1cf --- /dev/null +++ b/tests/models/torchvision_models/retinanet/lightning/test_test.py @@ -0,0 +1,47 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import retinanet + + +@pytest.fixture +def light_model_cls(): + class LightModel(retinanet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_retinanet_test( + fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls, metrics +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.test(light_model, valid_dl) + + 
+@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_retinanet_finalizes_metrics_on_test_epoch_end( + fridge_retinanet_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/torchvision_models/retinanet/lightning/test_train.py b/tests/models/torchvision_models/retinanet/lightning/test_train.py index b7004787b..95fd4a7d8 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_train.py +++ b/tests/models/torchvision_models/retinanet/lightning/test_train.py @@ -27,3 +27,20 @@ def test_lightining_retinanet_train( checkpoint_callback=False, ) trainer.fit(light_model, train_dl, valid_dl) + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_retinanet_test( + fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls, metrics +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + trainer.test(light_model, valid_dl) diff --git a/tests/models/torchvision_models/retinanet/lightning/test_validate.py b/tests/models/torchvision_models/retinanet/lightning/test_validate.py new file mode 100644 index 000000000..a3760549a --- /dev/null +++ b/tests/models/torchvision_models/retinanet/lightning/test_validate.py @@ -0,0 +1,47 @@ +import pytest +from icevision.all import * +from icevision.models.torchvision import retinanet + + +@pytest.fixture +def light_model_cls(): + class LightModel(retinanet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_retinanet_validate( + fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls, metrics +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + trainer = pl.Trainer( + max_epochs=1, + weights_summary=None, + num_sanity_val_steps=0, + logger=False, + checkpoint_callback=False, + ) + + trainer.validate(light_model, valid_dl) + + +@pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) +def test_lightining_retinanet_finalizes_metrics_on_validation_epoch_end( + fridge_retinanet_model, light_model_cls, metrics +): + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + + light_model.validation_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True From 20b710211513bc581617502e84ec9f08a5debf11 Mon Sep 17 00:00:00 2001 From: brown Date: Thu, 21 Jul 2022 13:13:41 -0400 Subject: [PATCH 12/20] feature/1124 Added test for PL test step of yolov5 --- .../ultralytics/yolov5/lightning/test_test.py | 55 ++++++++++++++++++ .../yolov5/lightning/test_validate.py | 57 +++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 tests/models/ultralytics/yolov5/lightning/test_test.py create mode 100644 tests/models/ultralytics/yolov5/lightning/test_validate.py diff --git a/tests/models/ultralytics/yolov5/lightning/test_test.py 
b/tests/models/ultralytics/yolov5/lightning/test_test.py new file mode 100644 index 000000000..2d091d1e6 --- /dev/null +++ b/tests/models/ultralytics/yolov5/lightning/test_test.py @@ -0,0 +1,55 @@ +import pytest +from icevision.all import * +from icevision.models.ultralytics.yolov5.backbones import * + + +@pytest.fixture +def light_model_cls(): + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize( + "backbone", + [small, medium, large, extra_large], +) +def test_lightning_yolo_test(fridge_ds, backbone, light_model_cls): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.valid_dl( + valid_ds, batch_size=3, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=True) + ) + metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] + light_model = light_model_cls(model, metrics=metrics) + gpus = 1 if torch.cuda.is_available() else 0 + trainer = pl.Trainer(max_epochs=1, gpus=gpus) + + trainer.test(light_model, valid_dl) + + +@pytest.mark.parametrize( + "backbone", + [small, medium, large, extra_large], +) +def test_lightning_yolo_finalizes_metrics_on_test_epoch_end(backbone, light_model_cls): + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=True) + ) + metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] + light_model = light_model_cls(model, metrics=metrics) + + light_model.test_epoch_end(None) + + assert light_model.was_finalize_metrics_called == True diff --git a/tests/models/ultralytics/yolov5/lightning/test_validate.py b/tests/models/ultralytics/yolov5/lightning/test_validate.py new file mode 100644 index 000000000..ee27bcc9b --- /dev/null +++ b/tests/models/ultralytics/yolov5/lightning/test_validate.py @@ -0,0 +1,57 @@ +import pytest +from icevision.all import * +from icevision.models.ultralytics.yolov5.backbones import * + + +@pytest.fixture +def light_model_cls(): + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-3) + + def finalize_metrics(self): + self.was_finalize_metrics_called = True + + return LightModel + + +@pytest.mark.parametrize( + "backbone", + [small, medium, large, extra_large], +) +def test_lightning_yolo_validate(fridge_ds, backbone, light_model_cls): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.valid_dl( + valid_ds, batch_size=3, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=True) + ) + metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] + light_model = light_model_cls(model, metrics=metrics) + gpus = 1 if torch.cuda.is_available() else 0 + trainer = pl.Trainer(max_epochs=1, gpus=gpus) + + trainer.validate(light_model, valid_dl) + + +@pytest.mark.parametrize( + "backbone", + [small, medium, large, extra_large], +) +def test_lightning_yolo_finalizes_metrics_on_validation_epoch_end( + backbone, light_model_cls +): + model = 
models.ultralytics.yolov5.model(
+        num_classes=5, img_size=384, backbone=backbone(pretrained=True)
+    )
+    metrics = [COCOMetric(metric_type=COCOMetricType.bbox)]
+    light_model = light_model_cls(model, metrics=metrics)
+
+    light_model.validation_epoch_end(None)
+
+    assert light_model.was_finalize_metrics_called == True

From a6cff1a8537433bf7868c2986de84d9f403e0fc5 Mon Sep 17 00:00:00 2001
From: brown
Date: Thu, 21 Jul 2022 13:14:10 -0400
Subject: [PATCH 13/20] feature/1124 Updated object detection getting started guide to add PL test

---
 .../getting_started_object_detection.ipynb   | 24 ++++++++++++++++---
 1 file changed, 21 insertions(+), 3 deletions(-)

diff --git a/notebooks/getting_started_object_detection.ipynb b/notebooks/getting_started_object_detection.ipynb
index 37a111157..5061ba449 100644
--- a/notebooks/getting_started_object_detection.ipynb
+++ b/notebooks/getting_started_object_detection.ipynb
@@ -802,6 +802,24 @@
     "trainer.fit(light_model, train_dl, valid_dl)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Testing using PyTorch Lightning\n",
+    "For testing, it is recommended to use a separate test dataset that the model did not see during training, but for demonstration purposes we'll reuse the validation dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "trainer = pl.Trainer()\n",
+    "trainer.test(light_model, valid_dl)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {
@@ -961,7 +979,7 @@
    "provenance": []
   },
   "kernelspec": {
-   "display_name": "Python 3.8.5 ('icevision')",
+   "display_name": "Python 3.10.4 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -975,7 +993,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.10.4"
   },
   "metadata": {
    "interpreter": {
@@ -1002,7 +1020,7 @@
   },
   "vscode": {
    "interpreter": {
-    "hash": "8de9f07afee82c69462511c5dd73ef92dc31ce4377be5cdd30293ad211c0627b"
+    "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
    }
   },
   "widgets": {

From 1c53bb2bd904b8bb466af1d185500954cc364a49 Mon Sep 17 00:00:00 2001
From: brown
Date: Thu, 21 Jul 2022 15:15:17 -0400
Subject: [PATCH 14/20] feature/1124 Updated environment.yml to fix icevision version not found

---
 environment.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/environment.yml b/environment.yml
index 2eced792f..4ee58ac1a 100644
--- a/environment.yml
+++ b/environment.yml
@@ -13,7 +13,7 @@ dependencies:
   - fastai >=2.5.2,<2.6
   - pip
   - pip:
-    - icevision[all]==0.12.0rc1
+    - icevision[all]==0.12.0
     - --find-links https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html
     - mmcv-full==1.3.17
     - mmdet==2.17.0
\ No newline at end of file

From e1729cf7d2ac111364233a2dfe051b303cd3b291 Mon Sep 17 00:00:00 2001
From: brown
Date: Fri, 22 Jul 2022 09:56:46 -0400
Subject: [PATCH 15/20] feature/1124 Added unit tests for efficientdet lightning model adapter

---
 .../efficientdet/lightning/model_adapter.py  | 32 ++++++------
 .../efficient_det/lightning/test_test.py     | 25 +++++++++-
 .../efficient_det/lightning/test_train.py    | 48 ++++++++++++++++++-
 .../efficient_det/lightning/test_validate.py | 27 ++++++++++-
 4 files changed, 115 insertions(+), 17 deletions(-)

diff --git a/icevision/models/ross/efficientdet/lightning/model_adapter.py b/icevision/models/ross/efficientdet/lightning/model_adapter.py
index 7f4f9e9d9..2990aa61a 100644
--- a/icevision/models/ross/efficientdet/lightning/model_adapter.py
+++ 
b/icevision/models/ross/efficientdet/lightning/model_adapter.py
@@ -31,39 +31,49 @@ def training_step(self, batch, batch_idx):
         (xb, yb), records = batch
         preds = self(xb, yb)
 
-        loss = efficientdet.loss_fn(preds, yb)
+        loss = self.compute_loss(preds, yb)
 
         for k, v in preds.items():
             self.log(f"train_{k}", v)
 
         return loss
 
+    def compute_loss(self, preds, yb):
+        return efficientdet.loss_fn(preds, yb)
+
     def validation_step(self, batch, batch_idx):
-        self._shared_eval(batch, loss_log_key="val_")
+        self._shared_eval(batch, loss_log_key="val")
 
     def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch
 
         raw_preds = self(xb, yb)
-        preds = efficientdet.convert_raw_predictions(
-            batch=(xb, yb),
-            raw_preds=raw_preds["detections"],
-            records=records,
-            detection_threshold=0.0,
-        )
-        loss = efficientdet.loss_fn(raw_preds, yb)
+
+        preds = self.convert_raw_predictions(xb, yb, raw_preds, records)
+
+        self.compute_loss(raw_preds, yb)
 
         self.accumulate_metrics(preds)
 
         for k, v in raw_preds.items():
             if "loss" in k:
-                self.log(f"{loss_log_key}{k}", v)
+                self.log(f"{loss_log_key}_{k}", v)
+
+    def convert_raw_predictions(self, xb, yb, raw_preds, records):
+        # Note: the raw_preds["detections"] key is only present during the PyTorch Lightning validation/test steps.
+        # Calling this method manually (instead of letting the Trainer drive those steps) will raise an exception.
+        return efficientdet.convert_raw_predictions(
+            batch=(xb, yb),
+            raw_preds=raw_preds["detections"],
+            records=records,
+            detection_threshold=0.0,
+        )
 
     def validation_epoch_end(self, outs):
         self.finalize_metrics()
 
     def test_step(self, batch, batch_idx):
-        self._shared_eval(batch=batch, loss_log_key="test_")
+        self._shared_eval(batch=batch, loss_log_key="test")
 
     def test_epoch_end(self, outs):
         self.finalize_metrics()
diff --git a/tests/models/efficient_det/lightning/test_test.py b/tests/models/efficient_det/lightning/test_test.py
index 681f20966..c84c468b7 100644
--- a/tests/models/efficient_det/lightning/test_test.py
+++ b/tests/models/efficient_det/lightning/test_test.py
@@ -8,6 +8,7 @@ class LightModel(models.ross.efficientdet.lightning.ModelAdapter):
         def __init__(self, model, metrics):
             super(LightModel, self).__init__(model, metrics)
             self.was_finalize_metrics_called = False
+            self.logs = {}
 
         def configure_optimizers(self):
             return SGD(self.parameters(), lr=1e-3)
@@ -15,6 +16,10 @@ def configure_optimizers(self):
         def finalize_metrics(self):
             self.was_finalize_metrics_called = True
 
+        def log(self, key, value, **args):
+            super(LightModel, self).log(key, value, **args)
+            self.logs[key] = value
+
     return LightModel
 
 
@@ -37,8 +42,6 @@ def test_lightining_efficientdet_test(
     trainer.test(light_model, valid_dl)
 
 
-# WARNING: Only works with cuda: https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014
-@pytest.mark.cuda
 @pytest.mark.parametrize("metrics", [[], [COCOMetric()]])
 def test_lightining_efficientdet_finalizes_metrics_on_test_epoch_end(
     fridge_efficientdet_model, light_model_cls, metrics
@@ -48,3 +51,21 @@ def test_lightining_efficientdet_finalizes_metrics_on_test_epoch_end(
     light_model.test_epoch_end(None)
 
     assert light_model.was_finalize_metrics_called == True
+
+
+def test_lightining_efficientdet_logs_losses_during_test_step(
+    fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls
+):
+    train_dl, _ = fridge_efficientdet_dls
+    light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None)
+    for batch in train_dl:
+        break
+    light_model.convert_raw_predictions = lambda *args: None
+    light_model.compute_loss = 
lambda *args: None + light_model.accumulate_metrics = lambda *args: None + + light_model.test_step(batch, 0) + + assert sorted(light_model.logs.keys()) == sorted( + ["test_loss", "test_box_loss", "test_class_loss"] + ) diff --git a/tests/models/efficient_det/lightning/test_train.py b/tests/models/efficient_det/lightning/test_train.py index 9ccf0402b..260d4303d 100644 --- a/tests/models/efficient_det/lightning/test_train.py +++ b/tests/models/efficient_det/lightning/test_train.py @@ -1,13 +1,23 @@ import pytest +import random from icevision.all import * @pytest.fixture def light_model_cls(): class LightModel(models.ross.efficientdet.lightning.ModelAdapter): + def __init__(self, model, metrics): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -18,8 +28,7 @@ def test_lightining_efficientdet_train( fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls, metrics ): train_dl, valid_dl = fridge_efficientdet_dls - light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) - + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=metrics) trainer = pl.Trainer( max_epochs=1, weights_summary=None, @@ -27,4 +36,39 @@ def test_lightining_efficientdet_train( logger=False, checkpoint_callback=False, ) + trainer.fit(light_model, train_dl, valid_dl) + + +def test_lightining_efficientdet_training_step_returns_loss( + fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls +): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + expected_loss = random.randint(0, 1000) + + def fake_compute_loss(self, *args): + return expected_loss + + light_model.compute_loss = fake_compute_loss + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + +def test_lightining_efficientdet_logs_losses_during_training_step( + fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls +): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + + light_model.training_step(batch, 0) + + assert sorted(light_model.logs.keys()) == sorted( + ["train_loss", "train_box_loss", "train_class_loss"] + ) diff --git a/tests/models/efficient_det/lightning/test_validate.py b/tests/models/efficient_det/lightning/test_validate.py index 1deb30e00..cab20e458 100644 --- a/tests/models/efficient_det/lightning/test_validate.py +++ b/tests/models/efficient_det/lightning/test_validate.py @@ -7,7 +7,9 @@ def light_model_cls(): class LightModel(models.ross.efficientdet.lightning.ModelAdapter): def __init__(self, model, metrics): super(LightModel, self).__init__(model, metrics) + self.model = model self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -15,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -37,14 +43,31 @@ def test_lightining_efficientdet_validate( trainer.validate(light_model, valid_dl) -# WARNING: Only works with cuda: 
https://github.com/rwightman/efficientdet-pytorch/issues/44#issuecomment-662594014 -@pytest.mark.cuda @pytest.mark.parametrize("metrics", [[], [COCOMetric()]]) def test_lightining_efficientdet_finalizes_metrics_on_validation_epoch_end( fridge_efficientdet_model, light_model_cls, metrics ): light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + light_model.convert_raw_predictions = lambda *args: None light_model.validation_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_efficientdet_logs_losses_during_validation_step( + fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls +): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda *args: None + light_model.compute_loss = lambda *args: None + light_model.accumulate_metrics = lambda *args: None + + light_model.validation_step(batch, 0) + + assert sorted(light_model.logs.keys()) == sorted( + ["val_loss", "val_box_loss", "val_class_loss"] + ) From f3990a086a71e117a7cc1121ca027f95f38e5f8c Mon Sep 17 00:00:00 2001 From: brown Date: Fri, 22 Jul 2022 10:34:00 -0400 Subject: [PATCH 16/20] feature/1124 Added unit tests for mmdet lightning model adapter --- .../models/mmdet/lightning/model_adapter.py | 6 +- tests/models/mmdet/test_model.py | 123 ++++++++++++++++++ 2 files changed, 126 insertions(+), 3 deletions(-) diff --git a/icevision/models/mmdet/lightning/model_adapter.py b/icevision/models/mmdet/lightning/model_adapter.py index 6329fa9b0..3d32516ee 100644 --- a/icevision/models/mmdet/lightning/model_adapter.py +++ b/icevision/models/mmdet/lightning/model_adapter.py @@ -50,7 +50,7 @@ def training_step(self, batch, batch_idx): return outputs["loss"] def validation_step(self, batch, batch_idx): - self._shared_eval(batch, loss_log_key="val_") + self._shared_eval(batch, loss_log_key="val") def _shared_eval(self, batch, loss_log_key): data, records = batch @@ -66,13 +66,13 @@ def _shared_eval(self, batch, loss_log_key): self.accumulate_metrics(preds) for k, v in outputs["log_vars"].items(): - self.log(f"{loss_log_key}{k}", v) + self.log(f"{loss_log_key}_{k}", v) def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self._shared_eval(batch=batch, loss_log_key="test_") + self._shared_eval(batch=batch, loss_log_key="test") def test_epoch_end(self, outs): self.finalize_metrics() diff --git a/tests/models/mmdet/test_model.py b/tests/models/mmdet/test_model.py index d563baae2..9f7adc16f 100644 --- a/tests/models/mmdet/test_model.py +++ b/tests/models/mmdet/test_model.py @@ -1,4 +1,5 @@ import pytest +import random from icevision.all import * @@ -71,6 +72,62 @@ def configure_optimizers(self): trainer.fit(light_model, train_dl, valid_dl) + def test_mmdet_bbox_models_light_training_step_returns_loss( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + train_dl, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + expected_loss = random.randint(0, 10) + model.train_step = lambda **args: {"log_vars": {}, "loss": expected_loss} + light_model = LitModel(model) + for batch in train_dl: + batch + break + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + def 
test_mmdet_bbox_models_light_logs_losses_during_training_step( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + train_dl, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value + + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)}, + "loss": random.randint(0, 10), + } + light_model = LitModel(model) + for batch in train_dl: + batch + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"train_{expected_loss_key}"] + def test_mmdet_bbox_models_light_validate( self, ds, model_type, pretrained, cfg_options, samples_source, request ): @@ -93,6 +150,39 @@ def configure_optimizers(self): trainer.validate(light_model, valid_dl) + def test_mmdet_bbox_models_light_logs_losses_during_validation_step( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value + + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)} + } + light_model = LitModel(model) + for batch in valid_dl: + batch + break + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"val_{expected_loss_key}"] + def test_mmdet_bbox_models_light_finalizes_metrics_on_validation_epoch_end( self, ds, model_type, pretrained, cfg_options, samples_source, request ): @@ -139,6 +229,39 @@ def configure_optimizers(self): trainer.test(light_model, valid_dl) + def test_mmdet_bbox_models_light_logs_losses_during_test_step( + self, ds, model_type, pretrained, cfg_options, samples_source, request + ): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value + + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)} + } + light_model = LitModel(model) + for batch in valid_dl: + batch + break + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"test_{expected_loss_key}"] + def test_mmdet_bbox_models_light_finalizes_metrics_on_test_epoch_end( self, ds, model_type, pretrained, cfg_options, samples_source, request ): From 0f97db875ffb8cf6f71d7d1e98a7d3183a5ec8e2 Mon Sep 17 
00:00:00 2001 From: brown Date: Fri, 22 Jul 2022 11:07:06 -0400 Subject: [PATCH 17/20] feature/1124 Added unit tests for torchvision lightning model adapter --- .../torchvision/lightning_model_adapter.py | 15 ++++--- .../faster_rcnn/lightning/test_test.py | 20 +++++++++ .../faster_rcnn/lightning/test_train.py | 40 ++++++++++++++++- .../faster_rcnn/lightning/test_validate.py | 20 +++++++++ .../keypoints_rcnn/lightning/test_test.py | 21 +++++++++ .../keypoints_rcnn/lightning/test_train.py | 39 ++++++++++++++++ .../keypoints_rcnn/lightning/test_validate.py | 21 +++++++++ .../retinanet/lightning/test_test.py | 20 +++++++++ .../retinanet/lightning/test_train.py | 45 +++++++++++++------ .../retinanet/lightning/test_validate.py | 20 +++++++++ 10 files changed, 241 insertions(+), 20 deletions(-) diff --git a/icevision/models/torchvision/lightning_model_adapter.py b/icevision/models/torchvision/lightning_model_adapter.py index f8ea31a76..ed8a5f78e 100644 --- a/icevision/models/torchvision/lightning_model_adapter.py +++ b/icevision/models/torchvision/lightning_model_adapter.py @@ -29,20 +29,23 @@ def training_step(self, batch, batch_idx): (xb, yb), records = batch preds = self(xb, yb) - loss = loss_fn(preds, yb) + loss = self.compute_loss(preds, yb) self.log("train_loss", loss) return loss + def compute_loss(self, preds, yb): + return loss_fn(preds, yb) + def validation_step(self, batch, batch_idx): - self._shared_eval(batch, loss_log_key="val_loss") + self._shared_eval(batch, loss_log_key="val") def _shared_eval(self, batch, loss_log_key): (xb, yb), records = batch self.train() - train_preds = self(xb, yb) - loss = loss_fn(train_preds, yb) + preds = self(xb, yb) + loss = self.compute_loss(preds, yb) self.eval() raw_preds = self(xb) @@ -51,13 +54,13 @@ def _shared_eval(self, batch, loss_log_key): ) self.accumulate_metrics(preds=preds) - self.log(loss_log_key, loss) + self.log(f"{loss_log_key}_loss", loss) def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self._shared_eval(batch=batch, loss_log_key="test_loss") + self._shared_eval(batch=batch, loss_log_key="test") def test_epoch_end(self, outs): self.finalize_metrics() diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py index 547a44e70..967b5a571 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py @@ -9,6 +9,7 @@ class LightModel(faster_rcnn.lightning.ModelAdapter): def __init__(self, model, metrics=None): super(LightModel, self).__init__(model, metrics) self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -16,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -45,3 +50,18 @@ def test_lightining_faster_rcnn_finalizes_metrics_on_test_epoch_end( light_model.test_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_faster_rcnn_logs_losses_during_test_step( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls +): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break + 
light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py index c07d232c8..73c0a956a 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py @@ -1,4 +1,5 @@ import pytest +import random from icevision.all import * from icevision.models.torchvision import faster_rcnn @@ -6,8 +7,17 @@ @pytest.fixture def light_model_cls(): class LightModel(faster_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + def configure_optimizers(self): - return SGD(self.parameters(), lr=1e-3) + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value return LightModel @@ -27,3 +37,31 @@ def test_lightining_faster_rcnn_train( checkpoint_callback=False, ) trainer.fit(light_model, train_dl, valid_dl) + + +def test_lightining_faster_rcnn_training_step_returns_loss( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls +): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + +def test_lightining_faster_rcnn_logs_losses_during_training_step( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls +): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == ["train_loss"] diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py index f12cf17a2..75da4d28c 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py @@ -9,6 +9,7 @@ class LightModel(faster_rcnn.lightning.ModelAdapter): def __init__(self, model, metrics=None): super(LightModel, self).__init__(model, metrics) self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -16,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -45,3 +50,18 @@ def test_lightining_faster_rcnn_finalizes_metrics_on_validation_epoch_end( light_model.validation_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_faster_rcnn_logs_losses_during_validation_step( + fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls +): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda **args: None 
+ light_model.accumulate_metrics = lambda **args: None + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py index 17cd154fb..fde3898f0 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py @@ -9,6 +9,7 @@ class LightModel(keypoint_rcnn.lightning.ModelAdapter): def __init__(self, model, metrics=None): super(LightModel, self).__init__(model, metrics) self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -16,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -41,3 +46,19 @@ def test_lightining_keypoints_finalizes_metrics_on_test_epoch_end(light_model_cl light_model.test_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_keypoints_rcnn_logs_losses_during_test_step( + ochuman_keypoints_dls, light_model_cls +): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py index 33b3809c7..5f57d6778 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py @@ -6,9 +6,18 @@ @pytest.fixture def light_model_cls(): class LightModel(keypoint_rcnn.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + def configure_optimizers(self): return SGD(self.parameters(), lr=1e-4) + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -25,3 +34,33 @@ def test_lightining_keypoints_rcnn_train(ochuman_keypoints_dls, light_model_cls) checkpoint_callback=False, ) trainer.fit(light_model, train_dl, valid_dl) + + +def test_lightining_keypoints_rcnn_training_step_returns_loss( + ochuman_keypoints_dls, light_model_cls +): + train_dl, _ = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + +def test_lightining_keypoints_rcnn_logs_losses_during_training_step( + ochuman_keypoints_dls, light_model_cls +): + train_dl, _ = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in train_dl: + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == ["train_loss"] diff --git 
a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py index 637de9256..e8db4d6ec 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py @@ -9,6 +9,7 @@ class LightModel(keypoint_rcnn.lightning.ModelAdapter): def __init__(self, model, metrics=None): super(LightModel, self).__init__(model, metrics) self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -16,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -43,3 +48,19 @@ def test_lightining_keypoints_finalizes_metrics_on_validation_epoch_end( light_model.validation_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_keypoints_rcnn_logs_losses_during_validation_step( + ochuman_keypoints_dls, light_model_cls +): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/torchvision_models/retinanet/lightning/test_test.py b/tests/models/torchvision_models/retinanet/lightning/test_test.py index 7a7e2f1cf..689b968bc 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_test.py +++ b/tests/models/torchvision_models/retinanet/lightning/test_test.py @@ -9,6 +9,7 @@ class LightModel(retinanet.lightning.ModelAdapter): def __init__(self, model, metrics=None): super(LightModel, self).__init__(model, metrics) self.was_finalize_metrics_called = False + self.logs = {} def configure_optimizers(self): return SGD(self.parameters(), lr=1e-3) @@ -16,6 +17,10 @@ def configure_optimizers(self): def finalize_metrics(self): self.was_finalize_metrics_called = True + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + return LightModel @@ -45,3 +50,18 @@ def test_lightining_retinanet_finalizes_metrics_on_test_epoch_end( light_model.test_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +def test_lightining_retinanet_logs_losses_during_test_step( + fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls +): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/retinanet/lightning/test_train.py b/tests/models/torchvision_models/retinanet/lightning/test_train.py index 95fd4a7d8..cc7c0eafa 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_train.py +++ b/tests/models/torchvision_models/retinanet/lightning/test_train.py @@ -6,9 +6,17 @@ @pytest.fixture def light_model_cls(): class LightModel(retinanet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + 
super(LightModel, self).__init__(model, metrics)
+            self.logs = {}
+
         def configure_optimizers(self):
             return SGD(self.parameters(), lr=1e-4)
 
+        def log(self, key, value, **args):
+            super(LightModel, self).log(key, value, **args)
+            self.logs[key] = value
+
     return LightModel
 
 
@@ -29,18 +37,29 @@ def test_lightining_retinanet_train(
     trainer.fit(light_model, train_dl, valid_dl)
 
 
-@pytest.mark.parametrize("metrics", [[], [COCOMetric()]])
-def test_lightining_retinanet_test(
-    fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls, metrics
+def test_lightining_retinanet_training_step_returns_loss(
+    fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls
 ):
-    _, valid_dl = fridge_faster_rcnn_dls
-    light_model = light_model_cls(fridge_retinanet_model, metrics=metrics)
+    train_dl, _ = fridge_faster_rcnn_dls
+    light_model = light_model_cls(fridge_retinanet_model)
+    expected_loss = random.randint(0, 10)
+    light_model.compute_loss = lambda *args: expected_loss
+    for batch in train_dl:
+        break
 
-    trainer = pl.Trainer(
-        max_epochs=1,
-        weights_summary=None,
-        num_sanity_val_steps=0,
-        logger=False,
-        checkpoint_callback=False,
-    )
-    trainer.test(light_model, valid_dl)
+    loss = light_model.training_step(batch, 0)
+
+    assert loss == expected_loss
+
+
+def test_lightining_retinanet_logs_losses_during_training_step(
+    fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls
+):
+    train_dl, _ = fridge_faster_rcnn_dls
+    light_model = light_model_cls(fridge_retinanet_model)
+    for batch in train_dl:
+        break
+
+    light_model.training_step(batch, 0)
+
+    assert list(light_model.logs.keys()) == ["train_loss"]
diff --git a/tests/models/torchvision_models/retinanet/lightning/test_validate.py b/tests/models/torchvision_models/retinanet/lightning/test_validate.py
index a3760549a..a1710a4a3 100644
--- a/tests/models/torchvision_models/retinanet/lightning/test_validate.py
+++ b/tests/models/torchvision_models/retinanet/lightning/test_validate.py
@@ -9,6 +9,7 @@ class LightModel(retinanet.lightning.ModelAdapter):
     def __init__(self, model, metrics=None):
         super(LightModel, self).__init__(model, metrics)
         self.was_finalize_metrics_called = False
+        self.logs = {}
 
     def configure_optimizers(self):
         return SGD(self.parameters(), lr=1e-3)
@@ -16,6 +17,10 @@ def finalize_metrics(self):
         self.was_finalize_metrics_called = True
 
+    def log(self, key, value, **args):
+        super(LightModel, self).log(key, value, **args)
+        self.logs[key] = value
+
     return LightModel
@@ -45,3 +50,18 @@ def test_lightining_retinanet_finalizes_metrics_on_validation_epoch_end(
     light_model.validation_epoch_end(None)
 
     assert light_model.was_finalize_metrics_called == True
+
+
+def test_lightining_retinanet_logs_losses_during_validation_step(
+    fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls
+):
+    _, valid_dl = fridge_faster_rcnn_dls
+    light_model = light_model_cls(fridge_retinanet_model)
+    for batch in valid_dl:
+        break
+    light_model.convert_raw_predictions = lambda **args: None
+    light_model.accumulate_metrics = lambda **args: None
+
+    light_model.validation_step(batch, 0)
+
+    assert list(light_model.logs.keys()) == ["val_loss"]
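These torchvision tests work because PATCH 17 routes the training loss through the small `compute_loss` method, giving each test a single point to stub so `training_step`'s return value becomes deterministic. A minimal sketch of the idea, with hypothetical names rather than the icevision API:

class Adapter:
    # Hypothetical miniature of the torchvision adapter's training path;
    # only the shape of the seam is shown.
    def forward(self, xb, yb):
        return [1.0, 2.0]  # placeholder predictions

    def compute_loss(self, preds, yb):
        return sum(preds)  # stands in for the real loss function

    def training_step(self, batch, batch_idx):
        (xb, yb), records = batch
        preds = self.forward(xb, yb)
        return self.compute_loss(preds, yb)


adapter = Adapter()
adapter.compute_loss = lambda *args: 7  # stubbed, exactly as the tests do
assert adapter.training_step((([], []), []), 0) == 7

From 7cc8d88bc9f848dc67146bf18491fe4a9ecdacd5 Mon Sep 17 00:00:00 2001
From: brown
Date: Fri, 22 Jul 2022 11:56:38 -0400
Subject: [PATCH 18/20] feature/1124 Added unit tests for yolov5 lightning
 model adapter
---
 .../yolov5/lightning/model_adapter.py         | 19 ++++--
 .../ultralytics/yolov5/lightning/test_test.py | 41 ++++++++++-
 .../yolov5/lightning/test_train.py            | 68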
+++++++++++++++++++ .../yolov5/lightning/test_validate.py | 39 +++++++++++ 4 files changed, 162 insertions(+), 5 deletions(-) diff --git a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py index 59cdb0abb..58184515a 100644 --- a/icevision/models/ultralytics/yolov5/lightning/model_adapter.py +++ b/icevision/models/ultralytics/yolov5/lightning/model_adapter.py @@ -40,13 +40,13 @@ def training_step(self, batch, batch_idx): return loss def validation_step(self, batch, batch_idx): - self._shared_eval(batch, loss_log_key="val_loss") + self._shared_eval(batch, loss_log_key="val") def _shared_eval(self, batch, loss_log_key): (xb, yb), records = batch inference_out, training_out = self(xb) - preds = yolov5.convert_raw_predictions( + preds = self.convert_raw_predictions( batch=xb, raw_preds=inference_out, records=records, @@ -57,13 +57,24 @@ def _shared_eval(self, batch, loss_log_key): self.accumulate_metrics(preds) - self.log(loss_log_key, loss) + self.log(f"{loss_log_key}_loss", loss) + + def convert_raw_predictions( + self, batch, raw_preds, records, detection_threshold, nms_iou_threshold + ): + return yolov5.convert_raw_predictions( + batch=batch, + raw_preds=raw_preds, + records=records, + detection_threshold=detection_threshold, + nms_iou_threshold=nms_iou_threshold, + ) def validation_epoch_end(self, outs): self.finalize_metrics() def test_step(self, batch, batch_idx): - self._shared_eval(batch=batch, loss_log_key="test_loss") + self._shared_eval(batch=batch, loss_log_key="test") def test_epoch_end(self, outs): self.finalize_metrics() diff --git a/tests/models/ultralytics/yolov5/lightning/test_test.py b/tests/models/ultralytics/yolov5/lightning/test_test.py index 2d091d1e6..30831f3bd 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_test.py +++ b/tests/models/ultralytics/yolov5/lightning/test_test.py @@ -41,7 +41,7 @@ def test_lightning_yolo_test(fridge_ds, backbone, light_model_cls): @pytest.mark.parametrize( "backbone", - [small, medium, large, extra_large], + [small, medium], ) def test_lightning_yolo_finalizes_metrics_on_test_epoch_end(backbone, light_model_cls): model = models.ultralytics.yolov5.model( @@ -53,3 +53,42 @@ def test_lightning_yolo_finalizes_metrics_on_test_epoch_end(backbone, light_mode light_model.test_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +@pytest.mark.parametrize( + "backbone", + [small, medium], +) +def test_lightning_yolo_logs_losses_during_test_step(fridge_ds, backbone): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + light_model = LightModel(model) + light_model.to("cpu") + light_model.eval() + light_model.compute_loss = lambda *args: [random.randint(0, 10)] + for batch in valid_dl: + batch + break + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"test_loss"] diff --git 
a/tests/models/ultralytics/yolov5/lightning/test_train.py b/tests/models/ultralytics/yolov5/lightning/test_train.py index 28c6fe835..9043bf7ef 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_train.py +++ b/tests/models/ultralytics/yolov5/lightning/test_train.py @@ -30,3 +30,71 @@ def configure_optimizers(self): gpus = 1 if torch.cuda.is_available() else 0 trainer = pl.Trainer(max_epochs=1, gpus=gpus) trainer.fit(light_model, train_dl, valid_dl) + + +@pytest.mark.parametrize( + "backbone", + [small, medium], +) +def test_lightning_yolo_training_step_returns_loss(fridge_ds, backbone): + train_ds, _ = fridge_ds + train_dl = models.ultralytics.yolov5.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + light_model = LightModel(model) + light_model.to("cpu") + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: [expected_loss] + for batch in train_dl: + batch + break + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + +@pytest.mark.parametrize( + "backbone", + [small, medium], +) +def test_lightning_yolo_logs_losses_during_training_step(fridge_ds, backbone): + train_ds, _ = fridge_ds + train_dl = models.ultralytics.yolov5.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + light_model = LightModel(model) + light_model.to("cpu") + light_model.compute_loss = lambda *args: [random.randint(0, 10)] + for batch in train_dl: + batch + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"train_loss"] diff --git a/tests/models/ultralytics/yolov5/lightning/test_validate.py b/tests/models/ultralytics/yolov5/lightning/test_validate.py index ee27bcc9b..0badb13d0 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_validate.py +++ b/tests/models/ultralytics/yolov5/lightning/test_validate.py @@ -55,3 +55,42 @@ def test_lightning_yolo_finalizes_metrics_on_validation_epoch_end( light_model.validation_epoch_end(None) assert light_model.was_finalize_metrics_called == True + + +@pytest.mark.parametrize( + "backbone", + [small, medium], +) +def test_lightning_yolo_logs_losses_during_validation_step(fridge_ds, backbone): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, 
self).log(key, value, **args)
+            self.logs[key] = value
+
+    light_model = LightModel(model)
+    light_model.to("cpu")
+    light_model.eval()
+    light_model.compute_loss = lambda *args: [random.randint(0, 10)]
+    for batch in valid_dl:
+        batch
+        break
+
+    light_model.validation_step(batch, 0)
+
+    assert list(light_model.logs.keys()) == [f"val_loss"]
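The yolov5 change above applies the same trick to postprocessing: wrapping the module-level `yolov5.convert_raw_predictions` in an instance method gives tests a per-instance stub point, so the eval path can run without the heavy conversion. A small self-contained sketch of that indirection in plain Python; all names here are illustrative stand-ins, not the icevision API.

def convert_raw_predictions_module_level(batch, raw_preds, records):
    # Stands in for yolov5.convert_raw_predictions: heavy postprocessing
    # that a unit test does not want to execute.
    raise RuntimeError("expensive postprocessing")


class Adapter:
    def convert_raw_predictions(self, batch, raw_preds, records):
        # Thin wrapper: the only indirection the tests need.
        return convert_raw_predictions_module_level(batch, raw_preds, records)

    def _shared_eval(self, batch, records):
        return self.convert_raw_predictions(batch, raw_preds=[], records=records)


adapter = Adapter()
adapter.convert_raw_predictions = lambda *args, **kwargs: None  # stubbed
assert adapter._shared_eval(batch=None, records=None) is None

From 8f39eff5aa59eac0e4f4e433590402721d8268b0 Mon Sep 17 00:00:00 2001
From: brown
Date: Fri, 22 Jul 2022 12:26:29 -0400
Subject: [PATCH 19/20] feature/1124 Added unit tests for fastai unet lightning
 model adapter
---
 icevision/models/fastai/unet/lightning.py     | 38 +++++++++---
 .../models/fastai/unet/lightning/test_test.py | 61 +++++++++++++++++++
 .../fastai/unet/lightning/test_train.py       | 57 +++++++++++++++++
 .../fastai/unet/lightning/test_validate.py    | 61 +++++++++++++++++++
 4 files changed, 207 insertions(+), 10 deletions(-)
 create mode 100644 tests/models/fastai/unet/lightning/test_test.py
 create mode 100644 tests/models/fastai/unet/lightning/test_validate.py

diff --git a/icevision/models/fastai/unet/lightning.py b/icevision/models/fastai/unet/lightning.py
index 8506746d7..2d773ace3 100644
--- a/icevision/models/fastai/unet/lightning.py
+++ b/icevision/models/fastai/unet/lightning.py
@@ -33,28 +33,46 @@ def training_step(self, batch, batch_idx):
         (xb, yb), _ = batch
         preds = self(xb)
 
-        loss = self.loss_func(preds, yb)
+        loss = self.compute_loss(preds, yb)
 
         self.log("train_loss", loss)
 
         return loss
 
+    def compute_loss(self, preds, yb):
+        return self.loss_func(preds, yb)
+
     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch
 
-        with torch.no_grad():
-            preds = self(xb)
-            loss = self.loss_func(preds, yb)
+        preds = self(xb)
+        loss = self.compute_loss(preds, yb)
 
-            preds = unet.convert_raw_predictions(
-                batch=xb,
-                raw_preds=preds,
-                records=records,
-            )
+        preds = self.convert_raw_predictions(
+            batch=xb,
+            raw_preds=preds,
+            records=records,
+        )
 
         self.accumulate_metrics(preds)
 
-        self.log("val_loss", loss)
+        self.log(f"{loss_log_key}_loss", loss)
+
+    def convert_raw_predictions(self, batch, raw_preds, records):
+        return unet.convert_raw_predictions(
+            batch=batch,
+            raw_preds=raw_preds,
+            records=records,
+        )
 
     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
diff --git a/tests/models/fastai/unet/lightning/test_test.py b/tests/models/fastai/unet/lightning/test_test.py
new file mode 100644
index 000000000..758e5cc59
--- /dev/null
+++ b/tests/models/fastai/unet/lightning/test_test.py
@@ -0,0 +1,61 @@
+import pytest
+from icevision.all import *
+from icevision.models.fastai.unet.backbones import *
+
+
+@pytest.mark.parametrize("backbone", [resnet18])
+def test_fastai_unet_test(camvid_ds, backbone):
+    _, valid_ds = camvid_ds
+    valid_dl = models.fastai.unet.valid_dl(
+        valid_ds, batch_size=1, num_workers=0, shuffle=False
+    )
+    model = models.fastai.unet.model(
+        num_classes=32, img_size=64, backbone=backbone(pretrained=False)
+    )
+
+    class LightModel(models.fastai.unet.lightning.ModelAdapter):
+        def configure_optimizers(self):
+            return Adam(self.parameters(), lr=1e-4)
+
+    light_model = LightModel(model)
+
+    trainer = pl.Trainer(
+        max_epochs=1,
+        weights_summary=None,
+        num_sanity_val_steps=0,
+        logger=False,
+        checkpoint_callback=False,
+    )
+    trainer.test(light_model, valid_dl)
+
+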
+@pytest.mark.parametrize("backbone", [resnet18]) +def test_fastai_unet_logs_losses_during_test_step(camvid_ds, backbone): + _, valid_ds = camvid_ds + valid_dl = models.fastai.unet.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + for batch in valid_dl: + break + light_model = LightModel(model) + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/fastai/unet/lightning/test_train.py b/tests/models/fastai/unet/lightning/test_train.py index 24dfa2529..91a32625b 100644 --- a/tests/models/fastai/unet/lightning/test_train.py +++ b/tests/models/fastai/unet/lightning/test_train.py @@ -30,3 +30,60 @@ def configure_optimizers(self): checkpoint_callback=False, ) trainer.fit(light_model, train_dl, valid_dl) + + +@pytest.mark.parametrize("backbone", [resnet18]) +def test_fastai_unet_training_step_returns_loss(camvid_ds, backbone): + train_ds, _ = camvid_ds + train_dl = models.fastai.unet.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + for batch in train_dl: + break + expected_loss = random.randint(0, 1000) + light_model = LightModel(model) + light_model.compute_loss = lambda *args: expected_loss + + loss = light_model.training_step(batch, 0) + + assert loss == expected_loss + + +@pytest.mark.parametrize("backbone", [resnet18]) +def test_fastai_unet_logs_losses_during_training_step(camvid_ds, backbone): + train_ds, _ = camvid_ds + train_dl = models.fastai.unet.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + for batch in train_dl: + break + light_model = LightModel(model) + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == ["train_loss"] diff --git a/tests/models/fastai/unet/lightning/test_validate.py b/tests/models/fastai/unet/lightning/test_validate.py new file mode 100644 index 000000000..1303d2600 --- /dev/null +++ b/tests/models/fastai/unet/lightning/test_validate.py @@ -0,0 +1,61 @@ +import pytest +from icevision.all import * +from icevision.models.fastai.unet.backbones import * + + +@pytest.mark.parametrize("backbone", [resnet18]) +def test_fastai_unet_validate(camvid_ds, backbone): + _, valid_ds = camvid_ds + valid_dl = models.fastai.unet.valid_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + 
model = models.fastai.unet.model(
+        num_classes=32, img_size=64, backbone=backbone(pretrained=False)
+    )
+
+    class LightModel(models.fastai.unet.lightning.ModelAdapter):
+        def configure_optimizers(self):
+            return Adam(self.parameters(), lr=1e-4)
+
+    light_model = LightModel(model)
+
+    trainer = pl.Trainer(
+        max_epochs=1,
+        weights_summary=None,
+        num_sanity_val_steps=0,
+        logger=False,
+        checkpoint_callback=False,
+    )
+    trainer.validate(light_model, valid_dl)
+
+
+@pytest.mark.parametrize("backbone", [resnet18])
+def test_fastai_unet_logs_losses_during_validation_step(camvid_ds, backbone):
+    _, valid_ds = camvid_ds
+    valid_dl = models.fastai.unet.train_dl(
+        valid_ds, batch_size=1, num_workers=0, shuffle=False
+    )
+    model = models.fastai.unet.model(
+        num_classes=32, img_size=64, backbone=backbone(pretrained=False)
+    )
+
+    class LightModel(models.fastai.unet.lightning.ModelAdapter):
+        def __init__(self, model, metrics=None):
+            super(LightModel, self).__init__(model, metrics)
+            self.model = model
+            self.logs = {}
+
+        def configure_optimizers(self):
+            return Adam(self.parameters(), lr=1e-4)
+
+        def log(self, key, value, **args):
+            super(LightModel, self).log(key, value, **args)
+            self.logs[key] = value
+
+    for batch in valid_dl:
+        break
+    light_model = LightModel(model)
+
+    light_model.validation_step(batch, 0)
+
+    assert list(light_model.logs.keys()) == ["val_loss"]
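PATCH 20 below reduces test memory consumption by wrapping the eval-path tests in `torch.set_grad_enabled(False)` and the training-path tests in `torch.set_grad_enabled(True)`. The effect is visible in a few lines of plain PyTorch: with gradients disabled, no autograd graph is retained, so intermediate activations can be freed immediately instead of being held for a backward pass.

import torch

x = torch.randn(8, 8, requires_grad=True)

with torch.set_grad_enabled(False):
    y = (x * 2).sum()
assert y.grad_fn is None  # no autograd graph kept; buffers can be freed

with torch.set_grad_enabled(True):
    y = (x * 2).sum()
assert y.grad_fn is not None  # graph retained for a potential backward

From 3ee0e5477174f989e673cb798100714cb701dfc5 Mon Sep 17 00:00:00 2001
From: brown
Date: Fri, 22 Jul 2022 12:37:19 -0400
Subject: [PATCH 20/20] feature/1124 Fixed memory consumption in PL tests
---
 .../efficient_det/lightning/test_test.py      |  34 +--
 .../efficient_det/lightning/test_train.py     |  38 +--
 .../efficient_det/lightning/test_validate.py  |  36 +--
 .../models/fastai/unet/lightning/test_test.py |  57 +++--
 .../fastai/unet/lightning/test_train.py       |  92 +++----
 .../fastai/unet/lightning/test_validate.py    |  57 +++--
 tests/models/mmdet/test_model.py              | 242 +++++++++---------
 .../faster_rcnn/lightning/test_test.py        |  24 +-
 .../faster_rcnn/lightning/test_train.py       |  30 ++-
 .../faster_rcnn/lightning/test_validate.py    |  24 +-
 .../keypoints_rcnn/lightning/test_test.py     |  32 +--
 .../keypoints_rcnn/lightning/test_train.py    |  34 +--
 .../keypoints_rcnn/lightning/test_validate.py |  32 +--
 .../retinanet/lightning/test_test.py          |  24 +-
 .../retinanet/lightning/test_train.py         |  30 ++-
 .../retinanet/lightning/test_validate.py      |  24 +-
 .../ultralytics/yolov5/lightning/test_test.py |  80 +++---
 .../yolov5/lightning/test_train.py            | 102 ++++----
 .../yolov5/lightning/test_validate.py         |  80 +++---
 19 files changed, 556 insertions(+), 516 deletions(-)

diff --git a/tests/models/efficient_det/lightning/test_test.py b/tests/models/efficient_det/lightning/test_test.py
index c84c468b7..de0533841 100644
--- a/tests/models/efficient_det/lightning/test_test.py
+++ b/tests/models/efficient_det/lightning/test_test.py
@@ -46,26 +46,28 @@ def test_lightining_efficientdet_test(
 def test_lightining_efficientdet_finalizes_metrics_on_test_epoch_end(
     fridge_efficientdet_model, light_model_cls, metrics
 ):
-    light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics)
+    with torch.set_grad_enabled(False):
+        light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics)
 
-    light_model.test_epoch_end(None)
+        light_model.test_epoch_end(None)
 
-    assert light_model.was_finalize_metrics_called == True
+        assert light_model.was_finalize_metrics_called == True
 
 
 def test_lightining_efficientdet_logs_losses_during_test_step(
     fridge_efficientdet_dls,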
fridge_efficientdet_model, light_model_cls ): - train_dl, _ = fridge_efficientdet_dls - light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) - for batch in train_dl: - break - light_model.convert_raw_predictions = lambda *args: None - light_model.compute_loss = lambda *args: None - light_model.accumulate_metrics = lambda *args: None - - light_model.test_step(batch, 0) - - assert sorted(light_model.logs.keys()) == sorted( - ["test_loss", "test_box_loss", "test_class_loss"] - ) + with torch.set_grad_enabled(False): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda *args: None + light_model.compute_loss = lambda *args: None + light_model.accumulate_metrics = lambda *args: None + + light_model.test_step(batch, 0) + + assert sorted(light_model.logs.keys()) == sorted( + ["test_loss", "test_box_loss", "test_class_loss"] + ) diff --git a/tests/models/efficient_det/lightning/test_train.py b/tests/models/efficient_det/lightning/test_train.py index 260d4303d..53158c033 100644 --- a/tests/models/efficient_det/lightning/test_train.py +++ b/tests/models/efficient_det/lightning/test_train.py @@ -43,32 +43,34 @@ def test_lightining_efficientdet_train( def test_lightining_efficientdet_training_step_returns_loss( fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls ): - train_dl, _ = fridge_efficientdet_dls - light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) - for batch in train_dl: - break - expected_loss = random.randint(0, 1000) + with torch.set_grad_enabled(True): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + expected_loss = random.randint(0, 1000) - def fake_compute_loss(self, *args): - return expected_loss + def fake_compute_loss(self, *args): + return expected_loss - light_model.compute_loss = fake_compute_loss + light_model.compute_loss = fake_compute_loss - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss def test_lightining_efficientdet_logs_losses_during_training_step( fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls ): - train_dl, _ = fridge_efficientdet_dls - light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break - light_model.training_step(batch, 0) + light_model.training_step(batch, 0) - assert sorted(light_model.logs.keys()) == sorted( - ["train_loss", "train_box_loss", "train_class_loss"] - ) + assert sorted(light_model.logs.keys()) == sorted( + ["train_loss", "train_box_loss", "train_class_loss"] + ) diff --git a/tests/models/efficient_det/lightning/test_validate.py b/tests/models/efficient_det/lightning/test_validate.py index cab20e458..975bf55e1 100644 --- a/tests/models/efficient_det/lightning/test_validate.py +++ b/tests/models/efficient_det/lightning/test_validate.py @@ -47,27 +47,29 @@ def test_lightining_efficientdet_validate( def test_lightining_efficientdet_finalizes_metrics_on_validation_epoch_end( fridge_efficientdet_model, light_model_cls, metrics ): - light_model = 
light_model_cls(fridge_efficientdet_model, metrics=metrics) - light_model.convert_raw_predictions = lambda *args: None + with torch.set_grad_enabled(False): + light_model = light_model_cls(fridge_efficientdet_model, metrics=metrics) + light_model.convert_raw_predictions = lambda *args: None - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_efficientdet_logs_losses_during_validation_step( fridge_efficientdet_dls, fridge_efficientdet_model, light_model_cls ): - train_dl, _ = fridge_efficientdet_dls - light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) - for batch in train_dl: - break - light_model.convert_raw_predictions = lambda *args: None - light_model.compute_loss = lambda *args: None - light_model.accumulate_metrics = lambda *args: None - - light_model.validation_step(batch, 0) - - assert sorted(light_model.logs.keys()) == sorted( - ["val_loss", "val_box_loss", "val_class_loss"] - ) + with torch.set_grad_enabled(False): + train_dl, _ = fridge_efficientdet_dls + light_model = light_model_cls(model=fridge_efficientdet_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda *args: None + light_model.compute_loss = lambda *args: None + light_model.accumulate_metrics = lambda *args: None + + light_model.validation_step(batch, 0) + + assert sorted(light_model.logs.keys()) == sorted( + ["val_loss", "val_box_loss", "val_class_loss"] + ) diff --git a/tests/models/fastai/unet/lightning/test_test.py b/tests/models/fastai/unet/lightning/test_test.py index 758e5cc59..78b53cfa5 100644 --- a/tests/models/fastai/unet/lightning/test_test.py +++ b/tests/models/fastai/unet/lightning/test_test.py @@ -31,31 +31,32 @@ def configure_optimizers(self): @pytest.mark.parametrize("backbone", [resnet18]) def test_fastai_unet_logs_losses_during_test_step(camvid_ds, backbone): - _, valid_ds = camvid_ds - valid_dl = models.fastai.unet.train_dl( - valid_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.fastai.unet.model( - num_classes=32, img_size=64, backbone=backbone(pretrained=False) - ) - - class LightModel(models.fastai.unet.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - for batch in valid_dl: - break - light_model = LightModel(model) - - light_model.test_step(batch, 0) - - assert list(light_model.logs.keys()) == ["test_loss"] + with torch.set_grad_enabled(False): + _, valid_ds = camvid_ds + valid_dl = models.fastai.unet.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + for batch in valid_dl: + break + light_model = LightModel(model) + + 
light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/fastai/unet/lightning/test_train.py b/tests/models/fastai/unet/lightning/test_train.py index 91a32625b..d8e626fd3 100644 --- a/tests/models/fastai/unet/lightning/test_train.py +++ b/tests/models/fastai/unet/lightning/test_train.py @@ -34,56 +34,58 @@ def configure_optimizers(self): @pytest.mark.parametrize("backbone", [resnet18]) def test_fastai_unet_training_step_returns_loss(camvid_ds, backbone): - train_ds, _ = camvid_ds - train_dl = models.fastai.unet.train_dl( - train_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.fastai.unet.model( - num_classes=32, img_size=64, backbone=backbone(pretrained=False) - ) + with torch.set_grad_enabled(True): + train_ds, _ = camvid_ds + train_dl = models.fastai.unet.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) - class LightModel(models.fastai.unet.lightning.ModelAdapter): - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - for batch in train_dl: - break - expected_loss = random.randint(0, 1000) - light_model = LightModel(model) - light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break + expected_loss = random.randint(0, 1000) + light_model = LightModel(model) + light_model.compute_loss = lambda *args: expected_loss - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss @pytest.mark.parametrize("backbone", [resnet18]) def test_fastai_unet_logs_losses_during_training_step(camvid_ds, backbone): - train_ds, _ = camvid_ds - train_dl = models.fastai.unet.train_dl( - train_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.fastai.unet.model( - num_classes=32, img_size=64, backbone=backbone(pretrained=False) - ) - - class LightModel(models.fastai.unet.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - for batch in train_dl: - break - light_model = LightModel(model) - - light_model.training_step(batch, 0) - - assert list(light_model.logs.keys()) == ["train_loss"] + with torch.set_grad_enabled(True): + train_ds, _ = camvid_ds + train_dl = models.fastai.unet.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + for batch in train_dl: + break + light_model = LightModel(model) + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == ["train_loss"] diff --git 
a/tests/models/fastai/unet/lightning/test_validate.py b/tests/models/fastai/unet/lightning/test_validate.py index 1303d2600..9881b9221 100644 --- a/tests/models/fastai/unet/lightning/test_validate.py +++ b/tests/models/fastai/unet/lightning/test_validate.py @@ -31,31 +31,32 @@ def configure_optimizers(self): @pytest.mark.parametrize("backbone", [resnet18]) def test_fastai_unet_logs_losses_during_validation_step(camvid_ds, backbone): - _, valid_ds = camvid_ds - valid_dl = models.fastai.unet.train_dl( - valid_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.fastai.unet.model( - num_classes=32, img_size=64, backbone=backbone(pretrained=False) - ) - - class LightModel(models.fastai.unet.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - for batch in valid_dl: - break - light_model = LightModel(model) - - light_model.validation_step(batch, 0) - - assert list(light_model.logs.keys()) == ["val_loss"] + with torch.set_grad_enabled(False): + _, valid_ds = camvid_ds + valid_dl = models.fastai.unet.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.fastai.unet.model( + num_classes=32, img_size=64, backbone=backbone(pretrained=False) + ) + + class LightModel(models.fastai.unet.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + for batch in valid_dl: + break + light_model = LightModel(model) + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/mmdet/test_model.py b/tests/models/mmdet/test_model.py index 9f7adc16f..00dd497f5 100644 --- a/tests/models/mmdet/test_model.py +++ b/tests/models/mmdet/test_model.py @@ -75,58 +75,60 @@ def configure_optimizers(self): def test_mmdet_bbox_models_light_training_step_returns_loss( self, ds, model_type, pretrained, cfg_options, samples_source, request ): - train_dl, _, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) + with torch.set_grad_enabled(True): + train_dl, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) - class LitModel(model_type.lightning.ModelAdapter): - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + class LitModel(model_type.lightning.ModelAdapter): + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - expected_loss = random.randint(0, 10) - model.train_step = lambda **args: {"log_vars": {}, "loss": expected_loss} - light_model = LitModel(model) - for batch in train_dl: - batch - break + expected_loss = random.randint(0, 10) + model.train_step = lambda **args: {"log_vars": {}, "loss": expected_loss} + light_model = LitModel(model) + for batch in train_dl: + batch + break - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss def test_mmdet_bbox_models_light_logs_losses_during_training_step( self, 
ds, model_type, pretrained, cfg_options, samples_source, request ): - train_dl, _, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) - - class LitModel(model_type.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LitModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LitModel, self).log(key, value, **args) - self.logs[key] = value - - expected_loss_key = "my_loss_key" - model.train_step = lambda **args: { - "log_vars": {expected_loss_key: random.randint(0, 10)}, - "loss": random.randint(0, 10), - } - light_model = LitModel(model) - for batch in train_dl: - batch - break - - light_model.training_step(batch, 0) - - assert list(light_model.logs.keys()) == [f"train_{expected_loss_key}"] + with torch.set_grad_enabled(True): + train_dl, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) + + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value + + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)}, + "loss": random.randint(0, 10), + } + light_model = LitModel(model) + for batch in train_dl: + batch + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"train_{expected_loss_key}"] def test_mmdet_bbox_models_light_validate( self, ds, model_type, pretrained, cfg_options, samples_source, request @@ -153,59 +155,61 @@ def configure_optimizers(self): def test_mmdet_bbox_models_light_logs_losses_during_validation_step( self, ds, model_type, pretrained, cfg_options, samples_source, request ): - _, valid_dl, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) + with torch.set_grad_enabled(False): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) - class LitModel(model_type.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LitModel, self).__init__(model, metrics) - self.model = model - self.logs = {} + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - def log(self, key, value, **args): - super(LitModel, self).log(key, value, **args) - self.logs[key] = value + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value - expected_loss_key = "my_loss_key" - model.train_step = lambda **args: { - "log_vars": {expected_loss_key: random.randint(0, 10)} - } - light_model = LitModel(model) - for batch in valid_dl: - batch - break + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)} + } + light_model = LitModel(model) + for batch in valid_dl: + batch + break - light_model.validation_step(batch, 
0) + light_model.validation_step(batch, 0) - assert list(light_model.logs.keys()) == [f"val_{expected_loss_key}"] + assert list(light_model.logs.keys()) == [f"val_{expected_loss_key}"] def test_mmdet_bbox_models_light_finalizes_metrics_on_validation_epoch_end( self, ds, model_type, pretrained, cfg_options, samples_source, request ): - _, _, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) + with torch.set_grad_enabled(False): + _, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) - class LitModel(model_type.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LitModel, self).__init__(model, metrics) - self.was_finalize_metrics_called = False + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - def finalize_metrics(self): - self.was_finalize_metrics_called = True + def finalize_metrics(self): + self.was_finalize_metrics_called = True - light_model = LitModel(model) + light_model = LitModel(model) - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_mmdet_bbox_models_light_test( self, ds, model_type, pretrained, cfg_options, samples_source, request @@ -232,59 +236,61 @@ def configure_optimizers(self): def test_mmdet_bbox_models_light_logs_losses_during_test_step( self, ds, model_type, pretrained, cfg_options, samples_source, request ): - _, valid_dl, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) + with torch.set_grad_enabled(False): + _, valid_dl, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) - class LitModel(model_type.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LitModel, self).__init__(model, metrics) - self.model = model - self.logs = {} + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.model = model + self.logs = {} - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - def log(self, key, value, **args): - super(LitModel, self).log(key, value, **args) - self.logs[key] = value + def log(self, key, value, **args): + super(LitModel, self).log(key, value, **args) + self.logs[key] = value - expected_loss_key = "my_loss_key" - model.train_step = lambda **args: { - "log_vars": {expected_loss_key: random.randint(0, 10)} - } - light_model = LitModel(model) - for batch in valid_dl: - batch - break + expected_loss_key = "my_loss_key" + model.train_step = lambda **args: { + "log_vars": {expected_loss_key: random.randint(0, 10)} + } + light_model = LitModel(model) + for batch in valid_dl: + batch + break - light_model.test_step(batch, 0) + light_model.test_step(batch, 0) - assert list(light_model.logs.keys()) == [f"test_{expected_loss_key}"] + assert list(light_model.logs.keys()) == [f"test_{expected_loss_key}"] def test_mmdet_bbox_models_light_finalizes_metrics_on_test_epoch_end( self, ds, model_type, pretrained, cfg_options, 
samples_source, request ): - _, _, model = self.dls_model( - ds, model_type, pretrained, cfg_options, samples_source, request - ) + with torch.set_grad_enabled(False): + _, _, model = self.dls_model( + ds, model_type, pretrained, cfg_options, samples_source, request + ) - class LitModel(model_type.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LitModel, self).__init__(model, metrics) - self.was_finalize_metrics_called = False + class LitModel(model_type.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LitModel, self).__init__(model, metrics) + self.was_finalize_metrics_called = False - def configure_optimizers(self): - return Adam(self.parameters(), lr=1e-4) + def configure_optimizers(self): + return Adam(self.parameters(), lr=1e-4) - def finalize_metrics(self): - self.was_finalize_metrics_called = True + def finalize_metrics(self): + self.was_finalize_metrics_called = True - light_model = LitModel(model) + light_model = LitModel(model) - light_model.test_epoch_end(None) + light_model.test_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True @pytest.fixture() diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py index 967b5a571..08c5d0f19 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_test.py @@ -45,23 +45,25 @@ def test_lightining_faster_rcnn_test( def test_lightining_faster_rcnn_finalizes_metrics_on_test_epoch_end( fridge_faster_rcnn_model, light_model_cls, metrics ): - light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + with torch.set_grad_enabled(False): + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) - light_model.test_epoch_end(None) + light_model.test_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_faster_rcnn_logs_losses_during_test_step( fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) - for batch in train_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None + with torch.set_grad_enabled(False): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None - light_model.test_step(batch, 0) + light_model.test_step(batch, 0) - assert list(light_model.logs.keys()) == ["test_loss"] + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py index 73c0a956a..77a214ace 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_train.py @@ -42,26 +42,28 @@ def test_lightining_faster_rcnn_train( def test_lightining_faster_rcnn_training_step_returns_loss( fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = 
light_model_cls(fridge_faster_rcnn_model, metrics=None) - expected_loss = random.randint(0, 10) - light_model.compute_loss = lambda *args: expected_loss - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss def test_lightining_faster_rcnn_logs_losses_during_training_step( fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break - light_model.training_step(batch, 0) + light_model.training_step(batch, 0) - assert list(light_model.logs.keys()) == ["train_loss"] + assert list(light_model.logs.keys()) == ["train_loss"] diff --git a/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py index 75da4d28c..0f6ea0e9f 100644 --- a/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py +++ b/tests/models/torchvision_models/faster_rcnn/lightning/test_validate.py @@ -45,23 +45,25 @@ def test_lightining_faster_rcnn_validate( def test_lightining_faster_rcnn_finalizes_metrics_on_validation_epoch_end( fridge_faster_rcnn_model, light_model_cls, metrics ): - light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) + with torch.set_grad_enabled(False): + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=metrics) - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_faster_rcnn_logs_losses_during_validation_step( fridge_faster_rcnn_dls, fridge_faster_rcnn_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) - for batch in train_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None + with torch.set_grad_enabled(False): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_faster_rcnn_model, metrics=None) + for batch in train_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None - light_model.validation_step(batch, 0) + light_model.validation_step(batch, 0) - assert list(light_model.logs.keys()) == ["val_loss"] + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py index fde3898f0..9775519bb 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_test.py @@ -40,25 +40,27 @@ def test_lightining_keypoints_rcnn_test(ochuman_keypoints_dls, light_model_cls): def 
test_lightining_keypoints_finalizes_metrics_on_test_epoch_end(light_model_cls): - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) + with torch.set_grad_enabled(False): + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) - light_model.test_epoch_end(None) + light_model.test_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_keypoints_rcnn_logs_losses_during_test_step( ochuman_keypoints_dls, light_model_cls ): - _, valid_dl = ochuman_keypoints_dls - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) - for batch in valid_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None - - light_model.test_step(batch, 0) - - assert list(light_model.logs.keys()) == ["test_loss"] + with torch.set_grad_enabled(False): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py index 5f57d6778..c89189c78 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py +++ b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_train.py @@ -39,28 +39,30 @@ def test_lightining_keypoints_rcnn_train(ochuman_keypoints_dls, light_model_cls) def test_lightining_keypoints_rcnn_training_step_returns_loss( ochuman_keypoints_dls, light_model_cls ): - train_dl, _ = ochuman_keypoints_dls - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) - expected_loss = random.randint(0, 10) - light_model.compute_loss = lambda *args: expected_loss - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss def test_lightining_keypoints_rcnn_logs_losses_during_training_step( ochuman_keypoints_dls, light_model_cls ): - train_dl, _ = ochuman_keypoints_dls - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in train_dl: + break - light_model.training_step(batch, 0) + light_model.training_step(batch, 0) - assert list(light_model.logs.keys()) == ["train_loss"] + assert list(light_model.logs.keys()) == ["train_loss"] diff --git a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py index e8db4d6ec..b9a6bdef5 100644 --- a/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py +++ 
b/tests/models/torchvision_models/keypoints_rcnn/lightning/test_validate.py @@ -42,25 +42,27 @@ def test_lightining_keypoints_rcnn_validate(ochuman_keypoints_dls, light_model_c def test_lightining_keypoints_finalizes_metrics_on_validation_epoch_end( light_model_cls, ): - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) + with torch.set_grad_enabled(False): + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_keypoints_rcnn_logs_losses_during_validation_step( ochuman_keypoints_dls, light_model_cls ): - _, valid_dl = ochuman_keypoints_dls - model = keypoint_rcnn.model(num_keypoints=19) - light_model = light_model_cls(model) - for batch in valid_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None - - light_model.validation_step(batch, 0) - - assert list(light_model.logs.keys()) == ["val_loss"] + with torch.set_grad_enabled(False): + _, valid_dl = ochuman_keypoints_dls + model = keypoint_rcnn.model(num_keypoints=19) + light_model = light_model_cls(model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/torchvision_models/retinanet/lightning/test_test.py b/tests/models/torchvision_models/retinanet/lightning/test_test.py index 689b968bc..e018c35aa 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_test.py +++ b/tests/models/torchvision_models/retinanet/lightning/test_test.py @@ -45,23 +45,25 @@ def test_lightining_retinanet_test( def test_lightining_retinanet_finalizes_metrics_on_test_epoch_end( fridge_retinanet_model, light_model_cls, metrics ): - light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + with torch.set_grad_enabled(False): + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) - light_model.test_epoch_end(None) + light_model.test_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_retinanet_logs_losses_during_test_step( fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls ): - _, valid_dl = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_retinanet_model) - for batch in valid_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None + with torch.set_grad_enabled(False): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None - light_model.test_step(batch, 0) + light_model.test_step(batch, 0) - assert list(light_model.logs.keys()) == ["test_loss"] + assert list(light_model.logs.keys()) == ["test_loss"] diff --git a/tests/models/torchvision_models/retinanet/lightning/test_train.py b/tests/models/torchvision_models/retinanet/lightning/test_train.py index cc7c0eafa..8fe547bce 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_train.py +++ 
b/tests/models/torchvision_models/retinanet/lightning/test_train.py @@ -40,26 +40,28 @@ def test_lightining_retinanet_train( def test_lightining_retinanet_training_step_returns_loss( fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_retinanet_model) - expected_loss = random.randint(0, 10) - light_model.compute_loss = lambda *args: expected_loss - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model) + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: expected_loss + for batch in train_dl: + break - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss def test_lightining_retinanet_logs_losses_during_training_step( fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls ): - train_dl, _ = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_retinanet_model) - for batch in train_dl: - break + with torch.set_grad_enabled(True): + train_dl, _ = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model) + for batch in train_dl: + break - light_model.training_step(batch, 0) + light_model.training_step(batch, 0) - assert list(light_model.logs.keys()) == ["train_loss"] + assert list(light_model.logs.keys()) == ["train_loss"] diff --git a/tests/models/torchvision_models/retinanet/lightning/test_validate.py b/tests/models/torchvision_models/retinanet/lightning/test_validate.py index a1710a4a3..8d787abaa 100644 --- a/tests/models/torchvision_models/retinanet/lightning/test_validate.py +++ b/tests/models/torchvision_models/retinanet/lightning/test_validate.py @@ -45,23 +45,25 @@ def test_lightining_retinanet_validate( def test_lightining_retinanet_finalizes_metrics_on_validation_epoch_end( fridge_retinanet_model, light_model_cls, metrics ): - light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) + with torch.set_grad_enabled(False): + light_model = light_model_cls(fridge_retinanet_model, metrics=metrics) - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True def test_lightining_retinanet_logs_losses_during_validation_step( fridge_faster_rcnn_dls, fridge_retinanet_model, light_model_cls ): - _, valid_dl = fridge_faster_rcnn_dls - light_model = light_model_cls(fridge_retinanet_model) - for batch in valid_dl: - break - light_model.convert_raw_predictions = lambda **args: None - light_model.accumulate_metrics = lambda **args: None + with torch.set_grad_enabled(False): + _, valid_dl = fridge_faster_rcnn_dls + light_model = light_model_cls(fridge_retinanet_model) + for batch in valid_dl: + break + light_model.convert_raw_predictions = lambda **args: None + light_model.accumulate_metrics = lambda **args: None - light_model.validation_step(batch, 0) + light_model.validation_step(batch, 0) - assert list(light_model.logs.keys()) == ["val_loss"] + assert list(light_model.logs.keys()) == ["val_loss"] diff --git a/tests/models/ultralytics/yolov5/lightning/test_test.py b/tests/models/ultralytics/yolov5/lightning/test_test.py index 30831f3bd..03eb693c1 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_test.py +++ b/tests/models/ultralytics/yolov5/lightning/test_test.py @@ -44,15 +44,16 @@ 
def test_lightning_yolo_test(fridge_ds, backbone, light_model_cls): [small, medium], ) def test_lightning_yolo_finalizes_metrics_on_test_epoch_end(backbone, light_model_cls): - model = models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=True) - ) - metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] - light_model = light_model_cls(model, metrics=metrics) + with torch.set_grad_enabled(False): + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=True) + ) + metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] + light_model = light_model_cls(model, metrics=metrics) - light_model.test_epoch_end(None) + light_model.test_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True @pytest.mark.parametrize( @@ -60,35 +61,36 @@ def test_lightning_yolo_finalizes_metrics_on_test_epoch_end(backbone, light_mode [small, medium], ) def test_lightning_yolo_logs_losses_during_test_step(fridge_ds, backbone): - _, valid_ds = fridge_ds - valid_dl = models.ultralytics.yolov5.train_dl( - valid_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=False) - ) - - class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return SGD(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - light_model = LightModel(model) - light_model.to("cpu") - light_model.eval() - light_model.compute_loss = lambda *args: [random.randint(0, 10)] - for batch in valid_dl: - batch - break - - light_model.test_step(batch, 0) - - assert list(light_model.logs.keys()) == [f"test_loss"] + with torch.set_grad_enabled(False): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + light_model = LightModel(model) + light_model.to("cpu") + light_model.eval() + light_model.compute_loss = lambda *args: [random.randint(0, 10)] + for batch in valid_dl: + batch + break + + light_model.test_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"test_loss"] diff --git a/tests/models/ultralytics/yolov5/lightning/test_train.py b/tests/models/ultralytics/yolov5/lightning/test_train.py index 9043bf7ef..db6bd37a8 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_train.py +++ b/tests/models/ultralytics/yolov5/lightning/test_train.py @@ -37,29 +37,30 @@ def configure_optimizers(self): [small, medium], ) def test_lightning_yolo_training_step_returns_loss(fridge_ds, backbone): - train_ds, _ = fridge_ds - train_dl = models.ultralytics.yolov5.train_dl( - train_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = 
models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=False) - ) + with torch.set_grad_enabled(True): + train_ds, _ = fridge_ds + train_dl = models.ultralytics.yolov5.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) - class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): - def configure_optimizers(self): - return SGD(self.parameters(), lr=1e-4) + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) - light_model = LightModel(model) - light_model.to("cpu") - expected_loss = random.randint(0, 10) - light_model.compute_loss = lambda *args: [expected_loss] - for batch in train_dl: - batch - break + light_model = LightModel(model) + light_model.to("cpu") + expected_loss = random.randint(0, 10) + light_model.compute_loss = lambda *args: [expected_loss] + for batch in train_dl: + batch + break - loss = light_model.training_step(batch, 0) + loss = light_model.training_step(batch, 0) - assert loss == expected_loss + assert loss == expected_loss @pytest.mark.parametrize( @@ -67,34 +68,35 @@ def configure_optimizers(self): [small, medium], ) def test_lightning_yolo_logs_losses_during_training_step(fridge_ds, backbone): - train_ds, _ = fridge_ds - train_dl = models.ultralytics.yolov5.train_dl( - train_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=False) - ) - - class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return SGD(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - light_model = LightModel(model) - light_model.to("cpu") - light_model.compute_loss = lambda *args: [random.randint(0, 10)] - for batch in train_dl: - batch - break - - light_model.training_step(batch, 0) - - assert list(light_model.logs.keys()) == [f"train_loss"] + with torch.set_grad_enabled(True): + train_ds, _ = fridge_ds + train_dl = models.ultralytics.yolov5.train_dl( + train_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + light_model = LightModel(model) + light_model.to("cpu") + light_model.compute_loss = lambda *args: [random.randint(0, 10)] + for batch in train_dl: + batch + break + + light_model.training_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"train_loss"] diff --git a/tests/models/ultralytics/yolov5/lightning/test_validate.py b/tests/models/ultralytics/yolov5/lightning/test_validate.py index 0badb13d0..fd7abc540 100644 --- a/tests/models/ultralytics/yolov5/lightning/test_validate.py +++ 
b/tests/models/ultralytics/yolov5/lightning/test_validate.py @@ -46,15 +46,16 @@ def test_lightning_yolo_validate(fridge_ds, backbone, light_model_cls): def test_lightning_yolo_finalizes_metrics_on_validation_epoch_end( backbone, light_model_cls ): - model = models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=True) - ) - metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] - light_model = light_model_cls(model, metrics=metrics) + with torch.set_grad_enabled(False): + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=True) + ) + metrics = [COCOMetric(metric_type=COCOMetricType.bbox)] + light_model = light_model_cls(model, metrics=metrics) - light_model.validation_epoch_end(None) + light_model.validation_epoch_end(None) - assert light_model.was_finalize_metrics_called == True + assert light_model.was_finalize_metrics_called == True @pytest.mark.parametrize( @@ -62,35 +63,36 @@ def test_lightning_yolo_finalizes_metrics_on_validation_epoch_end( [small, medium], ) def test_lightning_yolo_logs_losses_during_validation_step(fridge_ds, backbone): - _, valid_ds = fridge_ds - valid_dl = models.ultralytics.yolov5.train_dl( - valid_ds, batch_size=1, num_workers=0, shuffle=False - ) - model = models.ultralytics.yolov5.model( - num_classes=5, img_size=384, backbone=backbone(pretrained=False) - ) - - class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): - def __init__(self, model, metrics=None): - super(LightModel, self).__init__(model, metrics) - self.model = model - self.logs = {} - - def configure_optimizers(self): - return SGD(self.parameters(), lr=1e-4) - - def log(self, key, value, **args): - super(LightModel, self).log(key, value, **args) - self.logs[key] = value - - light_model = LightModel(model) - light_model.to("cpu") - light_model.eval() - light_model.compute_loss = lambda *args: [random.randint(0, 10)] - for batch in valid_dl: - batch - break - - light_model.validation_step(batch, 0) - - assert list(light_model.logs.keys()) == [f"val_loss"] + with torch.set_grad_enabled(False): + _, valid_ds = fridge_ds + valid_dl = models.ultralytics.yolov5.train_dl( + valid_ds, batch_size=1, num_workers=0, shuffle=False + ) + model = models.ultralytics.yolov5.model( + num_classes=5, img_size=384, backbone=backbone(pretrained=False) + ) + + class LightModel(models.ultralytics.yolov5.lightning.ModelAdapter): + def __init__(self, model, metrics=None): + super(LightModel, self).__init__(model, metrics) + self.model = model + self.logs = {} + + def configure_optimizers(self): + return SGD(self.parameters(), lr=1e-4) + + def log(self, key, value, **args): + super(LightModel, self).log(key, value, **args) + self.logs[key] = value + + light_model = LightModel(model) + light_model.to("cpu") + light_model.eval() + light_model.compute_loss = lambda *args: [random.randint(0, 10)] + for batch in valid_dl: + batch + break + + light_model.validation_step(batch, 0) + + assert list(light_model.logs.keys()) == [f"val_loss"]
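
Note on exercising the new hooks (commentary, not part of the patches above): the unit tests in this series drive `test_step` and `test_epoch_end` directly; an end-to-end run would instead go through Lightning's `Trainer.test`, which calls `test_step` once per batch and `test_epoch_end` once afterwards - the same sequence the tests simulate. A minimal sketch, where `LitModel`, `model`, and `test_dl` are placeholders standing in for the adapter subclass, the underlying detector, and a test dataloader like the fixtures used above (none of these names come from the patches):

    import pytorch_lightning as pl

    # Placeholders: an adapter subclass with configure_optimizers defined,
    # the wrapped detector, and a dataloader built like the test fixtures.
    light_model = LitModel(model, metrics=[COCOMetric(metric_type=COCOMetricType.bbox)])

    # trainer.test runs the test loop under no-grad/eval, logging the
    # "test_loss"-style keys asserted in the tests above and finalizing metrics.
    trainer = pl.Trainer(gpus=0)
    trainer.test(light_model, dataloaders=test_dl)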