Feature/1124 Added Pytorch Lightning Test support for all models (#1125)
* feature/1124 Added Pytorch Lightning Test support for mmdet

* feature/1124 Added Pytorch Lightning Test support for efficientdet

* feature/1124 Added Pytorch Lightning Test support for torchvision

* feature/1124 Added Pytorch Lightning Test support for yolov5

* feature/1124 Renamed shared evaluation method for mmdet

* feature/1124 Renamed shared evaluation method for efficientdet

* feature/1124 Renamed shared evaluation method for torchvision

* feature/1124 Renamed shared evaluation method for yolov5

* feature/1124 Added test for PL test step of efficientdet

* feature/1124 Added test for PL test step of mmdet

* feature/1124 Added test for PL test step of torchvision models

* feature/1124 Added test for PL test step of yolov5

* feature/1124 Updated object detection getting started guide to add PL test

* feature/1124 Updated environment.yml to fix icevision version not found

* feature/1124 Added unit tests for efficientdet lightning model adapter

* feature/1124 Added unit tests for mmdet lightning model adapter

* feature/1124 Added unit tests for torchvision lightning model adapter

* feature/1124 Added unit tests for yolov5 lightning model adapter

* feature/1124 Added unit tests for fastai unet lightning model adapter

* feature/1124 Fixed memory consumption in PL tests
AlexandreBrown authored Jul 26, 2022
1 parent 356d29b commit ca73528
Showing 27 changed files with 1,546 additions and 66 deletions.
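In practical terms, every Lightning model adapter touched by this commit gains test_step/test_epoch_end, so a trained adapter can be evaluated with Trainer.test the same way it is fitted. A hedged usage sketch mirroring the notebook cell added further down in this diff (model_type, model, valid_ds and metrics are assumed to come from the getting-started notebook and are not defined on this page):

import pytorch_lightning as pl
from torch.optim import Adam

# Assumed from the getting-started notebook: model_type (e.g. models.ross.efficientdet),
# a trained `model`, the validation dataset `valid_ds`, and a list of `metrics`.
class LightModel(model_type.lightning.ModelAdapter):
    def configure_optimizers(self):
        return Adam(self.parameters(), lr=1e-4)

light_model = LightModel(model, metrics=metrics)
test_dl = model_type.valid_dl(valid_ds, batch_size=8, num_workers=4, shuffle=False)

trainer = pl.Trainer()
trainer.test(light_model, test_dl)  # runs the new test_step per batch, then test_epoch_end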
2 changes: 1 addition & 1 deletion environment.yml
@@ -13,7 +13,7 @@ dependencies:
   - fastai >=2.5.2,<2.6
   - pip
   - pip:
-      - icevision[all]==0.12.0rc1
+      - icevision[all]==0.12.0
       - --find-links https://download.openmmlab.com/mmcv/dist/cu102/torch1.10.0/index.html
       - mmcv-full==1.3.17
       - mmdet==2.17.0
38 changes: 28 additions & 10 deletions icevision/models/fastai/unet/lightning.py
@@ -33,28 +33,46 @@ def training_step(self, batch, batch_idx):
         (xb, yb), _ = batch
         preds = self(xb)

-        loss = self.loss_func(preds, yb)
+        loss = self.compute_loss(preds, yb)

         self.log("train_loss", loss)

         return loss

+    def compute_loss(self, preds, yb):
+        return self.loss_func(preds, yb)
+
     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch

-        with torch.no_grad():
-            preds = self(xb)
-            loss = self.loss_func(preds, yb)
+        preds = self(xb)
+        loss = self.compute_loss(preds, yb)

-            preds = unet.convert_raw_predictions(
-                batch=xb,
-                raw_preds=preds,
-                records=records,
-            )
+        preds = self.convert_raw_predictions(
+            batch=xb,
+            raw_preds=preds,
+            records=records,
+        )

         self.accumulate_metrics(preds)

-        self.log("val_loss", loss)
+        self.log(f"{loss_log_key}_loss", loss)
+
+    def convert_raw_predictions(self, batch, raw_preds, records):
+        return unet.convert_raw_predictions(
+            batch=batch,
+            raw_preds=raw_preds,
+            records=records,
+        )

     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
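All of the adapters in this commit follow the same template: validation_step and test_step both delegate to a private _shared_eval whose log key is a parameter, and the model-specific pieces (loss computation, raw-prediction conversion) become overridable methods. A self-contained toy illustration of that shape in plain PyTorch Lightning — every name here is made up, none of it is icevision API:

import torch
import pytorch_lightning as pl


class ToyAdapter(pl.LightningModule):
    # Minimal stand-in that mirrors the shared-eval pattern used above.

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def forward(self, x):
        return self.layer(x)

    def compute_loss(self, preds, yb):
        # Hook that a subclass (or a per-model adapter) can override.
        return torch.nn.functional.mse_loss(preds, yb)

    def training_step(self, batch, batch_idx):
        xb, yb = batch
        loss = self.compute_loss(self(xb), yb)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        self._shared_eval(batch, loss_log_key="val")

    def test_step(self, batch, batch_idx):
        self._shared_eval(batch, loss_log_key="test")

    def _shared_eval(self, batch, loss_log_key):
        # One evaluation path; only the logged key differs ("val_loss" / "test_loss").
        xb, yb = batch
        loss = self.compute_loss(self(xb), yb)
        self.log(f"{loss_log_key}_loss", loss)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=1e-2)

The per-model diffs that follow (mmdet, efficientdet, torchvision, yolov5) are instances of this same shape; they differ only in how the loss and the raw predictions are produced.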
28 changes: 16 additions & 12 deletions icevision/models/mmdet/lightning/model_adapter.py
@@ -15,7 +15,7 @@


 class MMDetModelAdapter(LightningModelAdapter, ABC):
-    """Lightning module specialized for EfficientDet, with metrics support.
+    """Lightning module specialized for MMDet, with metrics support.
     The methods `forward`, `training_step`, `validation_step`, `validation_epoch_end`
     are already overriden.
@@ -45,30 +45,34 @@ def training_step(self, batch, batch_idx):
         outputs = self.model.train_step(data=data, optimizer=None)

         for k, v in outputs["log_vars"].items():
-            self.log(f"train/{k}", v)
+            self.log(f"train_{k}", v)

         return outputs["loss"]

     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         data, records = batch

         self.model.eval()
-        with torch.no_grad():
-            outputs = self.model.train_step(data=data, optimizer=None)
-            raw_preds = self.model.forward_test(
-                imgs=[data["img"]], img_metas=[data["img_metas"]]
-            )
+        outputs = self.model.train_step(data=data, optimizer=None)
+        raw_preds = self.model.forward_test(
+            imgs=[data["img"]], img_metas=[data["img_metas"]]
+        )

         preds = self.convert_raw_predictions(
             batch=data, raw_preds=raw_preds, records=records
         )
         self.accumulate_metrics(preds)

         for k, v in outputs["log_vars"].items():
-            self.log(f"valid/{k}", v)
-
-        # TODO: is train and eval model automatically set by lighnting?
-        self.model.train()
+            self.log(f"{loss_log_key}_{k}", v)

     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
42 changes: 30 additions & 12 deletions icevision/models/ross/efficientdet/lightning/model_adapter.py
@@ -31,31 +31,49 @@ def training_step(self, batch, batch_idx):
         (xb, yb), records = batch
         preds = self(xb, yb)

-        loss = efficientdet.loss_fn(preds, yb)
+        loss = self.compute_loss(preds, yb)

         for k, v in preds.items():
-            self.log(f"train/{k}", v)
+            self.log(f"train_{k}", v)

         return loss

+    def compute_loss(self, preds, yb):
+        return efficientdet.loss_fn(preds, yb)
+
     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch

-        with torch.no_grad():
-            raw_preds = self(xb, yb)
-            preds = efficientdet.convert_raw_predictions(
-                batch=(xb, yb),
-                raw_preds=raw_preds["detections"],
-                records=records,
-                detection_threshold=0.0,
-            )
-            loss = efficientdet.loss_fn(raw_preds, yb)
+        raw_preds = self(xb, yb)
+
+        preds = self.convert_raw_predictions(xb, yb, raw_preds, records)
+
+        self.compute_loss(raw_preds, yb)

         self.accumulate_metrics(preds)

         for k, v in raw_preds.items():
             if "loss" in k:
-                self.log(f"valid/{k}", v)
+                self.log(f"{loss_log_key}_{k}", v)
+
+    def convert_raw_predictions(self, xb, yb, raw_preds, records):
+        # Note: raw_preds["detections"] key is available only during Pytorch Lightning validation/test step
+        # Calling the method manually (instead of letting the Trainer call it) will raise an exception.
+        return efficientdet.convert_raw_predictions(
+            batch=(xb, yb),
+            raw_preds=raw_preds["detections"],
+            records=records,
+            detection_threshold=0.0,
+        )

     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
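Because compute_loss and convert_raw_predictions are now instance methods instead of inlined calls, a user-defined adapter can adjust them without re-implementing _shared_eval. A hedged sketch — the subclass name, the 0.5 threshold and the optimizer are illustrative, and the parent class is assumed to be exposed as efficientdet.lightning.ModelAdapter, icevision's usual adapter layout:

import torch
from icevision.models.ross import efficientdet


class StricterEffDetAdapter(efficientdet.lightning.ModelAdapter):
    # Hypothetical subclass: reuse the shared eval loop above, but drop
    # low-score detections instead of using detection_threshold=0.0.
    def convert_raw_predictions(self, xb, yb, raw_preds, records):
        return efficientdet.convert_raw_predictions(
            batch=(xb, yb),
            raw_preds=raw_preds["detections"],
            records=records,
            detection_threshold=0.5,
        )

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-4)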
36 changes: 24 additions & 12 deletions icevision/models/torchvision/lightning_model_adapter.py
@@ -29,26 +29,38 @@ def training_step(self, batch, batch_idx):
         (xb, yb), records = batch
         preds = self(xb, yb)

-        loss = loss_fn(preds, yb)
+        loss = self.compute_loss(preds, yb)
         self.log("train_loss", loss)

         return loss

+    def compute_loss(self, preds, yb):
+        return loss_fn(preds, yb)
+
     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch
-        with torch.no_grad():
-            self.train()
-            train_preds = self(xb, yb)
-            loss = loss_fn(train_preds, yb)
-
-            self.eval()
-            raw_preds = self(xb)
-            preds = self.convert_raw_predictions(
-                batch=batch, raw_preds=raw_preds, records=records
-            )
-            self.accumulate_metrics(preds=preds)
+        self.train()
+        preds = self(xb, yb)
+        loss = self.compute_loss(preds, yb)
+
+        self.eval()
+        raw_preds = self(xb)
+        preds = self.convert_raw_predictions(
+            batch=batch, raw_preds=raw_preds, records=records
+        )
+        self.accumulate_metrics(preds=preds)

-        self.log("val_loss", loss)
+        self.log(f"{loss_log_key}_loss", loss)

     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
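The train()/eval() toggling kept inside _shared_eval above reflects how torchvision detection models behave: in train mode they return a dict of losses, in eval mode a list of detections, so a single forward pass cannot give both. A standalone demonstration of that behavior with plain torchvision (no icevision involved, untrained weights):

import torch
import torchvision

# num_classes=2 means background plus one object class; pretrained=False avoids a download.
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
    pretrained=False, num_classes=2
)
images = [torch.rand(3, 64, 64)]
targets = [{"boxes": torch.tensor([[0.0, 0.0, 10.0, 10.0]]),
            "labels": torch.tensor([1])}]

model.train()
loss_dict = model(images, targets)  # dict of losses, e.g. loss_classifier, loss_box_reg

model.eval()
with torch.no_grad():
    detections = model(images)      # list of dicts with boxes, labels, scores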
41 changes: 30 additions & 11 deletions icevision/models/ultralytics/yolov5/lightning/model_adapter.py
@@ -40,22 +40,41 @@ def training_step(self, batch, batch_idx):
         return loss

     def validation_step(self, batch, batch_idx):
+        self._shared_eval(batch, loss_log_key="val")
+
+    def _shared_eval(self, batch, loss_log_key):
         (xb, yb), records = batch

-        with torch.no_grad():
-            inference_out, training_out = self(xb)
-            preds = yolov5.convert_raw_predictions(
-                batch=xb,
-                raw_preds=inference_out,
-                records=records,
-                detection_threshold=0.001,
-                nms_iou_threshold=0.6,
-            )
-            loss = self.compute_loss(training_out, yb)[0]
+        inference_out, training_out = self(xb)
+        preds = self.convert_raw_predictions(
+            batch=xb,
+            raw_preds=inference_out,
+            records=records,
+            detection_threshold=0.001,
+            nms_iou_threshold=0.6,
+        )
+        loss = self.compute_loss(training_out, yb)[0]

         self.accumulate_metrics(preds)

-        self.log("val_loss", loss)
+        self.log(f"{loss_log_key}_loss", loss)
+
+    def convert_raw_predictions(
+        self, batch, raw_preds, records, detection_threshold, nms_iou_threshold
+    ):
+        return yolov5.convert_raw_predictions(
+            batch=batch,
+            raw_preds=raw_preds,
+            records=records,
+            detection_threshold=detection_threshold,
+            nms_iou_threshold=nms_iou_threshold,
+        )

     def validation_epoch_end(self, outs):
         self.finalize_metrics()
+
+    def test_step(self, batch, batch_idx):
+        self._shared_eval(batch=batch, loss_log_key="test")
+
+    def test_epoch_end(self, outs):
+        self.finalize_metrics()
24 changes: 21 additions & 3 deletions notebooks/getting_started_object_detection.ipynb
@@ -802,6 +802,24 @@
     "trainer.fit(light_model, train_dl, valid_dl)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Testing using Pytorch Lightning\n",
+    "For testing, it is recommended to use a separate test dataset that the model did not see during training but for demonstration purposes we'll re-use the validation dataset."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "trainer = pl.Trainer()\n",
+    "trainer.test(light_model, valid_dl)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {
@@ -961,7 +979,7 @@
    "provenance": []
   },
   "kernelspec": {
-   "display_name": "Python 3.8.5 ('icevision')",
+   "display_name": "Python 3.10.4 64-bit",
    "language": "python",
    "name": "python3"
   },
@@ -975,7 +993,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.5"
+   "version": "3.10.4"
   },
  "metadata": {
   "interpreter": {
@@ -1002,7 +1020,7 @@
   },
  "vscode": {
   "interpreter": {
-   "hash": "8de9f07afee82c69462511c5dd73ef92dc31ce4377be5cdd30293ad211c0627b"
+   "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
   }
  },
 "widgets": {
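The markdown cell added above recommends a held-out test set rather than re-using valid_dl. A hedged sketch of what that could look like in the notebook — test_records, valid_tfms, model_type and light_model are assumed to exist from earlier cells and are not part of this commit:

from icevision.all import *  # provides Dataset, as in the notebook's first cell
import pytorch_lightning as pl

test_ds = Dataset(test_records, valid_tfms)  # hypothetical held-out records
test_dl = model_type.valid_dl(test_ds, batch_size=8, num_workers=4, shuffle=False)

trainer = pl.Trainer()
trainer.test(light_model, test_dl)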
1 change: 0 additions & 1 deletion tests/engines/lightning/test_lightning_model_adapter.py
@@ -16,7 +16,6 @@ class DummLightningModelAdapter(LightningModelAdapter):
     pass


-# test if finalize metrics reports metrics correctly
 def test_finalze_metrics_reports_metrics_correctly(mocker):
     mocker.patch(
         "icevision.engines.lightning.lightning_model_adapter.LightningModelAdapter.log"