Merge branch 'main' into feat/segmentation-track-best-epoch
AlessandroPolidori committed Jan 24, 2024
2 parents b246d98 + d488db9 commit 91711b9
Showing 5 changed files with 68 additions and 16 deletions.
17 changes: 17 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,23 @@
# Changelog
All notable changes to this project will be documented in this file.

+### [1.5.5]
+
+#### Added
+
+- Add best epoch and last epoch in segmentation analysis report
+
+### [1.5.4]
+
+#### Added
+
+- Add test for half precision export
+
+#### Fixed
+
+- Fix half precision export not working properly with onnx iobindings
+- Change full precision tolerance to avoid test failures
+
### [1.5.3]

#### Fixed
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "quadra"
-version = "1.5.3"
+version = "1.5.5"
description = "Deep Learning experiment orchestration library"
authors = [
"Federico Belotti <[email protected]>",
2 changes: 1 addition & 1 deletion quadra/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "1.5.3"
+__version__ = "1.5.5"


def get_version():
2 changes: 1 addition & 1 deletion quadra/models/evaluation.py
@@ -247,7 +247,7 @@ def _forward_from_pytorch(self, input_dict: dict[str, torch.Tensor]):
device_type=device_type,
# Weirdly enough onnx wants 0 for cpu
device_id=0 if device_type == "cpu" else int(self.device.split(":")[1]),
-element_type=np.float32,
+element_type=np.float16 if v.dtype == torch.float16 else np.float32,
shape=tuple(v.shape),
buffer_ptr=v.data_ptr(),
)
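Note on the evaluation.py fix: ONNX Runtime's IO binding reads the tensor's raw memory through `buffer_ptr`, so `element_type` must match the buffer's actual dtype; binding an fp16 buffer as `np.float32` misinterprets the bytes. A minimal sketch of the same pattern in isolation (the model path, input/output names, and shape are placeholders, not quadra's API):

```python
import numpy as np
import onnxruntime as ort
import torch

# Placeholder model and tensor; any ONNX model with an fp16 input works.
session = ort.InferenceSession("model.onnx", providers=["CUDAExecutionProvider"])
binding = session.io_binding()

v = torch.rand(1, 3, 224, 224, dtype=torch.float16, device="cuda:0")
binding.bind_input(
    name="input",
    device_type="cuda",
    device_id=0,
    # Must match the buffer's dtype: an fp16 buffer bound as fp32 reads garbage.
    element_type=np.float16 if v.dtype == torch.float16 else np.float32,
    shape=tuple(v.shape),
    buffer_ptr=v.data_ptr(),
)
binding.bind_output("output")  # let ONNX Runtime allocate the output
session.run_with_iobinding(binding)
result = binding.copy_outputs_to_cpu()[0]
```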
61 changes: 48 additions & 13 deletions tests/models/test_export.py
@@ -23,6 +23,7 @@
smp_resnet18_unetplusplus,
vit_tiny_patch16_224,
)
+from quadra.utils.tests.helpers import get_quadra_test_device

try:
import onnx # noqa
@@ -44,16 +45,19 @@


@torch.inference_mode()
-def check_export_model_outputs(tmp_path: Path, model: nn.Module, export_types: list[str], input_shapes: tuple[Any]):
+def check_export_model_outputs(
+    tmp_path: Path, model: nn.Module, export_types: list[str], input_shapes: tuple[Any], half_precision: bool = False
+):
exported_models = {}
+    device = get_quadra_test_device()

for export_type in export_types:
if export_type == "torchscript":
out = export_torchscript_model(
model=model,
input_shapes=input_shapes,
output_path=tmp_path,
-                half_precision=False,
+                half_precision=half_precision,
)

torchscript_model_path, input_shapes = out
@@ -64,7 +68,7 @@ def check_export_model_outputs(tmp_path: Path, model: nn.Module, export_types: list[str], input_shapes: tuple[Any]):
output_path=tmp_path,
onnx_config=ONNX_CONFIG,
input_shapes=input_shapes,
-                half_precision=False,
+                half_precision=half_precision,
)

onnx_model_path, input_shapes = out
@@ -84,22 +88,23 @@ def check_export_model_outputs(tmp_path: Path, model: nn.Module, export_types: list[str], input_shapes: tuple[Any]):

models = []
for export_type, model_path in exported_models.items():
-        model = import_deployment_model(model_path=model_path, inference_config=inference_config, device="cpu")
+        model = import_deployment_model(model_path=model_path, inference_config=inference_config, device=device)
models.append(model)

-    inp = torch.rand((1, *input_shapes[0]), dtype=torch.float32)
+    inp = torch.rand((1, *input_shapes[0]), dtype=torch.float32 if not half_precision else torch.float16, device=device)

outputs = []

for model in models:
outputs.append(model(inp))

+    tolerance = 1e-4 if not half_precision else 1e-2
for i in range(len(outputs) - 1):
if isinstance(outputs[i], Sequence):
for j in range(len(outputs[i])):
-                assert torch.allclose(outputs[i][j], outputs[i + 1][j], atol=1e-5)
+                assert torch.allclose(outputs[i][j], outputs[i + 1][j], atol=tolerance)
else:
-            assert torch.allclose(outputs[i], outputs[i + 1], atol=1e-5)
+            assert torch.allclose(outputs[i], outputs[i + 1], atol=tolerance)


@pytest.mark.skipif(not ONNX_AVAILABLE, reason="ONNX not available")
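The tolerance split above tracks the precision of the two formats: float16 machine epsilon is 2**-10 ≈ 9.8e-4 (about three decimal digits), while float32's is 2**-23 ≈ 1.2e-7, so rounding error accumulated across a full forward pass needs far more headroom in half precision. The epsilons are easy to confirm:

```python
import torch

# Machine epsilon: smallest eps such that 1.0 + eps != 1.0 in each format.
print(torch.finfo(torch.float16).eps)  # 0.0009765625 (2**-10)
print(torch.finfo(torch.float32).eps)  # 1.1920928955078125e-07 (2**-23)
```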
@@ -113,12 +118,22 @@ def check_export_model_outputs(tmp_path: Path, model: nn.Module, export_types: list[str], input_shapes: tuple[Any]):
pytest.lazy_fixture("vit_tiny_patch16_224"),
],
)
-def test_classification_models_export(tmp_path: Path, model: nn.Module):
+@pytest.mark.parametrize("half_precision", [False, True])
+def test_classification_models_export(tmp_path: Path, model: nn.Module, half_precision: bool):
+    if half_precision and get_quadra_test_device() == "cpu":
+        pytest.skip("Half precision not supported on CPU")
+
export_types = ["onnx", "torchscript"]

input_shapes = [(3, 224, 224)]

-    check_export_model_outputs(tmp_path=tmp_path, model=model, export_types=export_types, input_shapes=input_shapes)
+    check_export_model_outputs(
+        tmp_path=tmp_path,
+        model=model,
+        export_types=export_types,
+        input_shapes=input_shapes,
+        half_precision=half_precision,
+    )


@pytest.mark.skipif(not ONNX_AVAILABLE, reason="ONNX not available")
@@ -129,12 +144,22 @@ def test_classification_models_export(tmp_path: Path, model: nn.Module):
pytest.lazy_fixture("smp_resnet18_unetplusplus"),
],
)
-def test_segmentation_models_export(tmp_path: Path, model: nn.Module):
+@pytest.mark.parametrize("half_precision", [False, True])
+def test_segmentation_models_export(tmp_path: Path, model: nn.Module, half_precision: bool):
+    if half_precision and get_quadra_test_device() == "cpu":
+        pytest.skip("Half precision not supported on CPU")
+
export_types = ["onnx", "torchscript"]

input_shapes = [(3, 224, 224)]

-    check_export_model_outputs(tmp_path=tmp_path, model=model, export_types=export_types, input_shapes=input_shapes)
+    check_export_model_outputs(
+        tmp_path=tmp_path,
+        model=model,
+        export_types=export_types,
+        input_shapes=input_shapes,
+        half_precision=half_precision,
+    )


@pytest.mark.skipif(not ONNX_AVAILABLE, reason="ONNX not available")
@@ -147,12 +172,22 @@ def test_segmentation_models_export(tmp_path: Path, model: nn.Module):
pytest.lazy_fixture("efficient_ad_small"),
],
)
-def test_anomaly_detection_models_export(tmp_path: Path, model: nn.Module):
+@pytest.mark.parametrize("half_precision", [False, True])
+def test_anomaly_detection_models_export(tmp_path: Path, model: nn.Module, half_precision: bool):
+    if half_precision and get_quadra_test_device() == "cpu":
+        pytest.skip("Half precision not supported on CPU")
+
export_types = ["onnx", "torchscript"]

if isinstance(model, EfficientAdModel):
input_shapes = [(3, 256, 256)]
else:
input_shapes = [(3, 224, 224)]

-    check_export_model_outputs(tmp_path=tmp_path, model=model, export_types=export_types, input_shapes=input_shapes)
+    check_export_model_outputs(
+        tmp_path=tmp_path,
+        model=model,
+        export_types=export_types,
+        input_shapes=input_shapes,
+        half_precision=half_precision,
+    )
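`get_quadra_test_device()` is what lets the same suite run full precision on CPU and pick up the half-precision cases on GPU. Its implementation is not part of this diff; a hypothetical sketch of such a helper, assuming it reads a `QUADRA_TEST_DEVICE` environment variable (both the variable name and the `cpu` fallback are assumptions, not quadra's actual contract):

```python
import os


def get_quadra_test_device() -> str:
    """Return the device string used by the test suite (e.g. "cpu" or "cuda:0").

    Hypothetical sketch: the real helper lives in quadra.utils.tests.helpers
    and may resolve the device differently.
    """
    return os.environ.get("QUADRA_TEST_DEVICE", "cpu")
```

Under that assumption, `QUADRA_TEST_DEVICE=cuda:0 pytest tests/models/test_export.py` would exercise the new half-precision parametrizations, while plain CPU runs skip them via `pytest.skip`.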
