
Commit

Fix object detection sample and bump datasets version (#2250)
### Changes
Fixed a regression introduced in #2196 for the object detection samples
and bumped the `datasets` version for the movement sparsity tests to fix
a `Loading a dataset cached in a LocalFileSystem is not supported` error
in the associated test cases.

### Reason for changes
Torch nightly tests fail otherwise.

### Related tickets
N/A

### Tests
torch_nightly
vshampor authored Nov 10, 2023
1 parent 29bfc89 commit 8b46c0c
Showing 5 changed files with 28 additions and 21 deletions.
12 changes: 6 additions & 6 deletions examples/torch/common/export.py
@@ -11,7 +11,9 @@
 import torch

 from nncf.api.compression import CompressionAlgorithmController
+from nncf.torch.exporter import count_tensors
 from nncf.torch.exporter import generate_input_names_list
+from nncf.torch.exporter import get_export_args


 def export_model(ctrl: CompressionAlgorithmController, save_path: str, no_strip_on_export: bool) -> None:
@@ -26,11 +28,9 @@ def export_model(ctrl: CompressionAlgorithmController, save_path: str, no_strip_
     model = ctrl.model if no_strip_on_export else ctrl.strip()

     model = model.eval().cpu()
-    input_names = generate_input_names_list(len(model.nncf.input_infos))
-    input_tensor_list = []
-    for info in model.nncf.input_infos:
-        input_shape = tuple([1] + list(info.shape)[1:])
-        input_tensor_list.append(torch.rand(input_shape))
+
+    export_args = get_export_args(model)
+    input_names = generate_input_names_list(count_tensors(export_args))

     with torch.no_grad():
-        torch.onnx.export(model, tuple(input_tensor_list), save_path, input_names=input_names)
+        torch.onnx.export(model, export_args, save_path, input_names=input_names)
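For illustration only, here is a toy sketch (not NNCF's actual `count_tensors` implementation) of why the sample now derives input names from the export arguments rather than from `len(model.nncf.input_infos)`: the number of ONNX graph inputs equals the number of tensor leaves in the `(positional..., kwargs)` structure returned by `get_export_args`, which handles keyword and nested inputs uniformly.

```python
# Toy illustration only - NOT NNCF's count_tensors. It mimics counting the tensor
# leaves of an export-args structure shaped like get_export_args' return value:
# positional tensors followed by a dict of keyword inputs.
from typing import Any

import torch


def count_tensor_leaves(obj: Any) -> int:
    """Recursively count torch.Tensor leaves in nested tuples, lists, and dicts."""
    if isinstance(obj, torch.Tensor):
        return 1
    if isinstance(obj, (tuple, list)):
        return sum(count_tensor_leaves(item) for item in obj)
    if isinstance(obj, dict):
        return sum(count_tensor_leaves(value) for value in obj.values())
    return 0


# Hypothetical export args: one positional image tensor and one keyword mask tensor.
export_args = (torch.rand(1, 3, 32, 32), {"mask": torch.ones(1, 32, 32)})
print(count_tensor_leaves(export_args))  # 2 -> two input names would be generated
```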
2 changes: 1 addition & 1 deletion examples/torch/object_detection/main.py
@@ -364,7 +364,7 @@ def create_train_data_loader(batch_size):

 def create_model(config: SampleConfig):
     input_info = FillerInputInfo.from_nncf_config(config.nncf_config)
-    image_size = input_info[0].shape[-1]
+    image_size = input_info.elements[0].shape[-1]
     ssd_net = build_ssd(config.model, config.ssd_params, image_size, config.num_classes, config)
     weights = config.get("weights")
     if weights:
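For readers unfamiliar with the sample's input-info objects, a toy analogue (these are not NNCF's real classes, and the shape is hypothetical) of the corrected access pattern: the per-input descriptors live in an `elements` list, each exposing a `shape` attribute.

```python
# Toy analogue - these are NOT NNCF's real classes; the shape is hypothetical.
from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class ToyInputElement:
    shape: Tuple[int, ...]


@dataclass
class ToyInputInfo:
    elements: List[ToyInputElement]


info = ToyInputInfo(elements=[ToyInputElement(shape=(1, 3, 300, 300))])
image_size = info.elements[0].shape[-1]  # mirrors input_info.elements[0].shape[-1]
print(image_size)  # 300
```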
31 changes: 18 additions & 13 deletions nncf/torch/exporter.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from functools import partial
-from typing import Any, Tuple
+from typing import Any, Optional, Tuple

 import torch
 from torch.onnx import OperatorExportTypes
@@ -20,6 +20,7 @@
 from nncf.telemetry.events import NNCF_PT_CATEGORY
 from nncf.torch.dynamic_graph.graph_tracer import create_dummy_forward_fn
 from nncf.torch.nested_objects_traversal import objwalk
+from nncf.torch.nncf_network import NNCFNetwork
 from nncf.torch.utils import get_model_device
 from nncf.torch.utils import is_tensor

@@ -44,6 +45,21 @@ def counter_fn(x: torch.Tensor) -> torch.Tensor:
     return count


+def get_export_args(model: NNCFNetwork, model_args: Optional[Tuple[Any, ...]] = None) -> Tuple:
+    args, kwargs = model.nncf.input_infos.get_forward_inputs()
+
+    if model_args is not None:
+        args = tuple(list(args) + list(model_args[:-1]))
+        kwargs.update(**model_args[-1])
+
+    def to_single_batch_tensors(obj: torch.Tensor):
+        return obj[0:1]
+
+    args = objwalk(args, is_tensor, to_single_batch_tensors)
+    kwargs = objwalk(kwargs, is_tensor, to_single_batch_tensors)
+    return *args, kwargs  # according to a variant of passing kwargs in torch.onnx.export doc
+
+
 class PTExportFormat:
     ONNX = "onnx"

@@ -126,18 +142,7 @@ def _export_to_onnx(self, save_path: str, opset_version: int) -> None:
         original_device = get_model_device(self._model)
         model = self._model.eval().cpu()

-        args, kwargs = self._model.nncf.input_infos.get_forward_inputs()
-
-        if self._model_args is not None:
-            args = tuple(list(args) + list(self._model_args[:-1]))
-            kwargs.update(**self._model_args[-1])
-
-        def to_single_batch_tensors(obj: torch.Tensor):
-            return obj[0:1]
-
-        args = objwalk(args, is_tensor, to_single_batch_tensors)
-        kwargs = objwalk(kwargs, is_tensor, to_single_batch_tensors)
-        export_args = (*args, kwargs)  # according to a variant of passing kwargs in torch.onnx.export doc
+        export_args = get_export_args(self._model, model_args=self._model_args)

         if self._input_names is not None:
             input_names = self._input_names
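The trailing-dict return shape of `get_export_args` (see the in-code comment above) follows the documented `torch.onnx.export` convention: all elements of `args` except a trailing dict are passed positionally, and the trailing dict supplies keyword arguments to the model's `forward`. A minimal, self-contained sketch (toy model and file name, not NNCF code) of that convention with the TorchScript-based exporter:

```python
# Sketch of the torch.onnx.export args convention that get_export_args relies on.
import torch
from torch import nn


class ToyModel(nn.Module):
    def forward(self, x, scale=None):
        return x if scale is None else x * scale


model = ToyModel().eval()
# Positional inputs first, keyword inputs collected in a trailing dict -
# the same shape get_export_args produces.
export_args = (torch.rand(1, 3, 8, 8), {"scale": torch.tensor(2.0)})

with torch.no_grad():
    torch.onnx.export(model, export_args, "toy.onnx", input_names=["x", "scale"])
```

This is why the exporter can now hand `export_args` straight to `torch.onnx.export` without flattening keyword inputs by hand.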
2 changes: 2 additions & 0 deletions tests/torch/quantization/test_sanity_sample.py
@@ -14,6 +14,7 @@
 from typing import Dict

 import pytest
+import torch
 from torch import nn

 from nncf import NNCFConfig
@@ -314,6 +315,7 @@ def setup_spy(self, mocker):

         ctrl_mock = mocker.MagicMock(spec=QuantizationController)
         model_mock = mocker.MagicMock(spec=nn.Module)
+        mocker.patch("examples.torch.common.export.get_export_args", return_value=((torch.Tensor([1, 1]),), {}))
         create_model_location = sample_location + ".create_compressed_model"
         create_model_patch = mocker.patch(create_model_location)
2 changes: 1 addition & 1 deletion tests/torch/requirements.txt
@@ -13,7 +13,7 @@ pyparsing<3.0
 transformers[torch]~=4.30.0

 # Required for movement_sparsity tests
-datasets~=2.12.0
+datasets~=2.14.0
 evaluate==0.3.0
 timm==0.9.2
 openvino-dev==2023.1
