Explicitly set batch=1 for NNCF in order to avoid issue with Wave2Vec #312

Merged on Jun 7, 2023 · 7 commits
optimum/intel/openvino/configuration.py (2 additions & 2 deletions)
@@ -56,10 +56,10 @@ def __init__(
         self._enable_standard_onnx_export_option()
         self.optimum_version = kwargs.pop("optimum_version", None)
 
-    def add_input_info(self, model_inputs: Dict):
+    def add_input_info(self, model_inputs: Dict, force_batch_one: bool = False):
         self.input_info = [
             {
-                "sample_size": list(value.shape),
+                "sample_size": [1] + list(value.shape[1:]) if force_batch_one else list(value.shape),
                 "type": "long" if value.dtype is torch.int64 else "float",
                 "keyword": name,
             }
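For illustration, a minimal standalone sketch of what the new `force_batch_one` flag does to the `sample_size` recorded for NNCF; the Wav2Vec2-style input shape here is an assumption, not taken from the diff:

```python
import torch

# Hypothetical batch of raw audio shaped like Wav2Vec2 inputs: (batch, samples).
value = torch.zeros(8, 16000)

force_batch_one = True  # what the trainer passes once pruning is enabled
sample_size = [1] + list(value.shape[1:]) if force_batch_one else list(value.shape)
print(sample_size)  # [1, 16000] instead of [8, 16000]
```

With the flag off, the recorded shape keeps the dataloader's actual batch size, which matches the old behavior.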
optimum/intel/openvino/trainer.py (11 additions & 1 deletion)
@@ -168,7 +168,8 @@ def __init__(
             model_inputs = next(iter(train_dataloader))
             for label_name in self.label_names:
                 model_inputs.pop(label_name)
-            self.ov_config.add_input_info(model_inputs)
+            force_batch_one = self._is_pruning_enabled()
+            self.ov_config.add_input_info(model_inputs, force_batch_one)
             nncf_config = NNCFConfig.from_dict(self.ov_config.__dict__)
             nncf_config.register_extra_structs(
                 [
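For context, a hedged sketch of the kind of dictionary that reaches `NNCFConfig.from_dict` after this change, assuming `nncf` is installed; the input shape is invented, and a plain `quantization` entry stands in for the pruning configuration this PR targets:

```python
from nncf import NNCFConfig

# Minimal sketch: with pruning enabled, every "input_info" entry is pinned to
# batch size 1, regardless of the training batch size.
nncf_config = NNCFConfig.from_dict(
    {
        "input_info": [
            {"sample_size": [1, 16000], "type": "float", "keyword": "input_values"}
        ],
        "compression": {"algorithm": "quantization"},
    }
)
print(nncf_config["input_info"])  # [{'sample_size': [1, 16000], ...}]
```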
@@ -770,3 +771,13 @@ def _set_task(self):
         if self.task is None:
             raise ValueError("The model task defining the model topology needs to be specified for the ONNX export.")
         self.task = _TASK_ALIASES.get(self.task, self.task)
+
+    def _is_pruning_enabled(self):
+        compression = self.ov_config.compression
+        if isinstance(compression, dict) and compression["algorithm"] == "movement_pruning":
+            return True
+        if isinstance(compression, list):
+            for algo_config in compression:
+                if algo_config["algorithm"] == "movement_pruning":
+                    return True
+        return False
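To make the helper's contract concrete, here is a standalone restatement of the same check with illustrative config values; none of these literals come from the PR:

```python
from typing import Dict, List, Union

def is_pruning_enabled(compression: Union[Dict, List, None]) -> bool:
    # Standalone restatement of the trainer helper, for illustration only.
    if isinstance(compression, dict):
        return compression["algorithm"] == "movement_pruning"
    if isinstance(compression, list):
        return any(cfg["algorithm"] == "movement_pruning" for cfg in compression)
    return False

# A single pruning config enables the batch=1 override...
assert is_pruning_enabled({"algorithm": "movement_pruning"})
# ...and so does a combined list, e.g. pruning jointly with quantization.
assert is_pruning_enabled([{"algorithm": "quantization"}, {"algorithm": "movement_pruning"}])
# Anything else leaves the batch dimension untouched.
assert not is_pruning_enabled({"algorithm": "quantization"})
assert not is_pruning_enabled(None)
```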