From 1214ce84af852cd00aa80e1595e14e2fff959f1f Mon Sep 17 00:00:00 2001 From: Nikolay Date: Mon, 21 Oct 2024 15:47:49 +0200 Subject: [PATCH 1/3] Fix for inputs with batch size != 1 in data-aware weight compression --- .../weight_compression/algorithm.py | 2 + .../algorithms/weight_compression/awq.py | 3 +- .../algorithms/weight_compression/gptq.py | 6 +- .../weight_compression/lora_correction.py | 3 +- .../weight_compression/openvino_backend.py | 3 +- .../weight_compression/scale_estimation.py | 3 +- .../weight_compression/torch_backend.py | 2 +- .../weight_compression/torch_fx_backend.py | 2 +- .../weight_compression/weight_lowering.py | 6 +- nncf/quantization/quantize_model.py | 32 ++--- .../native/quantization/test_quantize_api.py | 2 +- .../quantization/test_weights_compression.py | 135 ++++++++++++++---- tests/torch/fx/test_compress_weights.py | 5 +- tests/torch/ptq/test_weights_compression.py | 5 +- 14 files changed, 147 insertions(+), 62 deletions(-) diff --git a/nncf/quantization/algorithms/weight_compression/algorithm.py b/nncf/quantization/algorithms/weight_compression/algorithm.py index 3e3ad32e946..4dcc4822c7c 100644 --- a/nncf/quantization/algorithms/weight_compression/algorithm.py +++ b/nncf/quantization/algorithms/weight_compression/algorithm.py @@ -579,6 +579,8 @@ def _collect_statistics(self, dataset: Dataset, nodes: List[NNCFNode], graph: NN # activation as an input. matmul_input_to_output_nodes_map = defaultdict(list) for node in matmul_nodes: + if node.layer_attributes.input_attributes["transpose"]: + raise nncf.UnsupportedModelError("Transposed input is not supported") act_node, output_port_id = self._get_activation_node_and_port(node, graph) matmul_input_to_output_nodes_map[(act_node, output_port_id)].append(node) diff --git a/nncf/quantization/algorithms/weight_compression/awq.py b/nncf/quantization/algorithms/weight_compression/awq.py index 15e7f797489..1b43f5339c4 100644 --- a/nncf/quantization/algorithms/weight_compression/awq.py +++ b/nncf/quantization/algorithms/weight_compression/awq.py @@ -13,6 +13,7 @@ from dataclasses import dataclass from typing import Any, Dict, List, Optional, TypeVar +import nncf from nncf import Dataset from nncf import nncf_logger from nncf.common.factory import ModelTransformerFactory @@ -117,7 +118,7 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = OVAWQAlgoAlgoBackend(model, self.name_to_node_mapping) self._patterns = self._backend_entity.get_awq_patterns() else: - raise RuntimeError( + raise nncf.UnsupportedBackendError( "Cannot return backend-specific AWQ entity because {} is not supported!".format(model_backend.value) ) diff --git a/nncf/quantization/algorithms/weight_compression/gptq.py b/nncf/quantization/algorithms/weight_compression/gptq.py index 80bb38dabdb..59413087b23 100644 --- a/nncf/quantization/algorithms/weight_compression/gptq.py +++ b/nncf/quantization/algorithms/weight_compression/gptq.py @@ -169,9 +169,9 @@ def _calculate_hessian(self, node: NNCFNode, inputs: List[Tensor]) -> Tensor: nsamples = 0 if node.metatype in self._backend_entity.convolution_metatypes: - raise RuntimeError("Convolution metatypes are not supported") + raise nncf.UnsupportedModelError("Convolution metatypes are not supported") if node.layer_attributes.input_attributes["transpose"]: - raise RuntimeError("Transpose is not supported") + raise nncf.UnsupportedModelError("Transposed input is not supported") hessian = fns.zeros( (inputs[0].shape[-1], inputs[0].shape[-1]), backend=inputs[0].backend, 
dtype=TensorDataType.float32 @@ -264,7 +264,7 @@ def _quantize_weights( scales.append(scale) else: if self._scale_estimation and block_compression_config.num_bits == 4: - activations = [inp.squeeze()[:, (i1 + i) : (i1 + i + group_size)] for inp in inputs] + activations = [inp.squeeze()[..., (i1 + i) : (i1 + i + group_size)] for inp in inputs] wc_statistics = ScaleEstimation.activations_to_wc_statistics(activations) scale, zero_point = ScaleEstimation.calculate_quantization_params( self._backend_entity, diff --git a/nncf/quantization/algorithms/weight_compression/lora_correction.py b/nncf/quantization/algorithms/weight_compression/lora_correction.py index 710e405c6bb..0c9bb3409ba 100644 --- a/nncf/quantization/algorithms/weight_compression/lora_correction.py +++ b/nncf/quantization/algorithms/weight_compression/lora_correction.py @@ -14,6 +14,7 @@ import matplotlib.pyplot as plt import pandas as pd +import nncf from nncf.common.logging import nncf_logger from nncf.common.tensor_statistics.statistics import WCTensorStatistic from nncf.common.utils.debug import DEBUG_LOG_DIR @@ -178,7 +179,7 @@ def calculate_low_rank_matrices( indexes = do_nf4_quantization(compressed_weight.tensor, compressed_weight.scale, is_normalized_weight=True) fq_weights = do_nf4_dequantization(indexes, compressed_weight.scale, reduction_axis) else: - raise ValueError( + raise nncf.InternalError( f"{mode.value} mode is invalid for Lora Correction algorithm. Supported modes: INT4_SYM, INT4_ASYM, NF4" ) # fq_w + residual = w => residual = w - fq_w diff --git a/nncf/quantization/algorithms/weight_compression/openvino_backend.py b/nncf/quantization/algorithms/weight_compression/openvino_backend.py index bd9aa927262..34b70b05a88 100644 --- a/nncf/quantization/algorithms/weight_compression/openvino_backend.py +++ b/nncf/quantization/algorithms/weight_compression/openvino_backend.py @@ -13,6 +13,7 @@ import openvino as ov from openvino.runtime import opset13 as opset +import nncf from nncf.common.graph import NNCFGraph from nncf.common.graph import NNCFNode from nncf.common.graph.operator_metatypes import OperatorMetatype @@ -236,7 +237,7 @@ def _create_compression_subgraph( elif compression_config.mode == CompressWeightsMode.INT8_ASYM: compression_dtype = ov.Type.u8 else: - raise ValueError(f"{compression_config.mode.value} is not supported.") + raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") original_shape = weight.shape compressed_weight = compress_weight(weight, reduction_axes, compression_config, layer_scales, layer_zero_points) diff --git a/nncf/quantization/algorithms/weight_compression/scale_estimation.py b/nncf/quantization/algorithms/weight_compression/scale_estimation.py index 6c5d3d0106a..0596e94d432 100644 --- a/nncf/quantization/algorithms/weight_compression/scale_estimation.py +++ b/nncf/quantization/algorithms/weight_compression/scale_estimation.py @@ -12,6 +12,7 @@ from copy import deepcopy from typing import Any, Dict, List, Optional, Tuple, TypeVar +import nncf from nncf import Dataset from nncf.common.graph.graph import NNCFGraph from nncf.common.graph.graph import NNCFNode @@ -101,7 +102,7 @@ def _set_backend_entity(self, model: TModel) -> None: self._backend_entity = OVWeightCompressionAlgoBackend(model, self.name_to_node_mapping) else: - raise RuntimeError( + raise nncf.UnsupportedBackendError( "Cannot return backend-specific AWQ entity because {} is not supported!".format(model_backend.value) ) diff --git 
a/nncf/quantization/algorithms/weight_compression/torch_backend.py b/nncf/quantization/algorithms/weight_compression/torch_backend.py index ee0dbd73c7f..6bfa6432748 100644 --- a/nncf/quantization/algorithms/weight_compression/torch_backend.py +++ b/nncf/quantization/algorithms/weight_compression/torch_backend.py @@ -217,7 +217,7 @@ def transform_model( CompressWeightsMode.INT8_SYM, CompressWeightsMode.INT8, ]: - raise ValueError(f"{compression_config.mode.value} is not supported.") + raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") weight_node = get_const_node(wc_params.node_with_weight, wc_params.weight_port_id, graph) weight_name = weight_node.layer_attributes.name diff --git a/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py b/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py index 128aacd67a2..d9b6c70b7a7 100644 --- a/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py +++ b/nncf/quantization/algorithms/weight_compression/torch_fx_backend.py @@ -181,7 +181,7 @@ def transform_model( CompressWeightsMode.INT8_SYM, CompressWeightsMode.INT8, ]: - raise ValueError(f"{compression_config.mode.value} is not supported.") + raise nncf.ParameterNotSupportedError(f"{compression_config.mode.value} is not supported.") weight_node = get_const_node(wc_params.node_with_weight, wc_params.weight_port_id, graph) weight_name = weight_node.node_name weight = self.get_weight(wc_params.node_with_weight, wc_params.weight_port_id, model, graph) diff --git a/nncf/quantization/algorithms/weight_compression/weight_lowering.py b/nncf/quantization/algorithms/weight_compression/weight_lowering.py index ef36a157040..342725c0237 100644 --- a/nncf/quantization/algorithms/weight_compression/weight_lowering.py +++ b/nncf/quantization/algorithms/weight_compression/weight_lowering.py @@ -82,12 +82,14 @@ def reshape_weight_for_grouped_quantization( if isinstance(reduction_axes, tuple) and len(reduction_axes) == 1: reduction_axes = reduction_axes[0] if not isinstance(reduction_axes, int): - raise NotImplementedError( + raise nncf.UnsupportedModelError( f"Group-wise quantization expects a single reduction axis, but given: {reduction_axes}." 
) channel_size = weight.shape[reduction_axes] if channel_size % group_size != 0: - raise nncf.ValidationError(f"Channel size {channel_size} should be divisible by size of group {group_size}") + raise nncf.UnsupportedModelError( + f"Channel size {channel_size} should be divisible by size of group {group_size}" + ) num_groups_per_channel = channel_size // group_size shape = list(weight.shape) # [a1, r, a2] - "r" refers to number of channels along reduction axis diff --git a/nncf/quantization/quantize_model.py b/nncf/quantization/quantize_model.py index 90d69b73747..dc681556596 100644 --- a/nncf/quantization/quantize_model.py +++ b/nncf/quantization/quantize_model.py @@ -161,7 +161,7 @@ def quantize( :rtype: TModel """ if subset_size < 1: - raise ValueError("Subset size must be positive.") + raise nncf.ValidationError("Subset size must be positive.") advanced_parameters = _update_advanced_quantization_parameters(advanced_parameters, calibration_dataset) @@ -471,13 +471,13 @@ def compress_weights( from nncf.torch.quantization.quantize_model import compress_weights_impl as pt_compression_weights_impl if mode not in [CompressWeightsMode.INT8_ASYM, CompressWeightsMode.INT8_SYM]: - raise AttributeError( + raise nncf.ParameterNotSupportedError( "Torch backend supports only INT8_ASYM, INT8_SYM modes for weight compression, " f"but given {mode.value} mode." ) if True in [awq, scale_estimation, gptq, lora_correction]: - raise AttributeError( + raise nncf.ParameterNotSupportedError( "Torch backend does not support 'awq', 'scale_estimation', 'gptq' and 'lora_correction' options. " "Set them to None." ) @@ -487,14 +487,14 @@ def compress_weights( if is_wrapped_model(model): if not model.nncf.trace_parameters: - raise ValueError( + raise nncf.ValidationError( "Tracing capabilities with tracing parameters are required in the PyTorch model " "for nncf.compress_weights(). Please wrap the model using " "nncf.torch.wrap_model(model, example_input, trace_parameters=True) before calling " "nncf.compress_weights()." ) elif dataset is None: - raise AttributeError("Please provide a dataset of at least one element for PyTorch model tracing.") + raise nncf.ValidationError("Please provide a dataset of at least one element for PyTorch model tracing.") else: example_input = next(iter(dataset.get_inference_data())) model = wrap_model(model, example_input=example_input, trace_parameters=True) @@ -507,7 +507,7 @@ def compress_weights( ) if mode not in [CompressWeightsMode.INT8_ASYM, CompressWeightsMode.INT8_SYM]: - raise AttributeError( + raise nncf.ParameterNotSupportedError( "TorchFX backend supports only INT8_ASYM, INT8_SYM modes for weight compression, " f"but given {mode.value} mode." ) @@ -516,12 +516,12 @@ def compress_weights( raise AttributeError("TorchFX backend does not support backup_mode option.") if any((awq, scale_estimation, gptq, lora_correction)): - raise AttributeError( + raise nncf.ParameterNotSupportedError( "TorchFX backend does not support 'awq', 'scale_estimation', 'gptq'," "and 'lora_correction' options. Set them to None." 
) if dataset: - raise AttributeError( + raise nncf.ParameterNotSupportedError( "TorchFX only supports data-free weights compression," "Set the 'dataset' option to None" ) compression_weights_impl = fx_compression_weights_impl @@ -532,13 +532,13 @@ def compress_weights( if any((awq, scale_estimation, gptq, lora_correction)) and ( dataset is None or mode == CompressWeightsMode.E2M1 ): - raise AttributeError( + raise nncf.ParameterNotSupportedError( "Scale estimation, AWQ, GPTQ or Lora Correction algorithm is defined, " "but dataset is None or mode is E2M1." ) if gptq and lora_correction: - raise AttributeError( + raise nncf.ValidationError( "Simultaneous use of Lora correction and GPTQ algorithms is not supported. Select one of them." ) @@ -550,13 +550,13 @@ def compress_weights( if group_size is None: group_size = -1 if ratio != 1 or group_size != -1: - raise AttributeError( + raise nncf.ParameterNotSupportedError( "INT8 modes assume per-channel quantization of all layers in 8 bit. " "Default values of `ratio` (1) and `group_size` (-1) parameters can not be overridden" ) if backup_mode is not None: - raise AttributeError("INT8 modes do not support the `backup_mode` option") + raise nncf.ParameterNotSupportedError("INT8 modes do not support the `backup_mode` option") options = { "all_layers": all_layers, @@ -569,7 +569,7 @@ def compress_weights( } unsupported_for_int8 = [name for name, value in options.items() if value is not None] if unsupported_for_int8: - raise AttributeError( + raise nncf.ParameterNotSupportedError( f"INT8 modes do not support {', '.join(unsupported_for_int8)} option(s). Set them to None." ) @@ -598,14 +598,14 @@ def compress_weights( if backup_mode is None: backup_mode = BackupMode.INT8_ASYM if ratio != 1 and dataset is None and sensitivity_metric != SensitivityMetric.WEIGHT_QUANTIZATION_ERROR: - raise AttributeError( + raise nncf.ValidationError( f"Mixed precision selection based on the given sensitivity metric={sensitivity_metric.value} requires " "a dataset, but it's not provided." ) if ratio < 0 or ratio > 1: - raise ValueError(f"The ratio should be between 0 and 1, but ratio={ratio} is specified.") + raise nncf.ValidationError(f"The ratio should be between 0 and 1, but ratio={ratio} is specified.") if subset_size is None or subset_size <= 0: - raise ValueError(f"The subset_size value should be positive, but subset_size={subset_size} is given.") + raise nncf.ValidationError(f"The subset_size value should be positive, but subset_size={subset_size} is given.") if compression_weights_impl is None: raise nncf.UnsupportedBackendError(f"Unsupported type of backend: {backend}") diff --git a/tests/openvino/native/quantization/test_quantize_api.py b/tests/openvino/native/quantization/test_quantize_api.py index 4eeca520af8..b272553c985 100644 --- a/tests/openvino/native/quantization/test_quantize_api.py +++ b/tests/openvino/native/quantization/test_quantize_api.py @@ -32,6 +32,6 @@ def get_mock_model() -> Model: def test_non_positive_subset_size(): model_to_test = get_mock_model() - with pytest.raises(ValueError) as e: + with pytest.raises(nncf.ValidationError) as e: nncf.quantize(model_to_test, Dataset(MockDataset(INPUT_SHAPE)), subset_size=0) assert "Subset size must be positive." 
in e.info diff --git a/tests/openvino/native/quantization/test_weights_compression.py b/tests/openvino/native/quantization/test_weights_compression.py index 36493e28542..2d59777684a 100644 --- a/tests/openvino/native/quantization/test_weights_compression.py +++ b/tests/openvino/native/quantization/test_weights_compression.py @@ -20,11 +20,11 @@ from attr import dataclass from openvino.runtime import opset13 as opset +import nncf from nncf import CompressWeightsMode from nncf import SensitivityMetric from nncf.common.utils.debug import nncf_debug from nncf.data.dataset import Dataset -from nncf.errors import ValidationError from nncf.experimental.common.tensor_statistics.collectors import AggregatorBase from nncf.openvino.graph.node_utils import get_const_value from nncf.parameters import BackupMode @@ -79,23 +79,29 @@ class LMLinearModel(OVReferenceModel): - HIDDEN_DIM = 16 OUTPUT_DIM = 32 + HIDDEN_DIM = 16 INPUT_SHAPE = [1, 24, HIDDEN_DIM] # [B, SeqLen, HiddenDim] - def _create_ov_model(self, transpose_b: bool = True): - input_1 = opset.parameter(self.INPUT_SHAPE, name="Input") + def _create_ov_model(self, transpose_b: bool = True, transpose_a=False, input_shape=None): + self._input_shape = self.INPUT_SHAPE if input_shape is None else input_shape + hdim_axis = -2 if transpose_a else -1 + self._hidden_dim = self._input_shape[hdim_axis] + input_1 = opset.parameter(self._input_shape, name="Input") weight_shape = self.get_weight_shape(transpose_b) data = self._rng.random(weight_shape).astype(np.float32) - matmul = opset.matmul(input_1, data, transpose_a=False, transpose_b=transpose_b, name="MatMul") + matmul = opset.matmul(input_1, data, transpose_a=transpose_a, transpose_b=transpose_b, name="MatMul") result = opset.result(matmul, name="Result") result.get_output_tensor(0).set_names(set(["Result"])) model = ov.Model([result], [input_1]) return model - @classmethod - def get_weight_shape(cls, transpose_b: bool = True): - return [cls.OUTPUT_DIM, cls.HIDDEN_DIM] if transpose_b else [cls.HIDDEN_DIM, cls.OUTPUT_DIM] + @property + def hidden_dim(self): + return self._hidden_dim + + def get_weight_shape(self, transpose_b: bool = True): + return [self.OUTPUT_DIM, self.hidden_dim] if transpose_b else [self.hidden_dim, self.OUTPUT_DIM] def get_next_node(node): @@ -695,12 +701,12 @@ def test_calculate_scale_per_group(desc: CalculateScaleDesc): def test_raise_error_for_many_axes(): - with pytest.raises(RuntimeError): + with pytest.raises(nncf.UnsupportedModelError): reshape_weight_for_grouped_quantization(WEIGHTS_2x4, reduction_axes=(0, 1), group_size=1) def test_raise_error_channel_size_is_not_divisible_by_group_size(): - with pytest.raises(ValidationError): + with pytest.raises(nncf.UnsupportedModelError): reshape_weight_for_grouped_quantization(WEIGHTS_2x4, reduction_axes=(0,), group_size=3) @@ -724,7 +730,7 @@ def test_raise_error_channel_size_is_not_divisible_by_group_size(): ), ) def test_raise_error_with_unsupported_params_for_int8(mode, params): - with pytest.raises(AttributeError): + with pytest.raises(nncf.ParameterNotSupportedError): compress_weights(ov.Model([], []), mode=mode, **params) @@ -734,15 +740,44 @@ def test_raise_error_with_unsupported_params_for_int8(mode, params): ({"dataset": "anything", "lora_correction": True, "gptq": True},), ) def test_raise_error_with_unsupported_params_for_int4(mode, params): - with pytest.raises(AttributeError): + with pytest.raises(nncf.ValidationError): compress_weights(ov.Model([], []), mode=mode, **params) +@pytest.mark.parametrize( + "algo", + ( + 
"lora_correction", + "awq", + "scale_estimation", + "gptq", + ), +) +def test_raise_error_with_unsupported_params_for_e2m1(algo): + with pytest.raises(nncf.ParameterNotSupportedError): + compress_weights(ov.Model([], []), dataset="anything", mode=CompressWeightsMode.E2M1, **{algo: True}) + + +@pytest.mark.parametrize("mode", INT4_NF4_MODES) +@pytest.mark.parametrize( + "algo", + ( + "lora_correction", + "awq", + "scale_estimation", + "gptq", + ), +) +def test_raise_error_with_unsupported_params_for_empty_dataset(mode, algo): + with pytest.raises(nncf.ParameterNotSupportedError): + compress_weights(ov.Model([], []), dataset=None, mode=mode, **{algo: True}) + + @pytest.mark.parametrize("mode", INT4_NF4_MODES) @pytest.mark.parametrize("metric", DATA_BASED_SENSITIVITY_METRICS) def test_raise_error_with_data_metric_and_without_dataset(mode, metric): model = IntegerModel().ov_model - with pytest.raises(AttributeError): + with pytest.raises(nncf.ValidationError): compress_weights(model, mode=mode, sensitivity_metric=metric, group_size=-1, ratio=0.8) @@ -879,7 +914,7 @@ def test_default_subset_value(): def test_invalid_subset_size(subset_size): model = IdentityMatmul().ov_model dataset = Dataset([ACTIVATION]) - with pytest.raises(ValueError): + with pytest.raises(nncf.ValidationError): compress_weights(model, mode=CompressWeightsMode.INT4_ASYM, ratio=0.5, dataset=dataset, subset_size=subset_size) @@ -1100,11 +1135,12 @@ def get_shape_for_second_input(op_with_weights: ov.Node) -> List[int]: ) def test_lora_adapters_in_the_graph(params, transpose_b): advanced_parameters = CompressionParams() if params is None else CompressionParams(lora_correction_params=params) - model = LMLinearModel(transpose_b=transpose_b).ov_model - dataset = Dataset(np.ones(inp.shape) for inp in model.inputs) + model = LMLinearModel(transpose_b=transpose_b) + ov_model = model.ov_model + dataset = Dataset(np.ones(inp.shape) for inp in ov_model.inputs) compressed_model = compress_weights( - model, + ov_model, mode=CompressWeightsMode.INT4_SYM, ratio=1.0, group_size=8, @@ -1120,8 +1156,8 @@ def test_lora_adapters_in_the_graph(params, transpose_b): next_node = target_input.get_node() assert next_node.type_info.name == "MatMul" shape = get_shape_for_second_input(next_node) - if shape != LMLinearModel.get_weight_shape(transpose_b): - assert shape == [advanced_parameters.lora_correction_params.adapter_rank, LMLinearModel.HIDDEN_DIM] + if shape != model.get_weight_shape(transpose_b): + assert shape == [advanced_parameters.lora_correction_params.adapter_rank, model.hidden_dim] node = get_next_node(next_node) assert node.type_info.name == "MatMul" assert get_shape_for_second_input(node) == [ @@ -1148,9 +1184,9 @@ def test_lora_adapters_in_the_graph(params, transpose_b): def test_lora_adapters_reduce_noise(zero_seed, mode, apply_regularization, is_per_channel, mocker, tmp_path): mocker.patch("nncf.quantization.algorithms.weight_compression.lora_correction.DEBUG_LOG_DIR", str(tmp_path)) - model_cls = LMLinearModel - group_size = -1 if is_per_channel else model_cls.HIDDEN_DIM // 2 - model = model_cls().ov_model + model = LMLinearModel() + group_size = -1 if is_per_channel else model.hidden_dim // 2 + model = model.ov_model n_iters = 1 ie = ov.Core() input_data = [np.ones(inp.shape) for inp in model.inputs] @@ -1167,7 +1203,7 @@ def test_lora_adapters_reduce_noise(zero_seed, mode, apply_regularization, is_pe int4_out = next(iter(int4_out.values())) noise_before = np.mean(np.abs(fp32_out - int4_out)) - model = model_cls().ov_model + model = 
LMLinearModel().ov_model with nncf_debug(): int4_model = compress_weights( @@ -1245,8 +1281,9 @@ def test_compression_with_lora_for_different_dtypes(activation_dtype, weight_dty def test_compression_with_lora_with_subset_size(mocker): subset_size = 2 dataset_size = 4 - model = LMLinearModel().ov_model - input_data = [np.ones(inp.shape) for inp in model.inputs] * dataset_size + model = LMLinearModel() + ov_model = model.ov_model + input_data = [np.ones(inp.shape) for inp in ov_model.inputs] * dataset_size dataset = Dataset(input_data) from nncf.quantization.algorithms.weight_compression import lora_correction @@ -1254,7 +1291,7 @@ def test_compression_with_lora_with_subset_size(mocker): get_stats_spy = mocker.spy(lora_correction, "process_stats") compress_weights( - model, + ov_model, mode=CompressWeightsMode.INT4_SYM, ratio=1.0, group_size=8, @@ -1270,8 +1307,8 @@ def test_compression_with_lora_with_subset_size(mocker): get_stats_spy.assert_called_once() s, X = get_stats_spy.spy_return - assert X.shape == (LMLinearModel.HIDDEN_DIM, subset_size) - assert s.shape == (LMLinearModel.HIDDEN_DIM,) + assert X.shape == (model.hidden_dim, subset_size) + assert s.shape == (model.hidden_dim,) def test_lora_with_mixed_precision(): @@ -1383,6 +1420,13 @@ def test_data_aware_algo_with_different_activation_dimensions(n_extra_dims): ) +@pytest.mark.parametrize( + "input_shape", + [ + LMLinearModel.INPUT_SHAPE, + [3, 5, 16], + ], +) @pytest.mark.parametrize( "kwargs", [ @@ -1401,9 +1445,9 @@ def test_data_aware_algo_with_different_activation_dimensions(n_extra_dims): ), ], ) -def test_compression_with_different_algo_combinations(kwargs): +def test_compression_with_different_algo_combinations(input_shape, kwargs): dataset_size = 4 - model = LMLinearModel().ov_model + model = LMLinearModel(input_shape=input_shape).ov_model input_data = [np.ones(inp.shape) for inp in model.inputs] * dataset_size dataset = Dataset(input_data) @@ -1417,3 +1461,34 @@ def test_compression_with_different_algo_combinations(kwargs): all_layers=True, **kwargs, ) + + +@pytest.mark.parametrize( + "kwargs", + [ + dict(scale_estimation=True), + dict(lora_correction=True), + dict( + gptq=True, + scale_estimation=True, + advanced_parameters=CompressionParams(gptq_params=GPTQParams(subset_size=2)), + ), + ], +) +def test_compression_with_transposed_activations(kwargs): + dataset_size = 4 + model = LMLinearModel(transpose_a=True, transpose_b=False).ov_model + input_data = [np.ones(inp.shape) for inp in model.inputs] * dataset_size + dataset = Dataset(input_data) + + with pytest.raises(nncf.UnsupportedModelError): + compress_weights( + model, + mode=CompressWeightsMode.INT4_SYM, + ratio=1.0, + group_size=8, + subset_size=2, + dataset=dataset, + all_layers=True, + **kwargs, + ) diff --git a/tests/torch/fx/test_compress_weights.py b/tests/torch/fx/test_compress_weights.py index 20793e31493..fea9e0ce501 100644 --- a/tests/torch/fx/test_compress_weights.py +++ b/tests/torch/fx/test_compress_weights.py @@ -15,6 +15,7 @@ import torch from torch._export import capture_pre_autograd_graph +import nncf from nncf import BackupMode from nncf import CompressWeightsMode from nncf.common.factory import NNCFGraphFactory @@ -218,7 +219,7 @@ def test_raise_error_with_unsupported_params_for_int8(mode, params): dummy_torch_model = EmptyModel() dummy_input = torch.Tensor() exported_model = _capture_model(dummy_torch_model, dummy_input) - with pytest.raises(AttributeError): + with pytest.raises(nncf.ParameterNotSupportedError): compress_weights(exported_model, 
mode=mode, **params) @@ -227,7 +228,7 @@ def test_raise_error_with_not_int8(mode): dummy_torch_model = EmptyModel() dummy_input = torch.Tensor() exported_model = _capture_model(dummy_torch_model, dummy_input) - with pytest.raises(AttributeError): + with pytest.raises(nncf.ParameterNotSupportedError): compress_weights(exported_model, mode=mode) diff --git a/tests/torch/ptq/test_weights_compression.py b/tests/torch/ptq/test_weights_compression.py index dee60e92e5f..f982a8375d1 100644 --- a/tests/torch/ptq/test_weights_compression.py +++ b/tests/torch/ptq/test_weights_compression.py @@ -13,6 +13,7 @@ import torch import torch.nn.functional as F +import nncf from nncf import BackupMode from nncf import CompressWeightsMode from nncf import SensitivityMetric @@ -224,7 +225,7 @@ def test_raise_error_with_unsupported_params_for_int8(mode, params): dummy_torch_model = EmptyModel() dummy_input = torch.Tensor() wrapped_model = wrap_model(dummy_torch_model, example_input=dummy_input, trace_parameters=True) - with pytest.raises(AttributeError): + with pytest.raises(nncf.ParameterNotSupportedError): compress_weights(wrapped_model, mode=mode, **params) @@ -233,7 +234,7 @@ def test_raise_error_with_not_int8(mode): dummy_torch_model = EmptyModel() dummy_input = torch.Tensor() wrapped_model = wrap_model(dummy_torch_model, example_input=dummy_input, trace_parameters=True) - with pytest.raises(AttributeError): + with pytest.raises(nncf.ParameterNotSupportedError): compress_weights(wrapped_model, mode=mode) From 2dc74db84228e586afdf9bdc689a59c618187d86 Mon Sep 17 00:00:00 2001 From: Nikolay Date: Mon, 21 Oct 2024 17:43:47 +0200 Subject: [PATCH 2/3] fixed errors with backup_mode --- nncf/quantization/quantize_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nncf/quantization/quantize_model.py b/nncf/quantization/quantize_model.py index dc681556596..03bf2689841 100644 --- a/nncf/quantization/quantize_model.py +++ b/nncf/quantization/quantize_model.py @@ -483,7 +483,7 @@ def compress_weights( ) if backup_mode is not None: - raise AttributeError("Torch backend does not support backup_mode option.") + raise nncf.ParameterNotSupportedError("Torch backend does not support backup_mode option.") if is_wrapped_model(model): if not model.nncf.trace_parameters: @@ -513,7 +513,7 @@ def compress_weights( ) if backup_mode is not None: - raise AttributeError("TorchFX backend does not support backup_mode option.") + raise nncf.ParameterNotSupportedError("TorchFX backend does not support backup_mode option.") if any((awq, scale_estimation, gptq, lora_correction)): raise nncf.ParameterNotSupportedError( From dba63e89da6c688083971cc9c8c0ce56593a0c5b Mon Sep 17 00:00:00 2001 From: Nikolay Date: Wed, 23 Oct 2024 11:37:25 +0200 Subject: [PATCH 3/3] supported corner case with batch and seq len =1 --- nncf/quantization/algorithms/weight_compression/gptq.py | 2 +- tests/openvino/native/quantization/test_weights_compression.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nncf/quantization/algorithms/weight_compression/gptq.py b/nncf/quantization/algorithms/weight_compression/gptq.py index 59413087b23..d2178b19e91 100644 --- a/nncf/quantization/algorithms/weight_compression/gptq.py +++ b/nncf/quantization/algorithms/weight_compression/gptq.py @@ -264,7 +264,7 @@ def _quantize_weights( scales.append(scale) else: if self._scale_estimation and block_compression_config.num_bits == 4: - activations = [inp.squeeze()[..., (i1 + i) : (i1 + i + group_size)] for inp in inputs] + activations = 
[inp[..., (i1 + i) : (i1 + i + group_size)] for inp in inputs] wc_statistics = ScaleEstimation.activations_to_wc_statistics(activations) scale, zero_point = ScaleEstimation.calculate_quantization_params( self._backend_entity, diff --git a/tests/openvino/native/quantization/test_weights_compression.py b/tests/openvino/native/quantization/test_weights_compression.py index 2d59777684a..edc50652710 100644 --- a/tests/openvino/native/quantization/test_weights_compression.py +++ b/tests/openvino/native/quantization/test_weights_compression.py @@ -1425,6 +1425,7 @@ def test_data_aware_algo_with_different_activation_dimensions(n_extra_dims): [ LMLinearModel.INPUT_SHAPE, [3, 5, 16], + [1, 1, 16], ], ) @pytest.mark.parametrize( @@ -1470,6 +1471,7 @@ def test_compression_with_different_algo_combinations(input_shape, kwargs): dict(lora_correction=True), dict( gptq=True, + awq=True, scale_estimation=True, advanced_parameters=CompressionParams(gptq_params=GPTQParams(subset_size=2)), ),
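The gptq.py hunks (patch 1, refined in patch 3) carry the core fix of the series: `inp.squeeze()[:, a:b]` assumed the squeezed activation is always rank-2 `[seq_len, hidden_dim]`, which only holds for batch size 1 with seq_len > 1. A minimal numpy sketch of the three shape regimes (hypothetical sizes, not NNCF code):

import numpy as np

group = slice(0, 8)  # one group of 8 hidden channels, as with group_size=8

# batch == 1, seq_len > 1: squeeze() yields [seq, hidden] and both
# indexings slice the hidden axis.
x = np.ones((1, 24, 16))
assert x.squeeze()[:, group].shape == (24, 8)
assert x.squeeze()[..., group].shape == (24, 8)

# batch > 1: squeeze() keeps rank 3, so "[:, group]" silently slices
# seq_len instead of hidden_dim; this is the bug patch 1 fixes.
x = np.ones((3, 24, 16))
assert x.squeeze()[:, group].shape == (3, 8, 16)    # wrong axis
assert x.squeeze()[..., group].shape == (3, 24, 8)  # correct

# batch == seq_len == 1: squeeze() collapses to rank 1, so patch 3 drops
# the squeeze and indexes the original tensor with "..." instead.
x = np.ones((1, 1, 16))
assert x.squeeze().shape == (16,)
assert x[..., group].shape == (1, 1, 8)

Patch 1's `[..., a:b]` handles batch > 1; patch 3 removes the squeeze entirely so the batch == seq_len == 1 corner case keeps its rank and the downstream statistics see a consistent layout.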
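The Hessian in _calculate_hessian is allocated as `(inputs[0].shape[-1], inputs[0].shape[-1])`, i.e. the reduction (hidden) dimension is assumed to be the last activation axis; that is why both patch-1 checks reject transposed inputs with UnsupportedModelError. A schematic numpy sketch of a standard GPTQ-style accumulation under that assumption (hypothetical helper, not NNCF's exact implementation):

import numpy as np

def accumulate_hessian(hessian, nsamples, inp):
    # Flatten batch/seq dims so every row of x is one token activation of
    # size hidden_dim; this is valid only when hidden_dim is the last axis.
    x = inp.reshape(-1, inp.shape[-1]).astype(np.float64)
    new = x.shape[0]
    hessian *= nsamples / (nsamples + new)  # keep a running average
    nsamples += new
    x = np.sqrt(2.0 / nsamples) * x
    return hessian + x.T @ x, nsamples

hidden = 16
hessian, n = np.zeros((hidden, hidden)), 0
# Batches of different batch/seq sizes all contribute token rows.
for batch in (np.ones((1, 24, hidden)), np.ones((3, 5, hidden))):
    hessian, n = accumulate_hessian(hessian, n, batch)
assert hessian.shape == (hidden, hidden)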
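The weight_lowering.py hunk upgrades both group-quantization preconditions to UnsupportedModelError: a single reduction axis, and a channel size divisible by group_size. A sketch of the reshape those checks guard (assumed shapes for illustration):

import numpy as np

def reshape_for_groups(weight, reduction_axis, group_size):
    # Split the reduction axis r into (r // group_size, group_size) so a
    # scale (and zero point) can be computed per group.
    channel_size = weight.shape[reduction_axis]
    if channel_size % group_size != 0:
        raise ValueError(f"{channel_size} is not divisible by {group_size}")
    shape = list(weight.shape)
    shape[reduction_axis : reduction_axis + 1] = [channel_size // group_size, group_size]
    return weight.reshape(shape)

w = np.ones((32, 16))  # [out_channels, in_channels]
assert reshape_for_groups(w, reduction_axis=1, group_size=8).shape == (32, 2, 8)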
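Most of the remaining churn replaces built-in RuntimeError/ValueError/AttributeError with NNCF's exception hierarchy, so callers can tell API misuse apart from unsupported configurations. A hedged usage sketch (`model` stands for any pre-loaded model; the fallback choice is illustrative):

import nncf
from nncf import CompressWeightsMode, compress_weights

try:
    compressed = compress_weights(model, mode=CompressWeightsMode.INT4_SYM, gptq=True)
except nncf.ParameterNotSupportedError:
    # e.g. gptq=True without a dataset, or an INT4 mode on the Torch
    # backends, which only support INT8 weight compression.
    compressed = compress_weights(model, mode=CompressWeightsMode.INT8_ASYM)
except nncf.ValidationError as err:
    raise SystemExit(f"Invalid compression arguments: {err}")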
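LMLinearModel's new transpose_a/input_shape arguments let the tests build a MatMul whose hidden dimension sits at axis -2 instead of -1, which is exactly the layout test_compression_with_transposed_activations expects the algorithms to reject. The shape algebra, in numpy (sizes taken from the test model):

import numpy as np

B, S, H, O = 1, 24, 16, 32  # batch, seq_len, hidden_dim, output_dim

# Default layout: activations [B, S, H] x weight [O, H] with transpose_b.
act = np.ones((B, S, H))
w_t = np.ones((O, H))
assert (act @ w_t.T).shape == (B, S, O)

# transpose_a=True: activations arrive as [B, H, S], so the hidden dim is
# axis -2 (LMLinearModel's hdim_axis) and no longer the last axis that the
# statistics code slices; hence the UnsupportedModelError.
act_t = np.ones((B, H, S))
w = np.ones((H, O))
assert (act_t.transpose(0, 2, 1) @ w).shape == (B, S, O)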
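Taken together, the series makes calibration data with batch != 1 (e.g. [3, 5, 16]) and the degenerate [1, 1, 16] case flow through scale estimation, GPTQ and LoRA correction. A usage sketch mirroring test_compression_with_different_algo_combinations ("model.xml" is a placeholder path):

import numpy as np
import openvino as ov
import nncf
from nncf import CompressWeightsMode, Dataset

model = ov.Core().read_model("model.xml")  # placeholder model path
calibration = Dataset([np.ones((3, 5, 16), dtype=np.float32)] * 4)

compressed = nncf.compress_weights(
    model,
    mode=CompressWeightsMode.INT4_SYM,
    ratio=1.0,
    group_size=8,
    subset_size=2,
    dataset=calibration,
    all_layers=True,
    scale_estimation=True,
)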