From 04cd1dc885ba860b931f93e001d2b520ba3529b7 Mon Sep 17 00:00:00 2001
From: Alexander Dokuchaev
Date: Mon, 2 Oct 2023 17:34:09 +0300
Subject: [PATCH] Move Tensor to nncf/__init__.py

---
 nncf/__init__.py                              |  7 +-
 .../statistical_functions.py                  |  8 +-
 nncf/experimental/tensor/README.md            | 10 +-
 nncf/experimental/tensor/torch_functions.py   |  4 +-
 .../fast_bias_correction/algorithm.py         | 14 +--
 .../fast_bias_correction/onnx_backend.py      |  2 +-
 .../fast_bias_correction/openvino_backend.py  |  2 +-
 .../fast_bias_correction/torch_backend.py     |  2 +-
 nncf/quantization/fake_quantize.py            | 42 +++++----
 tests/onnx/quantization/common.py             |  2 +-
 .../test_calculate_quantizer_parameters.py    | 10 +-
 .../template_test_nncf_tensor.py              | 93 +++++++++----------
 .../ptq/test_calculation_quantizer_params.py  | 12 +--
 tests/torch/test_tensor.py                    |  4 +-
 14 files changed, 109 insertions(+), 103 deletions(-)

diff --git a/nncf/__init__.py b/nncf/__init__.py
index 07fac90b908..dc06732d9b4 100644
--- a/nncf/__init__.py
+++ b/nncf/__init__.py
@@ -18,6 +18,11 @@
 from nncf.common.strip import strip
 from nncf.config import NNCFConfig
 from nncf.data import Dataset
+from nncf.experimental.tensor.enums import TensorBackendType
+from nncf.experimental.tensor.enums import TensorDataType
+from nncf.experimental.tensor.enums import TensorDeviceType
+from nncf.experimental.tensor.functions import *
+from nncf.experimental.tensor.tensor import Tensor
 from nncf.parameters import DropType
 from nncf.parameters import ModelType
 from nncf.parameters import TargetDevice
@@ -49,7 +54,7 @@
         framework_present = True
     _AVAILABLE_FRAMEWORKS[fw_name] = framework_present
 
-if not sum(_AVAILABLE_FRAMEWORKS.values()):
+if not any(_AVAILABLE_FRAMEWORKS.values()):
     nncf_logger.error(
         "Neither PyTorch, TensorFlow, ONNX or OpenVINO Python packages have been found in your Python "
         "environment.\n"
diff --git a/nncf/experimental/common/tensor_statistics/statistical_functions.py b/nncf/experimental/common/tensor_statistics/statistical_functions.py
index 24fc115a058..f33e3a71d24 100644
--- a/nncf/experimental/common/tensor_statistics/statistical_functions.py
+++ b/nncf/experimental/common/tensor_statistics/statistical_functions.py
@@ -9,8 +9,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor import functions as fns
+import nncf
+from nncf import Tensor
 
 
 def mean_per_channel(x: Tensor, axis: int) -> Tensor:
@@ -22,9 +22,9 @@ def mean_per_channel(x: Tensor, axis: int) -> Tensor:
     :return: Reduced Tensor.
     """
     if len(x.shape) < 3:
-        return fns.mean(x, axis=0)
+        return nncf.mean(x, axis=0)
     pos_axis = axis + x.ndim if axis < 0 else axis
     if pos_axis < 0 or pos_axis >= x.ndim:
         raise ValueError(f"axis {axis} is out of bounds for array of dimension {x.ndim}")
     axis = tuple(i for i in range(x.ndim) if i != pos_axis)
-    return fns.mean(x, axis=axis)
+    return nncf.mean(x, axis=axis)
diff --git a/nncf/experimental/tensor/README.md b/nncf/experimental/tensor/README.md
index ea8ae2168c9..5b8984f117f 100644
--- a/nncf/experimental/tensor/README.md
+++ b/nncf/experimental/tensor/README.md
@@ -11,7 +11,7 @@ Common algorithms should use wrapped tensors and provide the unwrapped tensor to the backend-specific function.
 ### Initialization Tensor
 
 ```python
-from nncf.experimental.tensor import Tensor
+from nncf import Tensor
 import numpy as np
 
 numpy_array = np.array([1,2])
@@ -57,16 +57,16 @@ nncf_tensor.max()  # Tensor(2)
 
 All available functions can be found in [functions.py](functions.py).
 
 ```python
-from nncf.experimental.tensor import functions as fns
-fns.max(nncf_tensor)  # Tensor(2)
+import nncf
+nncf.max(nncf_tensor)  # Tensor(2)
 ```
 
 **NOTE** A function requires at least one positional argument, which is used to dispatch the function to the appropriate implementation depending on the type of argument.
 
 ```python
-fns.max(nncf_tensor)  # Correct
-fns.max(a=nncf_tensor)  # TypeError: wrapper requires at least 1 positional argument
+nncf.max(nncf_tensor)  # Correct
+nncf.max(a=nncf_tensor)  # TypeError: wrapper requires at least 1 positional argument
 ```
 
 ### Loop over Tensor
diff --git a/nncf/experimental/tensor/torch_functions.py b/nncf/experimental/tensor/torch_functions.py
index 273d5419781..0830f43a070 100644
--- a/nncf/experimental/tensor/torch_functions.py
+++ b/nncf/experimental/tensor/torch_functions.py
@@ -13,9 +13,9 @@
 
 import torch
 
-from nncf.experimental.tensor import TensorDataType
-from nncf.experimental.tensor import TensorDeviceType
 from nncf.experimental.tensor import functions as fns
+from nncf.experimental.tensor.enums import TensorDataType
+from nncf.experimental.tensor.enums import TensorDeviceType
 
 DTYPE_MAP = {
     TensorDataType.float16: torch.float16,
diff --git a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
index 78a05fc26bf..faa2d26bef9 100644
--- a/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
+++ b/nncf/quantization/algorithms/fast_bias_correction/algorithm.py
@@ -12,7 +12,9 @@
 from math import inf
 from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
 
+import nncf
 from nncf import Dataset
+from nncf import Tensor
 from nncf.common.factory import EngineFactory
 from nncf.common.factory import ModelTransformerFactory
 from nncf.common.graph.graph import NNCFGraph
@@ -26,9 +28,7 @@
 from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
 from nncf.common.utils.backend import BackendType
 from nncf.common.utils.backend import get_backend
-from nncf.experimental.common.tensor_statistics import statistical_functions as s_fns
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor import functions as fns
+from nncf.experimental.common.tensor_statistics.statistical_functions import mean_per_channel
 from nncf.quantization.algorithms.algorithm import Algorithm
 from nncf.quantization.algorithms.fast_bias_correction.backend import ALGO_BACKENDS
 
@@ -199,8 +199,8 @@ def _get_bias_shift_magnitude(current_bias_value: Tensor, updated_bias_value: Te
         :return: Magnitude between original and updated bias values.
""" bias_shift_magnitude = inf - if fns.count_nonzero(current_bias_value == 0) == 0: - bias_shift_magnitude = fns.max(fns.abs((updated_bias_value - current_bias_value) / current_bias_value)) + if nncf.count_nonzero(current_bias_value == 0) == 0: + bias_shift_magnitude = nncf.max(nncf.abs((updated_bias_value - current_bias_value) / current_bias_value)) return bias_shift_magnitude @staticmethod @@ -318,8 +318,8 @@ def _get_bias_shift( engine = EngineFactory.create(model) raw_output = engine.infer(input_blob) q_outputs = self._backend_entity.process_model_output(raw_output, output_name) - q_outputs = s_fns.mean_per_channel(q_outputs, channel_axis) - bias_shift = fns.stack(output_fp) - q_outputs + q_outputs = mean_per_channel(q_outputs, channel_axis) + bias_shift = nncf.stack(output_fp) - q_outputs return bias_shift def get_statistic_points(self, model: TModel, graph: NNCFGraph) -> StatisticPointsContainer: diff --git a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py index 86a827d07a0..28a63edbae7 100644 --- a/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py +++ b/nncf/quantization/algorithms/fast_bias_correction/onnx_backend.py @@ -14,12 +14,12 @@ import numpy as np import onnx +from nncf import Tensor from nncf.common.graph import NNCFGraph from nncf.common.graph import NNCFNode from nncf.common.graph.transformations.commands import TargetType from nncf.common.tensor_statistics.collectors import ReductionShape from nncf.common.utils.backend import BackendType -from nncf.experimental.tensor import Tensor from nncf.onnx.graph.node_utils import get_bias_value from nncf.onnx.graph.node_utils import is_any_weight_quantized from nncf.onnx.graph.node_utils import is_node_with_bias diff --git a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py index e8cb8bec6ba..abb157fb084 100644 --- a/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py +++ b/nncf/quantization/algorithms/fast_bias_correction/openvino_backend.py @@ -14,13 +14,13 @@ import numpy as np import openvino.runtime as ov +from nncf import Tensor from nncf.common.graph import NNCFGraph from nncf.common.graph import NNCFNode from nncf.common.graph.transformations.commands import TargetType from nncf.common.tensor_statistics.collectors import ReductionShape from nncf.common.utils.backend import BackendType from nncf.experimental.common.tensor_statistics.collectors import TensorCollector -from nncf.experimental.tensor import Tensor from nncf.openvino.graph.metatypes.groups import FAKE_QUANTIZE_OPERATIONS from nncf.openvino.graph.node_utils import get_bias_value from nncf.openvino.graph.node_utils import is_node_with_bias diff --git a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py index 16fd42e1fce..b0b7ebce0bc 100644 --- a/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py +++ b/nncf/quantization/algorithms/fast_bias_correction/torch_backend.py @@ -14,13 +14,13 @@ import numpy as np import torch +from nncf import Tensor from nncf.common.graph import NNCFGraph from nncf.common.graph import NNCFNode from nncf.common.graph.definitions import NNCFGraphNodeType from nncf.common.graph.transformations.commands import TargetType from nncf.common.tensor_statistics.collectors import ReductionShape from nncf.common.utils.backend import 
-from nncf.experimental.tensor import Tensor
 from nncf.quantization.algorithms.fast_bias_correction.backend import ALGO_BACKENDS
 from nncf.quantization.algorithms.fast_bias_correction.backend import FastBiasCorrectionAlgoBackend
 from nncf.torch.graph.transformations.command_creation import create_bias_correction_command
diff --git a/nncf/quantization/fake_quantize.py b/nncf/quantization/fake_quantize.py
index 74b61523830..e491978dff3 100644
--- a/nncf/quantization/fake_quantize.py
+++ b/nncf/quantization/fake_quantize.py
@@ -14,6 +14,9 @@
 
 import numpy as np
 
+import nncf
+from nncf import Tensor
+from nncf import TensorDataType
 from nncf.common.quantization.quantizers import calculate_asymmetric_level_ranges
 from nncf.common.quantization.quantizers import calculate_symmetric_level_ranges
 from nncf.common.quantization.quantizers import get_num_levels
@@ -21,9 +24,6 @@
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
 from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor import TensorDataType
-from nncf.experimental.tensor import functions as fns
 
 
 @dataclass
@@ -53,9 +53,9 @@ def fix_zero_filters_symmetric(max_values: Tensor, eps: float = 0.01) -> Tensor:
     :param eps: Correction coefficient.
     :return: Fixed the high quant number.
     """
-    max_range = fns.max(max_values)
-    lower_threshold = fns.maximum(max_range * eps, 8e-5)
-    return fns.maximum(lower_threshold, max_values)
+    max_range = nncf.max(max_values)
+    lower_threshold = nncf.maximum(max_range * eps, 8e-5)
+    return nncf.maximum(lower_threshold, max_values)
 
 
 def fix_zero_filters_asymmetric(min_values: Tensor, max_values: Tensor, eps: float = 1e-8) -> Tuple[Tensor, Tensor]:
@@ -71,7 +71,9 @@ def fix_zero_filters_asymmetric(min_values: Tensor, max_values: Tensor, eps: flo
     """
     ranges = max_values - min_values
     min_correction = 8e-4
-    corrections = fns.where(ranges > min_correction, (fns.maximum(eps * ranges, ranges) - ranges) * 0.5, min_correction)
+    corrections = nncf.where(
+        ranges > min_correction, (nncf.maximum(eps * ranges, ranges) - ranges) * 0.5, min_correction
+    )
 
     level_low = min_values - corrections
     level_high = max_values + corrections
@@ -99,21 +101,21 @@ def tune_range(
     if unify_zp:
         scale = (right_border - left_border) / level_high
         zero_point = -left_border / scale
-        avg_zpts = fns.round(fns.mean(zero_point))
-        qval = fns.ones_like(left_border) * avg_zpts
+        avg_zpts = nncf.round(nncf.mean(zero_point))
+        qval = nncf.ones_like(left_border) * avg_zpts
     else:
         s = level_high / (right_border - left_border)
         fval = -left_border * s
-        qval = fns.round(fval)
+        qval = nncf.round(fval)
 
-    ra = fns.where(qval < level_high, qval / (qval - level_high) * right_border, left_border)
-    rb = fns.where(qval > 0.0, (qval - level_high) / qval * left_border, right_border)
+    ra = nncf.where(qval < level_high, qval / (qval - level_high) * right_border, left_border)
+    rb = nncf.where(qval > 0.0, (qval - level_high) / qval * left_border, right_border)
 
     range_a = right_border - ra
     range_b = rb - left_border
 
-    mask = fns.where(range_a > range_b, 1.0, 0.0)
-    inv_mask = fns.abs(1.0 - mask)
+    mask = nncf.where(range_a > range_b, 1.0, 0.0)
+    inv_mask = nncf.abs(1.0 - mask)
 
     ra = mask * ra + inv_mask * left_border
     rb = inv_mask * rb + mask * right_border
@@ -145,8 +147,8 @@
     else:
        signed = quantizer_config.signedness_to_force is True
     level_low = (
-        fns.zeros_like(level_high)
-        if fns.all(min_values >= 0) and not signed
+        nncf.zeros_like(level_high)
+        if nncf.all(min_values >= 0) and not signed
         else -level_high * levels / (levels - 2)
     )
@@ -175,8 +177,8 @@
         level_high - the high quant number
     """
     level_low, level_high = fix_zero_filters_asymmetric(min_values, max_values)
-    level_low = fns.where(level_low < 0.0, level_low, 0.0)
-    level_high = fns.where(level_high > 0.0, level_high, 0.0)
+    level_low = nncf.where(level_low < 0.0, level_low, 0.0)
+    level_high = nncf.where(level_high > 0.0, level_high, 0.0)
 
     if unify_zp and q_group == QuantizerGroup.ACTIVATIONS:
         raise NotImplementedError("Unified zero point is not supported for activations.")
@@ -239,8 +241,8 @@
         input_low, input_high = asymmetric_range(min_values, max_values, quantizer_config, quant_group)
 
     if not quantizer_config.per_channel:
-        input_low = fns.squeeze(input_low)
-        input_high = fns.squeeze(input_high)
+        input_low = nncf.squeeze(input_low)
+        input_high = nncf.squeeze(input_high)
 
     output_low, output_high = input_low, input_high
     return FakeQuantizeParameters(input_low, input_high, output_low, output_high, levels)
diff --git a/tests/onnx/quantization/common.py b/tests/onnx/quantization/common.py
index 01a916b61a5..307873bca4e 100644
--- a/tests/onnx/quantization/common.py
+++ b/tests/onnx/quantization/common.py
@@ -16,7 +16,7 @@
 import onnx
 
 from nncf import Dataset
-from nncf.experimental.tensor import Tensor
+from nncf import Tensor
 from nncf.onnx.graph.nncf_graph_builder import GraphConverter
 from nncf.onnx.graph.onnx_graph import ONNXGraph
 from nncf.onnx.statistics.statistics import ONNXMinMaxTensorStatistic
diff --git a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
index 3e9e8d8b1c1..90802c02d2c 100644
--- a/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
+++ b/tests/post_training/test_templates/test_calculate_quantizer_parameters.py
@@ -16,10 +16,10 @@
 import numpy as np
 import pytest
 
+import nncf
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
-from nncf.experimental.tensor import functions as fns
 from nncf.quantization.fake_quantize import FakeQuantizeParameters
 from nncf.quantization.fake_quantize import calculate_quantizer_parameters
 from tests.post_training.conftest import FQ_CALCULATED_PARAMETERS_PATH
@@ -33,10 +33,10 @@ def compare_fq_parameters(ref_params, params):
     assert ref_params.input_high.shape == params.input_high.shape
     assert ref_params.output_low.shape == params.output_low.shape
     assert ref_params.output_high.shape == params.output_high.shape
-    assert fns.allclose(ref_params.input_low, params.input_low)
-    assert fns.allclose(ref_params.input_high, params.input_high)
-    assert fns.allclose(ref_params.output_low, params.output_low)
-    assert fns.allclose(ref_params.output_high, params.output_high)
+    assert nncf.allclose(ref_params.input_low, params.input_low)
+    assert nncf.allclose(ref_params.input_high, params.input_high)
+    assert nncf.allclose(ref_params.output_low, params.output_low)
+    assert nncf.allclose(ref_params.output_high, params.output_high)
 
 
 def get_test_reference_key(q_group, q_config, narrow_range, hf_range):
diff --git a/tests/shared/test_templates/template_test_nncf_tensor.py b/tests/shared/test_templates/template_test_nncf_tensor.py
index 461deb14fce..18e0150f23f 100644
--- a/tests/shared/test_templates/template_test_nncf_tensor.py
+++ b/tests/shared/test_templates/template_test_nncf_tensor.py
@@ -17,13 +17,12 @@
 
 import pytest
 
+import nncf
+from nncf import Tensor
+from nncf import TensorDataType
+from nncf import TensorDeviceType
 from nncf.experimental.common.tensor_statistics import statistical_functions as s_fns
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor import TensorDataType
-from nncf.experimental.tensor import TensorDeviceType
-from nncf.experimental.tensor import functions as fns
 
-TModel = TypeVar("TModel")
 TTensor = TypeVar("TTensor")
 
@@ -163,7 +162,7 @@ def test_squeeze(self, val, axis, ref):
         ref_tensor = self.to_tensor(ref)
         res = nncf_tensor.squeeze(axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor)
+        assert nncf.allclose(res, ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -194,9 +193,9 @@ def test_fn_squeeze(self, val, axis, ref):
         tensor = self.to_tensor(val)
         nncf_tensor = Tensor(tensor)
         ref_tensor = self.to_tensor(ref)
-        res = fns.squeeze(nncf_tensor, axis=axis)
+        res = nncf.squeeze(nncf_tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor)
+        assert nncf.allclose(res, ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -213,7 +212,7 @@ def test_flatten(self, val, ref):
         ref_tensor = self.to_tensor(ref)
         res = nncf_tensor.flatten()
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor)
+        assert nncf.allclose(res, ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -229,9 +228,9 @@ def test_fn_max(self, val, axis, ref):
         tensor = self.to_tensor(val)
         nncf_tensor = Tensor(tensor)
         ref_tensor = self.to_tensor(ref)
-        res = fns.max(nncf_tensor, axis=axis)
+        res = nncf.max(nncf_tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor)
+        assert nncf.allclose(res, ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -248,7 +247,7 @@ def test_min(self, val, axis, ref):
         ref_tensor = self.to_tensor(ref)
         res = nncf_tensor.min(axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor)
+        assert nncf.allclose(res, ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -263,7 +262,7 @@ def test_abs(self, val, ref):
         nncf_ref_tensor = Tensor(self.to_tensor(ref))
         res = nncf_tensor.abs()
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, nncf_ref_tensor)
+        assert nncf.allclose(res, nncf_ref_tensor)
         assert res.device == nncf_tensor.device
 
     @pytest.mark.parametrize(
@@ -276,9 +275,9 @@ def test_abs(self, val, ref):
     def test_fn_abs(self, val, ref):
         nncf_tensor = Tensor(self.to_tensor(val))
         nncf_ref_tensor = Tensor(self.to_tensor(ref))
-        res = fns.abs(nncf_tensor)
+        res = nncf.abs(nncf_tensor)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, nncf_ref_tensor)
+        assert nncf.allclose(res, nncf_ref_tensor)
         assert res.device == nncf_tensor.device
 
     def test_getitem(self):
@@ -311,17 +310,17 @@ def test_fn_count_nonzero(self, axis, ref):
         tensor = self.to_tensor([[1.0, 2.0], [1.0, 0.0]])
         nncf_tensor = Tensor(tensor)
         ref_tensor = self.to_tensor(ref)
-        res = fns.count_nonzero(nncf_tensor, axis=axis)
+        res = nncf.count_nonzero(nncf_tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res.data, ref_tensor)
+        assert nncf.allclose(res.data, ref_tensor)
         assert res.device == nncf_tensor.device
 
     def test_fn_zeros_like(self):
         tensor = self.to_tensor([1, 2])
         nncf_tensor = Tensor(tensor)
 
-        res = fns.zeros_like(nncf_tensor)
+        res = nncf.zeros_like(nncf_tensor)
         assert all(res == Tensor(tensor * 0))
         assert isinstance(res, Tensor)
         assert res.device == nncf_tensor.device
 
@@ -331,7 +330,7 @@ def test_fn_maximum(self):
         tensor_b = Tensor(self.to_tensor([2, 1]))
         tensor_ref = self.to_tensor([2, 2])
 
-        res = fns.maximum(tensor_a, tensor_b)
+        res = nncf.maximum(tensor_a, tensor_b)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor_a.device
@@ -341,7 +340,7 @@ def test_fn_maximum_list(self):
         tensor_b = [2, 1]
         tensor_ref = self.to_tensor([2, 2])
 
-        res = fns.maximum(tensor_a, tensor_b)
+        res = nncf.maximum(tensor_a, tensor_b)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor_a.device
@@ -351,7 +350,7 @@ def test_fn_minimum(self):
         tensor_b = Tensor(self.to_tensor([2, 1]))
         tensor_ref = self.to_tensor([1, 1])
 
-        res = fns.minimum(tensor_a, tensor_b)
+        res = nncf.minimum(tensor_a, tensor_b)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor_a.device
@@ -361,7 +360,7 @@ def test_fn_minimum_list(self):
         tensor_b = [2, 1]
         tensor_ref = self.to_tensor([1, 1])
 
-        res = fns.minimum(tensor_a, tensor_b)
+        res = nncf.minimum(tensor_a, tensor_b)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor_a.device
@@ -370,7 +369,7 @@ def test_fn_ones_like(self):
         tensor_a = Tensor(self.to_tensor([1, 2]))
         tensor_ref = self.to_tensor([1, 1])
 
-        res = fns.ones_like(tensor_a)
+        res = nncf.ones_like(tensor_a)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor_a.device
@@ -386,9 +385,9 @@ def test_fn_ones_like(self):
     )
     def test_fn_all(self, val, axis, ref):
         tensor = Tensor(self.to_tensor(val))
-        res = fns.all(tensor, axis=axis)
+        res = nncf.all(tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res.data, self.to_tensor(ref))
+        assert nncf.allclose(res.data, self.to_tensor(ref))
         assert res.device == tensor.device
 
     @pytest.mark.parametrize(
@@ -402,16 +401,16 @@ def test_fn_all(self, val, axis, ref):
     )
     def test_fn_any(self, val, axis, ref):
         tensor = Tensor(self.to_tensor(val))
-        res = fns.any(tensor, axis=axis)
+        res = nncf.any(tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res.data, self.to_tensor(ref))
+        assert nncf.allclose(res.data, self.to_tensor(ref))
         assert res.device == tensor.device
 
     def test_fn_where(self):
         tensor = Tensor(self.to_tensor([1, -1]))
         tensor_ref = self.to_tensor([1, 0])
 
-        res = fns.where(tensor > 0, 1, 0)
+        res = nncf.where(tensor > 0, 1, 0)
         assert all(res.data == tensor_ref)
         assert isinstance(res, Tensor)
         assert res.device == tensor.device
@@ -426,7 +425,7 @@ def test_fn_where(self):
     )
     def test_fn_isempty(self, val, ref):
         tensor = Tensor(self.to_tensor(val))
-        res = fns.isempty(tensor)
+        res = nncf.isempty(tensor)
         assert res == ref
         assert isinstance(res, bool)
 
@@ -459,11 +458,11 @@ def test_fn_allclose(self, x1, x2, rtol, atol, ref):
         tensor1 = Tensor(self.to_tensor(x1))
         tensor2 = Tensor(self.to_tensor(x2))
         if rtol is not None:
-            res = fns.allclose(tensor1, tensor2, rtol=rtol)
+            res = nncf.allclose(tensor1, tensor2, rtol=rtol)
         elif atol is not None:
-            res = fns.allclose(tensor1, tensor2, atol=atol)
+            res = nncf.allclose(tensor1, tensor2, atol=atol)
         else:
-            res = fns.allclose(tensor1, tensor2)
+            res = nncf.allclose(tensor1, tensor2)
         assert res == ref
 
     @pytest.mark.parametrize(
@@ -480,11 +479,11 @@ def test_fn_isclose(self, x1, x2, rtol, atol, ref):
         tensor1 = Tensor(self.to_tensor(x1))
         tensor2 = Tensor(self.to_tensor(x2))
         if rtol is not None:
-            res = fns.isclose(tensor1, tensor2, rtol=rtol)
+            res = nncf.isclose(tensor1, tensor2, rtol=rtol)
         elif atol is not None:
-            res = fns.isclose(tensor1, tensor2, atol=atol)
+            res = nncf.isclose(tensor1, tensor2, atol=atol)
         else:
-            res = fns.isclose(tensor1, tensor2)
+            res = nncf.isclose(tensor1, tensor2)
         assert all(res == self.to_tensor(ref))
         assert isinstance(res, Tensor)
 
@@ -501,7 +500,7 @@ def test_astype(self):
 
     def test_fn_astype(self):
         tensor = Tensor(self.to_tensor([1]))
-        res = fns.astype(tensor, TensorDataType.int8)
+        res = nncf.astype(tensor, TensorDataType.int8)
         assert isinstance(res, Tensor)
         assert res.dtype == TensorDataType.int8
 
@@ -514,14 +513,14 @@ def test_reshape(self):
 
     def test_fn_reshape(self):
         tensor = Tensor(self.to_tensor([1, 1]))
-        res = fns.reshape(tensor, (1, 2))
+        res = nncf.reshape(tensor, (1, 2))
         assert tensor.shape == (2,)
         assert res.shape == (1, 2)
         assert res.device == tensor.device
 
     def test_not_implemented(self):
         with pytest.raises(NotImplementedError, match="is not implemented for"):
-            fns.device({}, [1, 2])
+            nncf.device({}, [1, 2])
 
     @pytest.mark.parametrize(
@@ -542,7 +541,7 @@ def test_fn_unstack(self, x, axis, ref):
         tensor = Tensor(self.to_tensor(x))
         ref = [self.to_tensor(r) for r in ref]
 
-        res = fns.unstack(tensor, axis=axis)
+        res = nncf.unstack(tensor, axis=axis)
 
         assert isinstance(res, list)
         for i, _ in enumerate(ref):
@@ -568,17 +567,17 @@ def test_fn_stack(self, x, axis, ref):
         list_tensor = [Tensor(self.to_tensor(i)) for i in x]
         ref = self.to_tensor(ref)
 
-        res = fns.stack(list_tensor, axis=axis)
+        res = nncf.stack(list_tensor, axis=axis)
         assert isinstance(res, Tensor)
-        assert fns.all(res.data == ref)
+        assert nncf.all(res.data == ref)
         assert res.device == list_tensor[0].device
 
     def test_fn_moveaxis(self):
         tensor = [[0, 0, 0], [0, 0, 0]]
         tensor = Tensor(self.to_tensor(tensor))
 
-        res = fns.moveaxis(tensor, 0, -1)
+        res = nncf.moveaxis(tensor, 0, -1)
 
         assert res.shape == (3, 2)
@@ -615,10 +614,10 @@ def test_fn_mean(self, x, axis, keepdims, ref):
         tensor = Tensor(self.to_tensor(x))
         ref_tensor = self.to_tensor(ref)
 
-        res = fns.mean(tensor, axis, keepdims)
+        res = nncf.mean(tensor, axis, keepdims)
 
         assert isinstance(res, Tensor)
-        assert fns.allclose(res.data, ref_tensor)
+        assert nncf.allclose(res.data, ref_tensor)
         assert res.device == tensor.device
 
     @pytest.mark.parametrize(
@@ -633,10 +632,10 @@ def test_fn_round(self, val, decimals, ref):
         tensor = Tensor(self.to_tensor(val))
         ref_tensor = self.to_tensor(ref)
 
-        res = fns.round(tensor, decimals)
+        res = nncf.round(tensor, decimals)
 
         assert isinstance(res, Tensor)
-        assert fns.allclose(res.data, ref_tensor)
+        assert nncf.allclose(res.data, ref_tensor)
         assert res.device == tensor.device
 
     @pytest.mark.parametrize(
@@ -688,7 +687,7 @@ def test_fn_mean_per_channel(self, val, axis, ref):
         ref_tensor = self.to_tensor(ref)
         res = s_fns.mean_per_channel(tensor, axis)
         assert isinstance(res, Tensor)
-        assert fns.allclose(res, ref_tensor), f"{res.data}"
+        assert nncf.allclose(res, ref_tensor), f"{res.data}"
         assert res.device == tensor.device
 
     @pytest.mark.parametrize("axis", (3, 4, -4, -5))
diff --git a/tests/torch/ptq/test_calculation_quantizer_params.py b/tests/torch/ptq/test_calculation_quantizer_params.py
index 843fca5bf40..76d2c97ad14 100644
--- a/tests/torch/ptq/test_calculation_quantizer_params.py
+++ b/tests/torch/ptq/test_calculation_quantizer_params.py
@@ -18,15 +18,15 @@
 import torch
 from torch import nn
 
+import nncf
 from nncf import Dataset
 from nncf import NNCFConfig
+from nncf import Tensor
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.common.quantization.structs import QuantizationMode
 from nncf.common.quantization.structs import QuantizationPreset
 from nncf.common.quantization.structs import QuantizerConfig
 from nncf.common.quantization.structs import QuantizerGroup
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor import functions as fn
 from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
 from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
 from nncf.quantization.fake_quantize import FakeQuantizeParameters
@@ -214,8 +214,8 @@ def test_quantizer_params_asym(case_to_test: CaseSymParams):
     )
     quantizer = PTMinMaxAlgoBackend._create_quantizer(qconfig, scale_shape, fq_params, target_type)
     assert quantizer.levels == fq_params.levels
-    assert fn.allclose(quantizer.input_low.data, case_to_test.ref_inp_low)
-    assert fn.allclose(quantizer.input_range.data, case_to_test.ref_inp_range)
+    assert nncf.allclose(quantizer.input_low.data, case_to_test.ref_inp_low)
+    assert nncf.allclose(quantizer.input_range.data, case_to_test.ref_inp_range)
 
 
 class LinearTestModel(nn.Module):
@@ -343,8 +343,8 @@ def test_quantizer_parameters_export(tmp_path: Path):
 
     for name, param in fq_params.items():
         assert name in torch_ptq_params
-        assert fn.allclose(param["input_low"], torch_ptq_params[name]["input_low"])
-        assert fn.allclose(param["input_high"], torch_ptq_params[name]["input_high"])
+        assert nncf.allclose(param["input_low"], torch_ptq_params[name]["input_low"])
+        assert nncf.allclose(param["input_high"], torch_ptq_params[name]["input_high"])
 
 
 class TestFQParams(TemplateTestFQParams):
diff --git a/tests/torch/test_tensor.py b/tests/torch/test_tensor.py
index eb4d907022b..95a1cfbcf31 100644
--- a/tests/torch/test_tensor.py
+++ b/tests/torch/test_tensor.py
@@ -11,8 +11,8 @@
 import pytest
 import torch
 
-from nncf.experimental.tensor import Tensor
-from nncf.experimental.tensor.enums import TensorDeviceType
+from nncf import Tensor
+from nncf import TensorDeviceType
 from tests.shared.test_templates.template_test_nncf_tensor import TemplateTestNNCFTensorOperators
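For reference, this is how the moved API reads once the patch above is applied. A minimal sketch, assuming only that NNCF and NumPy import cleanly in the environment; every name used here comes straight from the imports this commit adds to `nncf/__init__.py`:

```python
import numpy as np

import nncf
from nncf import Tensor, TensorDataType

# Wrap a backend array; Tensor dispatches each operation to the
# implementation registered for the wrapped type (NumPy here).
t = Tensor(np.array([1.0, -2.0]))

# Functions formerly reached via `from nncf.experimental.tensor import functions as fns`
# are re-exported at the package root by `from nncf.experimental.tensor.functions import *`.
print(nncf.max(t))  # Tensor(1.0)
print(nncf.abs(t))  # Tensor([1.0, 2.0])
print(nncf.astype(t, TensorDataType.int8).dtype)  # TensorDataType.int8

# Dispatch still requires the tensor as a positional argument (see the README
# hunk above): nncf.max(a=t) would raise TypeError.
assert nncf.allclose(t, Tensor(np.array([1.0, -2.0])))
```

One side effect of the wildcard re-export worth noting: names such as `max`, `abs`, `all`, and `any` shadow the Python builtins inside the `nncf` namespace only (`nncf.max` is the tensor function; the builtins are untouched elsewhere), which is presumably why the call sites in this diff are rewritten to the `nncf.<op>` form rather than `from nncf import max`.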