diff --git a/nncf/common/tensor_statistics/collectors.py b/nncf/common/tensor_statistics/collectors.py
index 8446add671f..3e46f26b555 100644
--- a/nncf/common/tensor_statistics/collectors.py
+++ b/nncf/common/tensor_statistics/collectors.py
@@ -199,9 +199,9 @@ def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCF
         :return: Reduced NNCFTensor.
         """
 
-    @staticmethod
+    @classmethod
     @abstractmethod
-    def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
         """
         Computes the masked mean of elements across given dimensions of NNCFTensor.
 
@@ -214,9 +214,11 @@ def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor,
         :return: Reduced NNCFTensor.
         """
 
-    @staticmethod
+    @classmethod
     @abstractmethod
-    def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    def masked_median(
+        cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
+    ) -> NNCFTensor:
         """
         Computes the masked median of elements across given dimensions of NNCFTensor.
 
@@ -253,8 +255,13 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
 
     @staticmethod
     @abstractmethod
-    def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
-        """"""
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
+        """
+        Remove axes of length one from x.
+
+        :param x: NNCFTensor to squeeze.
+        :param dim: Selects a subset of the entries of length one in the shape.
+        """
 
     @staticmethod
     @abstractmethod
@@ -272,15 +279,15 @@ def quantile(
         tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
     ) -> List[TensorElementsType]:
         """
-        Compute the quantile-th percentile(s) of the data along the specified axis.
+        Compute the quantile(s) of the data along the specified axis.
 
         :param tensor: Given NNCFTensor.
-        :params quantile: Percentile or sequence of percentiles to compute, which must be between
+        :param quantile: Quantile or sequence of quantiles to compute, which must be between
             0 and 1 inclusive.
-        :param axis: Axis or axes along which the percentiles are computed.
+        :param axis: Axis or axes along which the quantiles are computed.
         :param keepdims: If True, the axes which are reduced are left in the result
             as dimensions with size one.
-        :returns: List of the quantile-th percentile(s) of the tensor elements.
+        :returns: List of the quantile(s) of the tensor elements.
         """
 
     @classmethod
@@ -292,9 +299,17 @@ def precentile(
         axis: Union[int, tuple, list],
         keepdims: bool = False,
     ) -> List[TensorElementsType]:
-        """"""
-        quantile = np.true_divide(precentile, 100)
-        return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
+        """
+        Compute the percentile(s) of the data along the specified axis.
+
+        :param tensor: Given NNCFTensor.
+        :param precentile: Percentile or sequence of percentiles to compute, which must be between
+            0 and 100 inclusive.
+        :param axis: Axis or axes along which the percentiles are computed.
+        :param keepdims: If True, the axes which are reduced are left in the result
+            as dimensions with size one.
+        :returns: List of the percentile(s) of the tensor elements.
+        """
 
     @staticmethod
     @abstractmethod
@@ -325,20 +340,20 @@ def no_outliers_map(
         :returns: Result of given masked reduction function on filtered from outliers NNCFTensor.
         """
 
-    @classmethod
-    @abstractmethod
-    def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
-        """ """
-
-    @classmethod
+    @staticmethod
     @abstractmethod
-    def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
-        """"""
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+        """
+        Returns the result of subtracting b from a.
+        """
 
     @classmethod
     @abstractmethod
     def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
-        """ """
+        """
+        Returns a binary mask of the input x which is True for all elements whose absolute value
+        is smaller than the corresponding machine epsilon.
+        """
 
 
 class MinMaxStatisticCollector(OnlineTensorStatisticCollector):
diff --git a/nncf/experimental/common/tensor_statistics/collectors.py b/nncf/experimental/common/tensor_statistics/collectors.py
index 5e781526f27..edccf4333b4 100644
--- a/nncf/experimental/common/tensor_statistics/collectors.py
+++ b/nncf/experimental/common/tensor_statistics/collectors.py
@@ -34,11 +34,9 @@ class TensorReducerBase(ABC):
 
     def __init__(self, reduction_axes: Optional[ReductionShape] = None, inplace: bool = False):
         """
-        :param reduction_shape: Reduction shape for reduction calculation. Equal to list(range(len(input.shape)))
+        :param reduction_axes: Reduction axes for reduction calculation. Equal to list(range(len(input.shape)))
             if empty.
         :param inplace: Whether should be calculated inplace or out of place.
-        :param keepdims: Should the axes which are reduced are left in the result
-            as dimensions with size one or not.
         """
         self._reduction_axes = reduction_axes
         self._tensor_processor: NNCFCollectorTensorProcessor = self._get_processor()
@@ -125,6 +123,9 @@ def __init__(
     ):
         """
         :param tensor_processor: Backend-specific tensor processor.
+        :param aggregation_axes: Axes along which to operate.
+            Registered statistics are stacked along the zero axis,
+            axes >= 1 correspond to received statistic axes shifted left by 1.
         :param num_samples: Maximum number of samples to collect. Aggregator
             skips tensor registration if tensor registration was called num_samples times before.
             Aggregator never skips registration if num_samples is None.
@@ -300,11 +301,13 @@ def register_inputs(self, inputs: Dict[int, List[NNCFTensor]]) -> None:
             if reducer_hash in reduced_inputs:
                 aggregator.register_reduced_input(reduced_inputs[reducer_hash][reducer_port_id])
 
-    def register_unnamed_inputs(self, inputs: NNCFTensor):
-        formated_inputs = {}
-        for reducer in self._reducers:
-            formated_inputs[hash(reducer)] = [inputs]
-        self.register_inputs(formated_inputs)
+    def register_unnamed_inputs(self, input_: NNCFTensor) -> None:
+        """
+        Registers the given input_ in each available statistic collection branch.
+
+        :param input_: Tensor input to register.
+        """
+        self.register_inputs({hash(reducer): [input_] for reducer in self._reducers})
 
     def _aggregate(self) -> None:
         result = {}
@@ -654,9 +657,11 @@ def _register_reduced_input_impl(self, x: TensorType) -> None:
 
     def _aggregate_impl(self) -> Any:
         stacked_val = self._tensor_processor.stack(self._container)
-        median_fn = partial(self._tensor_processor.masked_median, axis=self._aggregation_axes, keepdims=True)
-        filter_fn = self._tensor_processor.zero_elements
-        median_per_ch = self._tensor_processor.masked_map(stacked_val, median_fn, filter_fn)
+
+        mask = self._tensor_processor.zero_elements(stacked_val)
+        median_per_ch = self._tensor_processor.masked_median(
+            stacked_val, mask=mask, axis=self._aggregation_axes, keepdims=True
+        )
 
         mad_values = self._tensor_processor.median(
             self._tensor_processor.abs(self._tensor_processor.sub(stacked_val, median_per_ch)),
diff --git a/nncf/onnx/statistics/collectors.py b/nncf/onnx/statistics/collectors.py
index 5b1194e3240..0685166e354 100644
--- a/nncf/onnx/statistics/collectors.py
+++ b/nncf/onnx/statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 
@@ -33,11 +33,11 @@ class ONNXNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
     """
 
     @staticmethod
-    def reduce_min(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
+    def reduce_min(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
         return ONNXNNCFTensor(np.amin(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def reduce_max(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
+    def reduce_max(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
         return ONNXNNCFTensor(np.amax(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
@@ -53,16 +53,16 @@ def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
         return ONNXNNCFTensor(np.maximum(x1.tensor, x2.tensor))
 
     @staticmethod
-    def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
+    def mean(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
         return ONNXNNCFTensor(np.mean(x.tensor, axis=axis, keepdims=keepdims))
 
     @staticmethod
-    def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
+    def median(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
         return ONNXNNCFTensor(np.median(x.tensor, axis=axis, keepdims=keepdims))
 
     @classmethod
     def masked_mean(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.mean(x, axis=axis, keepdims=keepdims)
@@ -71,7 +71,7 @@ def masked_mean(
 
     @classmethod
     def masked_median(
-        cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
+        cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
     ) -> NNCFTensor:
         if mask is None:
             return cls.median(x, axis=axis, keepdims=keepdims)
@@ -82,7 +82,7 @@ def masked_median(
     def no_outliers_map(
         cls,
         x: NNCFTensor,
-        fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
+        fn: MaskedReduceFN,
         axis: int = 0,
         alpha: float = 0.01,
         keepdims: bool = False,
@@ -108,7 +108,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [ONNXNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]
 
     @staticmethod
-    def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
         raise NotImplementedError()
 
     @staticmethod
@@ -117,11 +117,21 @@ def sum(tensor: NNCFTensor) -> TensorElementsType:
 
     @staticmethod
     def quantile(
-        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
+        tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, Tuple, list], keepdims: bool = False
     ) -> List[TensorElementsType]:
         result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
         return [ONNXNNCFTensor(x) for x in result]
 
+    @classmethod
+    def precentile(
+        cls,
+        tensor: NNCFTensor,
+        precentile: Union[float, List[float]],
+        axis: Union[int, Tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        raise NotImplementedError()
+
     @staticmethod
     def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         if len(x.shape) < 3:
@@ -134,16 +144,12 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
     def batch_mean(x: NNCFTensor) -> NNCFTensor:
         return ONNXNNCFTensor(np.mean(x.tensor, axis=0, keepdims=True))
 
-    @classmethod
-    def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
-        raise NotImplementedError()
-
-    @classmethod
-    def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
-    @classmethod
-    def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
 
diff --git a/nncf/openvino/statistics/collectors.py b/nncf/openvino/statistics/collectors.py
index e1494b4a113..229aa6c8445 100644
--- a/nncf/openvino/statistics/collectors.py
+++ b/nncf/openvino/statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Tuple, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 
@@ -113,7 +113,7 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
     def no_outliers_map(
         cls,
         x: NNCFTensor,
-        fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
+        fn: MaskedReduceFN,
         axis: Union[int, Tuple[int, ...]] = 0,
         alpha: float = 0.01,
         keepdims: bool = False,
@@ -137,7 +137,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [OVNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]
 
     @staticmethod
-    def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
         return OVNNCFTensor(np.squeeze(x.tensor, axis=dim))
 
     @staticmethod
@@ -152,15 +152,22 @@ def quantile(
         return [OVNNCFTensor(x) for x in result]
 
     @classmethod
-    def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
-        return fn(x, mask=filter_fn(x))
+    def precentile(
+        cls,
+        tensor: NNCFTensor,
+        precentile: Union[float, List[float]],
+        axis: Union[int, tuple, list],
+        keepdims: bool = False,
+    ) -> List[TensorElementsType]:
+        quantile = np.true_divide(precentile, 100)
+        return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
 
-    @classmethod
-    def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
         return NNCFTensor(a.tensor - b.tensor)
 
-    @classmethod
-    def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
         np_tensor = x.tensor
         eps = np.finfo(np_tensor.dtype).eps
         return NNCFTensor(np.abs(np_tensor) < eps)
diff --git a/nncf/tensorflow/tensor_statistics/collectors.py b/nncf/tensorflow/tensor_statistics/collectors.py
index a2444cd4ef4..13722cb4bae 100644
--- a/nncf/tensorflow/tensor_statistics/collectors.py
+++ b/nncf/tensorflow/tensor_statistics/collectors.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any, Callable, Deque, List, Optional, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 import tensorflow as tf
@@ -65,12 +65,14 @@ def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTe
     def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
         raise NotImplementedError()
 
-    @staticmethod
-    def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    @classmethod
+    def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
         raise NotImplementedError()
 
-    @staticmethod
-    def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
+    @classmethod
+    def masked_median(
+        cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
+    ) -> NNCFTensor:
         raise NotImplementedError()
 
     @staticmethod
@@ -87,7 +89,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [TFNNCFTensor(t) for t in tensor_list]
 
     @staticmethod
-    def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
         raise NotImplementedError()
 
     @staticmethod
@@ -115,21 +117,15 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
         raise NotImplementedError()
 
     @classmethod
-    def no_outliers_map(
-        cls, x: NNCFTensor, fn: Callable[[NNCFTensor, Optional[int]], Any], axis: int = 0, alpha: float = 0.01
-    ):
+    def no_outliers_map(cls, x: NNCFTensor, fn: MaskedReduceFN, axis: int = 0, alpha: float = 0.01):
         raise NotImplementedError()
 
-    @classmethod
-    def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
-        raise NotImplementedError()
-
-    @classmethod
-    def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
-    @classmethod
-    def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
         raise NotImplementedError()
 
 
diff --git a/nncf/torch/tensor_statistics/algo.py b/nncf/torch/tensor_statistics/algo.py
index e40fc253181..1a1e621cd9c 100644
--- a/nncf/torch/tensor_statistics/algo.py
+++ b/nncf/torch/tensor_statistics/algo.py
@@ -65,7 +65,6 @@ def hook_obj(x, collector):
 
                 command = PTInsertionCommand(
                     op.target_point,
-                    # collector.register_inputs,
                     partial(hook_obj, collector=collector),
                     TransformationPriority.FP32_TENSOR_STATISTICS_OBSERVATION,
                 )
diff --git a/nncf/torch/tensor_statistics/collectors.py b/nncf/torch/tensor_statistics/collectors.py
index 3bc88d6f5e7..f752da8065e 100644
--- a/nncf/torch/tensor_statistics/collectors.py
+++ b/nncf/torch/tensor_statistics/collectors.py
@@ -10,7 +10,7 @@
 # limitations under the License.
 
 from functools import partial
-from typing import Any, Callable, Deque, List, Optional, Tuple, Union
+from typing import Deque, List, Optional, Tuple, Union
 
 import numpy as np
 import torch
@@ -131,7 +131,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
         return [PTNNCFTensor(t) for t in tensor_list]
 
     @staticmethod
-    def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
+    def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
         return PTNNCFTensor(torch.squeeze(x.tensor, dim=dim))
 
     @staticmethod
@@ -167,7 +167,7 @@ def precentile(
     def no_outliers_map(
         cls,
         x: NNCFTensor,
-        fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
+        fn: MaskedReduceFN,
         axis: Union[int, Tuple[int, ...]] = 0,
         alpha: float = 0.01,
         keepdims: bool = False,
@@ -176,16 +176,12 @@ def no_outliers_map(
         outliers_mask = torch.logical_or(x.tensor < low_values.tensor, high_values.tensor < x.tensor)
         return fn(x, axis=axis, mask=PTNNCFTensor(outliers_mask), keepdims=keepdims)
 
-    @classmethod
-    def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
-        return fn(x, mask=filter_fn(x))
-
-    @classmethod
-    def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
         return NNCFTensor(a.tensor - b.tensor)
 
-    @classmethod
-    def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
+    @staticmethod
+    def zero_elements(x: NNCFTensor) -> NNCFTensor:
         pt_tensor = x.tensor
         eps = torch.finfo(pt_tensor.dtype).eps
         return NNCFTensor(pt_tensor.abs() < eps)
diff --git a/tests/common/experimental/test_reducers_and_aggregators.py b/tests/common/experimental/test_reducers_and_aggregators.py
index e2f40d1a9ec..bd3a91976ce 100644
--- a/tests/common/experimental/test_reducers_and_aggregators.py
+++ b/tests/common/experimental/test_reducers_and_aggregators.py
@@ -344,8 +344,8 @@ def test_mean_median_agggregators(self, aggregator_cls, refs, tensor_processor,
             ),
         ],
     )
-    @pytest.mark.parametrize("aggregation_axes,ref_expand_axes", [(None, (0,)), ((0,), (0,)), ((0, 1), (0, 1))])
-    def test_mad_precentile_aggregators(self, aggregator_cls, tensor_processor, aggregation_axes, ref_expand_axes):
+    @pytest.mark.parametrize("aggregation_axes", [None, (0,), (0, 1)])
+    def test_mad_precentile_aggregators(self, aggregator_cls, tensor_processor, aggregation_axes):
         aggregator = aggregator_cls(tensor_processor=tensor_processor, aggregation_axes=aggregation_axes)
         input_ = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32)
         for i in range(9):
diff --git a/tests/common/test_statistics_aggregator.py b/tests/common/test_statistics_aggregator.py
index b23f5a9cbb7..bbd060fd52a 100644
--- a/tests/common/test_statistics_aggregator.py
+++ b/tests/common/test_statistics_aggregator.py
@@ -429,8 +429,6 @@ def filter_func(point):
             shape = (3, 1, 1, 1)
             ref_min_val, ref_max_val = map(lambda x: np.reshape(x, shape), (ref_min_val, ref_max_val))
 
-        if not np.allclose(stat.min_values, ref_min_val):
-            stat = tensor_collector.get_statistics()
         assert np.allclose(stat.min_values, ref_min_val)
         assert np.allclose(stat.max_values, ref_max_val)
         if isinstance(ref_min_val, np.ndarray):
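For context on the aggregator change in `nncf/experimental/common/tensor_statistics/collectors.py`: in `_aggregate_impl` the removed `masked_map` indirection is replaced by an explicit two-step computation. A mask of near-zero elements is built with `zero_elements` and passed to `masked_median`, and the MAD is then the median of `|x - median_per_ch|`. A minimal standalone NumPy sketch of that control flow (illustrative only; `mad_aggregate` and the sample data below are not part of the NNCF API):

```python
import numpy as np


def mad_aggregate(stacked: np.ndarray, axis: int = 0):
    """Per-channel median and median absolute deviation of stacked statistics."""
    # zero_elements(): boolean mask, True where |x| is below the dtype's machine epsilon.
    eps = np.finfo(stacked.dtype).eps
    zero_mask = np.abs(stacked) < eps

    # masked_median(): median over the unmasked (i.e. non-zero) elements only.
    masked = np.ma.array(stacked, mask=zero_mask)
    median_per_ch = np.ma.median(masked, axis=axis, keepdims=True).filled(0.0)

    # MAD: median of absolute deviations from the per-channel median.
    mad_values = np.median(np.abs(stacked - median_per_ch), axis=axis)
    return np.squeeze(median_per_ch, axis=axis), mad_values


# Nine registered samples of a 4-element statistic, stacked along axis 0.
samples = np.stack([i * np.array([0.0, 1.0, 2.0, 3.0], dtype=np.float32) for i in range(1, 10)])
medians, mads = mad_aggregate(samples)
```

In the patch itself these steps go through the backend-agnostic `NNCFCollectorTensorProcessor` methods (`zero_elements`, `masked_median`, `sub`, `abs`, `median`) rather than raw NumPy calls.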
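The new `precentile` override in `OVNNCFCollectorTensorProcessor` delegates to `quantile` after dividing by 100, so percentile and quantile results should agree for any backend that implements both. A quick self-contained sanity check of that identity in plain NumPy (values chosen for illustration):

```python
import numpy as np

data = np.arange(10, dtype=np.float32)
# precentile(p) == quantile(p / 100): percentiles are quantiles scaled by 100.
assert np.allclose(np.quantile(data, 75 / 100), np.percentile(data, 75))
```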