Fix tests
daniil-lyakhov committed Sep 8, 2023
1 parent 79cf566 commit 5ff5953
Showing 10 changed files with 68 additions and 15 deletions.
7 changes: 3 additions & 4 deletions nncf/common/tensor_statistics/collectors.py
@@ -324,18 +324,17 @@ def no_outliers_map(
"""

@classmethod
@abstractmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
""" """

@classmethod
@abstractmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
""""""

@classmethod
def filter_by_fn(cls, x: NNCFTensor, filter_fn) -> NNCFTensor:
""" """

@classmethod
@abstractmethod
def non_zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
""" """

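The new hooks above are left abstract for each backend to implement. As a rough illustration of the intended semantics only — the use of numpy.ma for masking, the plain-array signatures, and the assumption that filter_fn returns a boolean mask of the same shape are guesses, not the actual backend code — a minimal NumPy sketch could look like this:

import numpy as np

class _SketchTensorProcessor:
    @classmethod
    def masked_map(cls, x, fn, filter_fn):
        # Apply the reduction fn only to the elements that filter_fn keeps.
        keep = filter_fn(x)
        return fn(np.ma.masked_array(x, mask=~keep))

    @classmethod
    def sub(cls, a, b):
        # Element-wise difference of two tensors.
        return a - b

    @classmethod
    def filter_by_fn(cls, x, filter_fn):
        # Keep only the elements selected by the predicate.
        return x[filter_fn(x)]

    @classmethod
    def non_zero_elements(cls, x):
        # Boolean mask marking the non-zero entries.
        return x != 0

For example, _SketchTensorProcessor.masked_map(np.array([1.0, 50.0, 2.0]), np.ma.median, lambda t: t < 10) keeps 1.0 and 2.0 and returns their median.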
25 changes: 21 additions & 4 deletions nncf/onnx/statistics/collectors.py
@@ -15,6 +15,7 @@

from nncf.common.tensor import NNCFTensor
from nncf.common.tensor import TensorElementsType
from nncf.common.tensor_statistics.collectors import MaskedReduceFN
from nncf.common.tensor_statistics.collectors import MeanMinMaxStatisticCollector
from nncf.common.tensor_statistics.collectors import MeanStatisticCollector
from nncf.common.tensor_statistics.collectors import MinMaxStatisticCollector
@@ -106,6 +107,10 @@ def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFT
def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
return [ONNXNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]

@staticmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
raise NotImplementedError()

@staticmethod
def sum(tensor: NNCFTensor) -> TensorElementsType:
return np.sum(tensor.tensor)
@@ -129,11 +134,23 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
def batch_mean(x: NNCFTensor) -> NNCFTensor:
return ONNXNNCFTensor(np.mean(x.tensor, axis=0, keepdims=True))

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def non_zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()


class ONNXMinMaxStatisticCollector(MinMaxStatisticCollector):
@staticmethod
def _get_processor() -> NNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

def _register_input(self, x: ONNXNNCFTensor):
self._register_input_common(x)
@@ -145,7 +162,7 @@ def _get_statistics(self) -> ONNXMinMaxTensorStatistic:
class ONNXMeanMinMaxStatisticCollector(MeanMinMaxStatisticCollector):
@staticmethod
def _get_processor() -> NNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

def _register_input(self, x: ONNXNNCFTensor):
self._register_input_common(x)
@@ -157,7 +174,7 @@ def _get_statistics(self) -> ONNXMinMaxTensorStatistic:
class ONNXMeanStatisticCollector(MeanStatisticCollector):
@staticmethod
def _get_processor() -> NNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

def _register_input(self, x: ONNXNNCFTensor):
self._register_input_common(x)
@@ -169,7 +186,7 @@ def _get_statistics(self) -> ONNXMeanTensorStatistic:
class ONNXRawStatisticCollector(RawStatisticCollector):
@staticmethod
def _get_processor() -> NNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

def _register_input(self, x: ONNXNNCFTensor):
self._register_input_common(x)
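A side note on the repeated _get_processor change above (the same pattern recurs in the tensor_processor properties further down): returning the class itself instead of an instance works because every member of these processor classes is a @staticmethod or @classmethod, so callers can invoke them either way. A tiny stand-alone illustration with invented names:

class _StatelessProcessor:
    @staticmethod
    def reduce_sum(values):
        return sum(values)

# Both forms behave identically because no instance state is involved;
# returning the class simply avoids constructing a throwaway object.
assert _StatelessProcessor.reduce_sum([1, 2, 3]) == _StatelessProcessor().reduce_sum([1, 2, 3])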
17 changes: 17 additions & 0 deletions nncf/openvino/statistics/collectors.py
@@ -15,6 +15,7 @@

from nncf.common.tensor import NNCFTensor
from nncf.common.tensor import TensorElementsType
from nncf.common.tensor_statistics.collectors import MaskedReduceFN
from nncf.common.tensor_statistics.collectors import NNCFCollectorTensorProcessor
from nncf.experimental.common.tensor_statistics.collectors import AbsMaxReducer
from nncf.experimental.common.tensor_statistics.collectors import AbsQuantileReducer
@@ -135,6 +136,10 @@ def stack(x: Union[List[NNCFTensor], Deque[NNCFTensor]], axis: int = 0) -> NNCFT
def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
return [OVNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]

@staticmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
raise NotImplementedError()

@staticmethod
def sum(tensor: NNCFTensor) -> TensorElementsType:
return np.sum(tensor.tensor)
@@ -146,6 +151,18 @@ def quantile(
result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
return [OVNNCFTensor(x) for x in result]

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def non_zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()


class OVNoopReducer(NoopReducer):
def get_output_names(self, target_node_name: str, port_id: int) -> List[str]:
@@ -43,7 +43,7 @@
class ONNXBiasCorrectionAlgoBackend(BiasCorrectionAlgoBackend):
@property
def tensor_processor(self) -> ONNXNNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

@property
def types_to_insert_bias(self):
@@ -42,7 +42,7 @@ def types_to_insert_bias(self):

@property
def tensor_processor(self) -> ONNXNNCFCollectorTensorProcessor:
return ONNXNNCFCollectorTensorProcessor()
return ONNXNNCFCollectorTensorProcessor

@staticmethod
def target_point(target_type: TargetType, target_node_name: str, port_id: int) -> ONNXTargetPoint:
4 changes: 2 additions & 2 deletions nncf/quantization/algorithms/smooth_quant/openvino_backend.py
@@ -69,8 +69,8 @@ def get_abs_max_channel_collector(
num_samples: int, stats_reduction_shape: Tuple[int], inplace: bool, branch_key: str
) -> TensorCollector:
collector = TensorCollector()
reducer = OVAbsMaxReducer(stats_reduction_shape, inplace)
aggregator = MaxAggregator(OVNNCFCollectorTensorProcessor, num_samples)
reducer = OVAbsMaxReducer(reduction_axes=stats_reduction_shape, inplace=inplace)
aggregator = MaxAggregator(tensor_processor=OVNNCFCollectorTensorProcessor, num_samples=num_samples)
collector.register_statistic_branch(branch_key, reducer, aggregator)
return collector

16 changes: 16 additions & 0 deletions nncf/tensorflow/tensor_statistics/collectors.py
@@ -85,6 +85,10 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
tensor_list = tf.unstack(tensor, axis=axis)
return [TFNNCFTensor(t) for t in tensor_list]

@staticmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
raise NotImplementedError()

@staticmethod
def sum(tensor: NNCFTensor) -> TensorElementsType:
return tf.reduce_sum(tensor.tensor).numpy()
@@ -105,6 +109,18 @@ def no_outliers_map(
):
raise NotImplementedError()

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def non_zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()


class TFMinMaxStatisticCollector(MinMaxStatisticCollector):
@staticmethod
@@ -19,6 +19,7 @@
from nncf.common.quantization.structs import QuantizationMode
from nncf.common.quantization.structs import QuantizerConfig
from nncf.common.quantization.structs import QuantizerGroup
from nncf.common.tensor_statistics.statistics import MinMaxTensorStatistic
from nncf.quantization.fake_quantize import FakeQuantizeParameters
from nncf.quantization.fake_quantize import calculate_quantizer_parameters
from tests.post_training.conftest import FQ_CALCULATED_PARAMETERS_PATH
@@ -213,7 +214,9 @@ def test_calculate_quantizer_parameters(self, case_to_test):
else:
max_values = np.amax(data, axis=axes, keepdims=q_config.per_channel)

statistics = self.tensor_statistic(max_values=max_values, min_values=min_values)
statistics = self.tensor_statistic(
{MinMaxTensorStatistic.MIN_STAT: min_values, MinMaxTensorStatistic.MAX_STAT: max_values}
)

if not case_to_test.should_fail:
fq_params = calculate_quantizer_parameters(statistics, q_config, quant_group, narrow_range, half_range)
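As the hunk above and the PTMinMaxTensorStatistic construction at the bottom of this diff show, min/max statistics are now built from a single dict keyed by the class's *_STAT constants rather than from min_values/max_values keyword arguments. A sketch of the pattern, with illustrative values and an import path assumed from NNCF's usual layout:

import torch
from nncf.torch.tensor_statistics.statistics import PTMinMaxTensorStatistic

statistic = PTMinMaxTensorStatistic(
    {PTMinMaxTensorStatistic.MIN_STAT: torch.tensor(0.0), PTMinMaxTensorStatistic.MAX_STAT: torch.tensor(1.0)}
)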
3 changes: 2 additions & 1 deletion tests/post_training/test_templates/test_channel_alignment.py
@@ -496,4 +496,5 @@ def test_statistic_collectors(self, inplace_ref, q_ref):
for aggr in statistic_collector.aggregators.values():
assert isinstance(aggr, MedianAggregator)
assert aggr.num_samples == num_samples_ref
assert not aggr._use_per_sample_stats
assert not aggr._keepdims
assert aggr._aggregation_axes == (0,)
2 changes: 1 addition & 1 deletion tests/torch/ptq/helpers.py
@@ -94,7 +94,7 @@ def mock_collect_statistics(mocker):
min_, max_ = 0.0, 1.0
min_, max_ = torch.tensor(min_), torch.tensor(max_)
_ = mocker.patch(
"nncf.common.tensor_statistics.collectors.TensorStatisticCollectorBase.get_statistics",
"nncf.experimental.common.tensor_statistics.collectors.TensorCollector.get_statistics",
return_value=PTMinMaxTensorStatistic(
{PTMinMaxTensorStatistic.MIN_STAT: min_, PTMinMaxTensorStatistic.MAX_STAT: max_}
),
