Commit 9b119fe

WIP

daniil-lyakhov committed Sep 7, 2023
1 parent 4e099c8 commit 9b119fe
Showing 7 changed files with 221 additions and 70 deletions.
4 changes: 3 additions & 1 deletion nncf/common/tensor_statistics/collectors.py
@@ -307,7 +307,9 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:

@classmethod
@abstractmethod
def no_outliers_map(cls, x: NNCFTensor, fn: MaskedReduceFN, axis: int = 0, alpha: float = 0.01) -> NNCFTensor:
def no_outliers_map(
cls, x: NNCFTensor, fn: MaskedReduceFN, axis: Union[int, Tuple[int, ...]] = 0, alpha: float = 0.01
) -> NNCFTensor:
"""
Computes quantiles [alpha, 1 - alpha] on given tensor, masks all elements that
are smaller than the alpha quantile or bigger than the 1 - alpha quantile and applies
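The contract described in this docstring can be sketched with plain NumPy (the shapes and the `alpha` value below are illustrative assumptions, not taken from the commit):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(16, 4))  # e.g. 16 collected samples, 4 channels
alpha = 0.01

# Per-channel alpha and (1 - alpha) quantiles, shape (4,) each.
low, high = np.quantile(x, [alpha, 1 - alpha], axis=0)

# Mask everything outside the [alpha, 1 - alpha] band ...
outliers = np.logical_or(x < low, x > high)

# ... and apply the reduction only to the values that remain.
robust_mean = np.ma.array(x, mask=outliers).mean(axis=0)
print(robust_mean.shape)  # (4,)
```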
21 changes: 11 additions & 10 deletions nncf/experimental/common/tensor_statistics/collectors.py
@@ -120,7 +120,7 @@ class TensorAggregatorBase:
def __init__(
self,
tensor_processor: NNCFCollectorTensorProcessor,
aggregation_axes: Union[int, Tuple[int, ...]] = (0,),
aggregation_axes: Optional[Tuple[int, ...]] = None,
keepdims: bool = False,
num_samples: Optional[int] = None,
):
@@ -132,7 +132,7 @@ def __init__(
"""

self._tensor_processor = tensor_processor
self._aggregation_axes = aggregation_axes
self._aggregation_axes = (0,) if aggregation_axes is None else aggregation_axes
self._keepdims = keepdims
self._num_samples = num_samples
self._collected_samples = 0
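The move from a tuple default to `Optional[Tuple[int, ...]] = None` with a fallback in the body is the usual pattern for container-valued defaults; a minimal sketch of the idea (class name and call sites are illustrative):

```python
from typing import Optional, Tuple


class Aggregator:
    def __init__(self, aggregation_axes: Optional[Tuple[int, ...]] = None):
        # Resolve the None sentinel inside the body, so subclasses can simply
        # forward whatever they received instead of re-declaring the default.
        self._aggregation_axes = (0,) if aggregation_axes is None else aggregation_axes


print(Aggregator()._aggregation_axes)        # (0,)
print(Aggregator((0, 1))._aggregation_axes)  # (0, 1)
```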
@@ -495,15 +495,17 @@ class AbsQuantileReducer(QuantileReducerBase):
def __init__(
self,
reduction_axes: Optional[ReductionShape] = None,
quantile: Union[float, List[float]] = 0.99,
quantile: Optional[Union[float, List[float]]] = None,
inplace: bool = False,
keepdims: bool = True,
):
super().__init__(reduction_axes, quantile, False)
quantile = (0.99,) if quantile is None else quantile
super().__init__(reduction_axes, quantile, False, keepdims)

def _reduce_out_of_place(self, x: List[NNCFTensor]) -> List[NNCFTensor]:
x = self._tensor_processor.abs(x[0])
reduction_shape = self._get_reduction_shape(x)
return self._tensor_processor.quantile(x, [self._quantile], reduction_shape, keepdims=self._keepdims)
return self._tensor_processor.quantile(x, self._quantile, reduction_shape, keepdims=self._keepdims)


class BatchMeanReducer(TensorReducerBase):
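The abs-then-quantile reduction in `_reduce_out_of_place` can be reproduced directly with NumPy; a hedged sketch with made-up shapes, using the new tuple default for `quantile`:

```python
import numpy as np

x = np.random.default_rng(0).normal(size=(16, 8))  # illustrative tensor
quantile = (0.99,)  # the new default is already a sequence, no extra wrapping needed

# keepdims=True keeps the reduced axis, so the result stays broadcastable
# against the original tensor.
result = np.quantile(np.abs(x), quantile, axis=0, keepdims=True)
print(result.shape)  # (1, 1, 8): one leading entry per requested quantile
```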
@@ -551,7 +553,7 @@ class OnlineOfflineAggregatorBase(TensorAggregatorBase):
def __init__(
self,
tensor_processor: NNCFCollectorTensorProcessor,
aggregation_axes: Union[int, Tuple[int, ...]] = 0,
aggregation_axes: Optional[Tuple[int, ...]] = None,
keepdims: bool = False,
num_samples: Optional[int] = None,
window_size=None,
@@ -618,15 +620,14 @@ class NoOutliersAggregatorBase(OfflineAggregatorBase, ABC):
def __init__(
self,
tensor_processor: NNCFCollectorTensorProcessor,
aggregation_axes: Union[int, Tuple[int, ...]] = 0,
aggregation_axes: Optional[Tuple[int, ...]] = None,
keepdims: bool = False,
num_samples: Optional[int] = None,
window_size=None,
quantile: float = 0.01,
):
assert len(aggregation_axes) == 1
super().__init__(
tensor_processor, aggregation_axes=aggregation_axes[0], keepdims=keepdims, num_samples=num_samples
tensor_processor, aggregation_axes=aggregation_axes, keepdims=keepdims, num_samples=num_samples
)
self._window_size = window_size
self._container = deque(maxlen=window_size)
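Forwarding the full `aggregation_axes` tuple to the parent class (instead of asserting a single axis) allows the stacked sample window to be reduced over several axes at once; a minimal NumPy sketch with hypothetical shapes:

```python
import numpy as np

# Hypothetical window of 3 collected samples, each of shape (2, 4).
window = [np.full((2, 4), float(i)) for i in (1, 2, 3)]
stacked = np.stack(window)  # shape (3, 2, 4); axis 0 enumerates the samples

# Reducing over a tuple of axes collapses the sample axis and axis 1 together.
print(np.mean(stacked, axis=(0, 1)).shape)  # (4,)
```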
@@ -681,7 +682,7 @@ def __init__(
self,
tensor_processor: NNCFCollectorTensorProcessor,
percentiles_to_collect: List[float],
aggregation_axes: Union[int, Tuple[int, ...]] = 0,
aggregation_axes: Optional[Tuple[int, ...]] = None,
keepdims: bool = False,
num_samples: Optional[int] = None,
window_size=None,
25 changes: 11 additions & 14 deletions nncf/openvino/statistics/collectors.py
@@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, Deque, List, Optional, Union
from typing import Any, Callable, Deque, List, Optional, Tuple, Union

import numpy as np

@@ -49,11 +49,11 @@ class OVNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
"""

@staticmethod
def reduce_min(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = True) -> NNCFTensor:
def reduce_min(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = True) -> NNCFTensor:
return OVNNCFTensor(np.amin(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
def reduce_max(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = True) -> NNCFTensor:
def reduce_max(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = True) -> NNCFTensor:
return OVNNCFTensor(np.amax(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
@@ -69,16 +69,16 @@ def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
return OVNNCFTensor(np.maximum(x1.tensor, x2.tensor))

@staticmethod
def mean(x: NNCFTensor, axis: Union[int, tuple], keepdims: bool = False) -> NNCFTensor:
def mean(x: NNCFTensor, axis: Union[int, Tuple], keepdims: bool = False) -> NNCFTensor:
return OVNNCFTensor(np.mean(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
def median(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
return OVNNCFTensor(np.median(x.tensor, axis=axis, keepdims=keepdims))

@classmethod
def masked_mean(
cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
) -> NNCFTensor:
if mask is None:
return cls.mean(x, axis=axis, keepdims=keepdims)
@@ -87,7 +87,7 @@ def masked_mean(

@classmethod
def masked_median(
cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
) -> NNCFTensor:
if mask is None:
return cls.median(x, axis=axis, keepdims=keepdims)
@@ -107,20 +107,17 @@ def no_outliers_map(
cls,
x: NNCFTensor,
fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
axis: int = 0,
axis: Union[int, Tuple[int, ...]] = 0,
alpha: float = 0.01,
keepdims: bool = False,
) -> NNCFTensor:
if len(x.shape) == 1:
return fn(x, axis=None, mask=None, keepdims=keepdims)

x = x.tensor
if axis:
x = np.moveaxis(x, axis, 0)

low_values, high_values = np.quantile(x, [alpha, 1 - alpha], 0)
low_values, high_values = np.quantile(x, [alpha, 1 - alpha], axis=axis)
outliers_mask = np.logical_or(x < low_values, high_values < x)
return fn(OVNNCFTensor(x), axis=0, mask=OVNNCFTensor(outliers_mask), keepdims=keepdims)
return fn(OVNNCFTensor(x), axis=axis, mask=OVNNCFTensor(outliers_mask), keepdims=keepdims)

@staticmethod
def batch_mean(x: NNCFTensor) -> NNCFTensor:
@@ -141,7 +138,7 @@ def sum(tensor: NNCFTensor) -> TensorElementsType:

@staticmethod
def quantile(
tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, Tuple, list], keepdims: bool = False
) -> List[NNCFTensor]:
result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
return [OVNNCFTensor(x) for x in result]
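`np.quantile` with a sequence of quantiles prepends a quantile axis to the result, which is why the method returns one tensor per requested quantile; a brief illustration with made-up values:

```python
import numpy as np

x = np.arange(12, dtype=float).reshape(3, 4)

result = np.quantile(x, [0.25, 0.75], axis=0)
print(result.shape)          # (2, 4): one row per requested quantile
per_quantile = list(result)  # two (4,) arrays, mirroring the list the method builds
```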
16 changes: 6 additions & 10 deletions nncf/torch/tensor_statistics/collectors.py
@@ -100,8 +100,8 @@ def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTen
if mask is None:
return cls.mean(x, axis=axis, keepdims=keepdims)
masked_x = np.ma.array(x.tensor.detach().cpu().numpy(), mask=mask.tensor)
result = np.ma.mean(masked_x, axis=axis, keepdims=False)
if len(result) == 1:
result = np.ma.mean(masked_x, axis=axis, keepdims=False).astype(masked_x.dtype)
if result.size <= 1:
return PTNNCFTensor(torch.tensor(result))
return PTNNCFTensor(torch.tensor(result.data))
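The size check above reflects how masked reductions come back from NumPy: a full reduction yields a size-1 scalar, while a partial one yields a masked array whose raw values sit in `.data`. A short illustration (values are illustrative):

```python
import numpy as np

x = np.ma.array([[1.0, 2.0], [3.0, 4.0]], mask=[[False, True], [False, False]])

full = np.ma.mean(x)             # scalar-like result, size 1
partial = np.ma.mean(x, axis=0)  # masked array of shape (2,)

print(full.size, partial.size)   # 1 2
print(partial.data)              # the plain ndarray behind the masked result
```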

@@ -181,20 +181,16 @@ def no_outliers_map(
cls,
x: NNCFTensor,
fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
axis: int = 0,
axis: Union[int, Tuple[int, ...]] = 0,
alpha: float = 0.01,
keepdims: bool = False,
):
if len(x.shape) == 1:
return fn(x, axis=None, mask=None, keepdims=keepdims)

x = x.tensor
if axis:
x = torch.moveaxis(x, [axis] if isinstance(axis, int) else axis, 0)

low_values, high_values = cls.quantile(x, [alpha, 1 - alpha], 0)
outliers_mask = np.logical_or(x < low_values, high_values < x)
return fn(x, axis=0, mask=PTNNCFTensor(outliers_mask), keepdims=keepdims)
low_values, high_values = cls.quantile(x, [alpha, 1 - alpha], axis=axis)
outliers_mask = torch.logical_or(x.tensor < low_values.tensor, high_values.tensor < x.tensor)
return fn(x, axis=axis, mask=PTNNCFTensor(outliers_mask), keepdims=keepdims)

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor: