
Commit

Docstrings
daniil-lyakhov committed Sep 15, 2023
1 parent 8848d75 commit 884aecb
Showing 9 changed files with 114 additions and 92 deletions.
59 changes: 37 additions & 22 deletions nncf/common/tensor_statistics/collectors.py
@@ -199,9 +199,9 @@ def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
:return: Reduced NNCFTensor.
"""

@staticmethod
@classmethod
@abstractmethod
def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
def masked_mean(cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
"""
Computes the masked mean of elements across given dimensions of NNCFTensor.
@@ -214,9 +214,11 @@ def masked_mean(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor,
:return: Reduced NNCFTensor.
"""

@staticmethod
@classmethod
@abstractmethod
def masked_median(x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False) -> NNCFTensor:
def masked_median(
cls, x: NNCFTensor, axis: Union[int, tuple, list], mask: NNCFTensor, keepdims=False
) -> NNCFTensor:
"""
Computes the masked median of elements across given dimensions of NNCFTensor.
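To make the masked-reduction contract described above concrete, here is a minimal plain-NumPy sketch (not NNCF code); it assumes masked entries are simply excluded from the reduction, as the numpy.ma-based backend implementations later in this commit do.

import numpy as np

# Minimal sketch of the masked mean/median contract: entries where the mask
# is True are excluded from the reduction along the given axis.
x = np.array([[1.0, 100.0, 3.0], [4.0, 5.0, 6.0]])
mask = np.array([[False, True, False], [False, False, False]])

masked = np.ma.array(x, mask=mask)
print(np.ma.mean(masked, axis=1))    # [2.0 5.0], the 100.0 entry is ignored
print(np.ma.median(masked, axis=1))  # [2.0 5.0]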
@@ -253,8 +255,13 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:

@staticmethod
@abstractmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
""""""
def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
"""
Remove axes of length one from x.
:param x: NNCFTensor to squeeze.
:param dim: Selects a subset of the entries of length one in the shape.
"""

@staticmethod
@abstractmethod
@@ -272,15 +279,15 @@ def quantile(
tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
) -> List[TensorElementsType]:
"""
Compute the quantile-th percentile(s) of the data along the specified axis.
Compute the quantile(s) of the data along the specified axis.
:param tensor: Given NNCFTensor.
:params quantile: Percentile or sequence of percentiles to compute, which must be between
:param quantile: Quantile or sequence of quantiles to compute, which must be between
0 and 1 inclusive.
:param axis: Axis or axes along which the percentiles are computed.
:param axis: Axis or axes along which the quantiles are computed.
:param keepdims: If True, the axes which are reduced are left in the result
as dimensions with size one.
:returns: List of the quantile-th percentile(s) of the tensor elements.
:returns: List of the quantile(s) of the tensor elements.
"""

@classmethod
@@ -292,9 +299,17 @@ def precentile(
axis: Union[int, tuple, list],
keepdims: bool = False,
) -> List[TensorElementsType]:
""""""
quantile = np.true_divide(precentile, 100)
return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
"""
Compute the percentile(s) of the data along the specified axis.
:param tensor: Given NNCFTensor.
:param precentile: Percentile or sequence of percentiles to compute, which must be between
0 and 100 inclusive.
:param axis: Axis or axes along which the percentiles are computed.
:param keepdims: If True, the axes which are reduced are left in the result
as dimensions with size one.
:returns: List of the percentile(s) of the tensor elements.
"""

@staticmethod
@abstractmethod
@@ -325,20 +340,20 @@ def no_outliers_map(
:returns: Result of the given masked reduction function applied to the NNCFTensor with outliers filtered out.
"""

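The alpha parameter in the no_outliers_map signature controls how aggressively outliers are filtered. One common way to implement this, sketched below in plain NumPy (an assumption for illustration, not necessarily the NNCF implementation), is to mask values outside the [alpha, 1 - alpha] quantile range before applying a masked reduction.

import numpy as np

# Sketch only: mask values outside the [alpha, 1 - alpha] quantile range,
# then apply a masked mean along the aggregation axis.
def no_outliers_mean(x: np.ndarray, axis: int = 0, alpha: float = 0.01) -> np.ndarray:
    low, high = np.quantile(x, [alpha, 1.0 - alpha], axis=axis, keepdims=True)
    outliers = (x < low) | (x > high)
    return np.ma.mean(np.ma.array(x, mask=outliers), axis=axis)

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(size=(1000, 4)), [[1e6] * 4]])
print(no_outliers_mean(x, axis=0))  # close to 0; the 1e6 row is filtered out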
@classmethod
@abstractmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
""" """

@classmethod
@staticmethod
@abstractmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
""""""
def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
"""
Returns the result of subtracting b from a.
"""

@classmethod
@abstractmethod
def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
""" """
"""
Returns a binary mask from the input x which is True for all elements whose absolute value is smaller than
the corresponding machine epsilon.
"""


class MinMaxStatisticCollector(OnlineTensorStatisticCollector):
27 changes: 16 additions & 11 deletions nncf/experimental/common/tensor_statistics/collectors.py
@@ -34,11 +34,9 @@ class TensorReducerBase(ABC):

def __init__(self, reduction_axes: Optional[ReductionShape] = None, inplace: bool = False):
"""
:param reduction_shape: Reduction shape for reduction calculation. Equal to list(range(len(input.shape)))
:param reduction_axes: Reduction axes for reduction calculation. Equal to list(range(len(input.shape)))
if empty.
:param inplace: Whether the reduction should be calculated inplace or out of place.
:param keepdims: Should the axes which are reduced are left in the result
as dimensions with size one or not.
"""
self._reduction_axes = reduction_axes
self._tensor_processor: NNCFCollectorTensorProcessor = self._get_processor()
@@ -125,6 +123,9 @@ def __init__(
):
"""
:param tensor_processor: Backend-specific tensor processor.
:param aggregation_axes: Axes along which to aggregate.
Registered statistics are stacked along a new zero axis, so
axes >= 1 correspond to the received statistic axes shifted by 1 (statistic axis i becomes axis i + 1).
:param num_samples: Maximum number of samples to collect. Aggregator
skips tensor registration if tensor registration was called num_samples times before.
Aggregator never skips registration if num_samples is None.
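A small NumPy illustration of the stacking convention described in the aggregation_axes docstring above (a sketch, not NNCF code):

import numpy as np

# Registered statistics are stacked along a new zero axis, so axis i of a
# single statistic becomes axis i + 1 of the stacked tensor.
stat = np.ones((4, 8))                   # one registered statistic
stacked = np.stack([stat, stat, stat])   # shape (3, 4, 8); axis 0 = samples

print(stacked.mean(axis=(0,)).shape)     # (4, 8): aggregate over samples only
print(stacked.mean(axis=(0, 1)).shape)   # (8,): also reduce statistic axis 0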
@@ -300,11 +301,13 @@ def register_inputs(self, inputs: Dict[int, List[NNCFTensor]]) -> None:
if reducer_hash in reduced_inputs:
aggregator.register_reduced_input(reduced_inputs[reducer_hash][reducer_port_id])

def register_unnamed_inputs(self, inputs: NNCFTensor):
formated_inputs = {}
for reducer in self._reducers:
formated_inputs[hash(reducer)] = [inputs]
self.register_inputs(formated_inputs)
def register_unnamed_inputs(self, input_: NNCFTensor) -> None:
"""
Registers the given input_ in each available statistic collection branch.
:param input_: Tensor input to register.
"""
self.register_inputs({hash(reducer): [input_] for reducer in self._reducers})
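For reference, a hedged sketch of the fan-out performed by register_unnamed_inputs() above; the _DummyReducer class is a hypothetical stand-in, not an NNCF type.

# Sketch: one unnamed tensor is routed to every reducer branch, keyed by the
# reducer's hash, then handed to the existing register_inputs() path.
class _DummyReducer:
    def __init__(self, name: str):
        self._name = name

    def __hash__(self) -> int:
        return hash(self._name)

reducers = [_DummyReducer("min"), _DummyReducer("max")]
tensor = "tensor-stand-in"  # stands in for an NNCFTensor

formatted_inputs = {hash(reducer): [tensor] for reducer in reducers}
assert len(formatted_inputs) == len(reducers)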

def _aggregate(self) -> None:
result = {}
@@ -654,9 +657,11 @@ def _register_reduced_input_impl(self, x: TensorType) -> None:

def _aggregate_impl(self) -> Any:
stacked_val = self._tensor_processor.stack(self._container)
median_fn = partial(self._tensor_processor.masked_median, axis=self._aggregation_axes, keepdims=True)
filter_fn = self._tensor_processor.zero_elements
median_per_ch = self._tensor_processor.masked_map(stacked_val, median_fn, filter_fn)

mask = self._tensor_processor.zero_elements(stacked_val)
median_per_ch = self._tensor_processor.masked_median(
stacked_val, mask=mask, axis=self._aggregation_axes, keepdims=True
)

mad_values = self._tensor_processor.median(
self._tensor_processor.abs(self._tensor_processor.sub(stacked_val, median_per_ch)),
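The rewritten _aggregate_impl() above masks near-zero elements, takes a masked median per channel, and then takes the median of absolute deviations around it. A plain-NumPy sketch of that sequence, assuming the registered statistics are stacked along axis 0:

import numpy as np

# Sketch of the median-absolute-deviation aggregation: near-zero elements are
# excluded from the per-channel median, then MAD is computed around it.
stacked = np.array([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])  # (samples, channels)

mask = np.abs(stacked) < np.finfo(stacked.dtype).eps       # zero_elements()
masked = np.ma.array(stacked, mask=mask)

median_per_ch = np.asarray(np.ma.median(masked, axis=0, keepdims=True))
mad_values = np.median(np.abs(stacked - median_per_ch), axis=0)
print(median_per_ch, mad_values)  # [[3. 3.]] [1. 2.]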
40 changes: 23 additions & 17 deletions nncf/onnx/statistics/collectors.py
@@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, Deque, List, Optional, Union
from typing import Deque, List, Optional, Tuple, Union

import numpy as np

@@ -33,11 +33,11 @@ class ONNXNNCFCollectorTensorProcessor(NNCFCollectorTensorProcessor):
"""

@staticmethod
def reduce_min(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
def reduce_min(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
return ONNXNNCFTensor(np.amin(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
def reduce_max(x: NNCFTensor, axis: Union[int, tuple, list], keepdims: bool = False) -> NNCFTensor:
def reduce_max(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims: bool = False) -> NNCFTensor:
return ONNXNNCFTensor(np.amax(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
@@ -53,16 +53,16 @@ def max(x1: NNCFTensor, x2: NNCFTensor) -> NNCFTensor:
return ONNXNNCFTensor(np.maximum(x1.tensor, x2.tensor))

@staticmethod
def mean(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
def mean(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
return ONNXNNCFTensor(np.mean(x.tensor, axis=axis, keepdims=keepdims))

@staticmethod
def median(x: NNCFTensor, axis: Union[int, tuple, list], keepdims=False) -> NNCFTensor:
def median(x: NNCFTensor, axis: Union[int, Tuple, list], keepdims=False) -> NNCFTensor:
return ONNXNNCFTensor(np.median(x.tensor, axis=axis, keepdims=keepdims))

@classmethod
def masked_mean(
cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
) -> NNCFTensor:
if mask is None:
return cls.mean(x, axis=axis, keepdims=keepdims)
@@ -71,7 +71,7 @@ def masked_mean(

@classmethod
def masked_median(
cls, x: NNCFTensor, axis: Optional[Union[int, tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
cls, x: NNCFTensor, axis: Optional[Union[int, Tuple, list]], mask: Optional[NNCFTensor], keepdims: bool = False
) -> NNCFTensor:
if mask is None:
return cls.median(x, axis=axis, keepdims=keepdims)
@@ -82,7 +82,7 @@ def no_outliers_map(
def no_outliers_map(
cls,
x: NNCFTensor,
fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
fn: MaskedReduceFN,
axis: int = 0,
alpha: float = 0.01,
keepdims: bool = False,
@@ -108,7 +108,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
return [ONNXNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]

@staticmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
raise NotImplementedError()

@staticmethod
Expand All @@ -117,11 +117,21 @@ def sum(tensor: NNCFTensor) -> TensorElementsType:

@staticmethod
def quantile(
tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, tuple, list], keepdims: bool = False
tensor: NNCFTensor, quantile: Union[float, List[float]], axis: Union[int, Tuple, list], keepdims: bool = False
) -> List[TensorElementsType]:
result = np.quantile(tensor.tensor, quantile, axis, keepdims=keepdims)
return [ONNXNNCFTensor(x) for x in result]

@classmethod
def precentile(
cls,
tensor: NNCFTensor,
precentile: Union[float, List[float]],
axis: Union[int, Tuple, list],
keepdims: bool = False,
) -> List[TensorElementsType]:
raise NotImplementedError()

@staticmethod
def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
if len(x.shape) < 3:
@@ -134,16 +144,12 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
def batch_mean(x: NNCFTensor) -> NNCFTensor:
return ONNXNNCFTensor(np.mean(x.tensor, axis=0, keepdims=True))

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
@staticmethod
def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()

@classmethod
def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
def zero_elements(x: NNCFTensor) -> NNCFTensor:
raise NotImplementedError()


25 changes: 16 additions & 9 deletions nncf/openvino/statistics/collectors.py
@@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, Deque, List, Optional, Tuple, Union
from typing import Deque, List, Optional, Tuple, Union

import numpy as np

@@ -113,7 +113,7 @@ def mean_per_channel(x: NNCFTensor, axis: int) -> NNCFTensor:
def no_outliers_map(
cls,
x: NNCFTensor,
fn: Callable[[NNCFTensor, int, NNCFTensor], Any],
fn: MaskedReduceFN,
axis: Union[int, Tuple[int, ...]] = 0,
alpha: float = 0.01,
keepdims: bool = False,
@@ -137,7 +137,7 @@ def unstack(x: NNCFTensor, axis: int = 0) -> List[NNCFTensor]:
return [OVNNCFTensor(np.squeeze(e, axis)) for e in np.split(x.tensor, x.tensor.shape[axis], axis=axis)]

@staticmethod
def squeeze(x: NNCFTensor, dim: Optional[int] = None) -> NNCFTensor:
def squeeze(x: NNCFTensor, dim: Optional[Union[int, Tuple[int, ...]]] = None) -> NNCFTensor:
return OVNNCFTensor(np.squeeze(x.tensor, axis=dim))

@staticmethod
@@ -152,15 +152,22 @@ def quantile(
return [OVNNCFTensor(x) for x in result]

@classmethod
def masked_map(cls, x: NNCFTensor, fn: MaskedReduceFN, filter_fn) -> NNCFTensor:
return fn(x, mask=filter_fn(x))
def precentile(
cls,
tensor: NNCFTensor,
precentile: Union[float, List[float]],
axis: Union[int, tuple, list],
keepdims: bool = False,
) -> List[TensorElementsType]:
quantile = np.true_divide(precentile, 100)
return cls.quantile(tensor, quantile=quantile, axis=axis, keepdims=keepdims)
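For context, the precentile() implementation above simply rescales percentile values into quantiles; a minimal plain-NumPy check of that equivalence (no NNCF wrappers involved):

import numpy as np

# precentile(p) delegates to quantile(p / 100); both reduce along the same axis.
data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 50.0]])
percentiles = [5.0, 95.0]                     # values in [0, 100]
quantiles = np.true_divide(percentiles, 100)  # [0.05, 0.95]

print(np.quantile(data, quantiles, axis=1))
print(np.percentile(data, percentiles, axis=1))  # identical result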

@classmethod
def sub(cls, a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
@staticmethod
def sub(a: NNCFTensor, b: NNCFTensor) -> NNCFTensor:
return NNCFTensor(a.tensor - b.tensor)

@classmethod
def zero_elements(cls, x: NNCFTensor) -> NNCFTensor:
@staticmethod
def zero_elements(x: NNCFTensor) -> NNCFTensor:
np_tensor = x.tensor
eps = np.finfo(np_tensor.dtype).eps
return NNCFTensor(np.abs(np_tensor) < eps)
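A short plain-NumPy usage sketch of the zero_elements() mask implemented above:

import numpy as np

# The mask is True wherever |x| falls below the dtype's machine epsilon.
x = np.array([0.0, 1e-20, -3.0, 2.5e-16], dtype=np.float64)
eps = np.finfo(x.dtype).eps   # about 2.22e-16 for float64
print(np.abs(x) < eps)        # [ True  True False False]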
