
Commit

Merge branch 'develop' into onnx_bump_version
AlexanderDokuchaev authored Nov 1, 2024
2 parents 6c05247 + 3ba9b06 commit b262869
Showing 99 changed files with 3,944 additions and 751 deletions.
----
@@ -24,6 +24,7 @@

import nncf
from nncf.common.logging import nncf_logger
+from nncf.quantization.advanced_parameters import AdvancedCompressionParameters

DataItem = TypeVar("DataItem")
ModelInput = TypeVar("ModelInput")
@@ -63,6 +64,7 @@ def compress_model(
group_size=group_size,
awq=awq,
sensitivity_metric=nncf.parameters.SensitivityMetric.MAX_ACTIVATION_VARIANCE,
+advanced_parameters=AdvancedCompressionParameters(statistics_path="statistics"),
)
return optimized_ov_model

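For context, the new advanced_parameters argument is what enables statistics caching in this pipeline. Below is a hedged sketch of an equivalent standalone call; the model, dataset, and hyperparameter values are illustrative, not taken from this commit.

import nncf
from nncf.quantization.advanced_parameters import AdvancedCompressionParameters

# `ov_model` (an OpenVINO model) and `dataset` (an nncf.Dataset) are assumed prepared.
compressed_model = nncf.compress_weights(
    ov_model,
    mode=nncf.CompressWeightsMode.INT4_SYM,  # illustrative mode
    ratio=0.8,  # illustrative ratio
    group_size=64,  # illustrative group size
    dataset=dataset,
    sensitivity_metric=nncf.parameters.SensitivityMetric.MAX_ACTIVATION_VARIANCE,
    # First run dumps collected statistics to ./statistics; subsequent runs
    # load them from there instead of re-collecting.
    advanced_parameters=AdvancedCompressionParameters(statistics_path="statistics"),
)
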
----
@@ -42,7 +42,7 @@ def validate_ov_model(
) -> Tuple[Dict, int, int]:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.batch_i = 1
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)
compiled_model = ov.compile_model(ov_model, device_name="CPU")
----
@@ -38,7 +38,7 @@ def validate(
) -> Tuple[Dict, int, int]:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.batch_i = 1
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)

@@ -101,13 +101,12 @@ def print_statistics(stats: np.ndarray, total_images: int, total_objects: int) -


def prepare_validation(model: YOLO, args: Any) -> Tuple[Validator, torch.utils.data.DataLoader]:
-validator = model.smart_load("validator")(args)
+validator = model.task_map[model.task]["validator"](args=args)
validator.data = check_det_dataset(args.data)
+validator.stride = 32

data_loader = validator.get_dataloader(f"{DATASETS_DIR}/coco128-seg", 1)

-validator = model.smart_load("validator")(args)
-
validator.is_coco = True
validator.class_map = coco80_to_coco91_class()
validator.names = model.model.names
@@ -146,7 +145,7 @@ def validation_ac(
) -> float:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.batch_i = 1
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)

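Both edits in this file track the ultralytics bump from 8.0.170 to 8.3.22 (see the requirements change below). A hedged sketch of the new API shape; the checkpoint name is illustrative.

from ultralytics import YOLO
from ultralytics.cfg import get_cfg
from ultralytics.utils import DEFAULT_CFG

model = YOLO("yolov8n-seg.pt")  # illustrative checkpoint
args = get_cfg(cfg=DEFAULT_CFG)
args.data = "coco128-seg.yaml"

# 8.0.x: validator = model.smart_load("validator")(args)
# 8.3.x: the validator class is resolved through the task map instead:
validator = model.task_map[model.task]["validator"](args=args)

# 8.3.x validators also keep stats as a dict of lists rather than a flat list:
validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
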
----
@@ -1,3 +1,3 @@
-ultralytics==8.0.170
+ultralytics==8.3.22
onnx==1.17.0
openvino==2024.4
5 changes: 3 additions & 2 deletions examples/post_training_quantization/openvino/yolov8/main.py
@@ -36,7 +36,7 @@ def validate(
) -> Tuple[Dict, int, int]:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)
model.reshape({0: [1, 3, -1, -1]})
compiled_model = ov.compile_model(model, device_name="CPU")
@@ -66,8 +66,9 @@ def print_statistics(stats: np.ndarray, total_images: int, total_objects: int) -


def prepare_validation(model: YOLO, args: Any) -> Tuple[Validator, torch.utils.data.DataLoader]:
-validator = model.smart_load("validator")(args)
+validator = model.task_map[model.task]["validator"](args=args)
validator.data = check_det_dataset(args.data)
+validator.stride = 32
dataset = validator.data["val"]
print(f"{dataset}")

----
@@ -1,3 +1,3 @@
-ultralytics==8.0.170
+ultralytics==8.3.22
onnx>=1.12.0,<1.16.2
openvino==2024.4
----
@@ -38,7 +38,7 @@ def validate(
) -> Tuple[Dict, int, int]:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.batch_i = 1
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)
model.reshape({0: [1, 3, -1, -1]})
@@ -92,15 +92,14 @@ def print_statistics(stats: np.ndarray, total_images: int, total_objects: int) -


def prepare_validation(model: YOLO, args: Any) -> Tuple[Validator, torch.utils.data.DataLoader]:
-validator = model.smart_load("validator")(args)
+validator = model.task_map[model.task]["validator"](args=args)
validator.data = check_det_dataset(args.data)
+validator.stride = 32
dataset = validator.data["val"]
print(f"{dataset}")

data_loader = validator.get_dataloader(f"{DATASETS_DIR}/coco128-seg", 1)

-validator = model.smart_load("validator")(args)
-
validator.is_coco = True
validator.class_map = coco80_to_coco91_class()
validator.names = model.model.names
@@ -146,7 +145,7 @@ def validation_ac(
) -> float:
validator.seen = 0
validator.jdict = []
-validator.stats = []
+validator.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
validator.batch_i = 1
validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)
num_outputs = len(compiled_model.outputs)
@@ -211,6 +210,7 @@ def main():
model = YOLO(f"{ROOT}/{MODEL_NAME}.pt")
args = get_cfg(cfg=DEFAULT_CFG)
args.data = "coco128-seg.yaml"
+args.workers = 0

# Prepare validation dataset and helper
validator, data_loader = prepare_validation(model, args)
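
For orientation, validation_ac above is the accuracy-control callback that main() hands to NNCF. A hedged sketch of the wiring; argument values are illustrative and the example's own main() remains the authoritative version.

import functools

import nncf

# `ov_model`, `calibration_dataset`, `validation_dataset`, and `validator` are
# assumed to be prepared as elsewhere in this example.
validation_fn = functools.partial(validation_ac, validator=validator)

quantized_model = nncf.quantize_with_accuracy_control(
    ov_model,
    calibration_dataset=calibration_dataset,
    validation_dataset=validation_dataset,
    validation_fn=validation_fn,
    max_drop=0.003,  # illustrative tolerated metric drop
)
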
----
@@ -1,3 +1,3 @@
-ultralytics==8.0.170
+ultralytics==8.3.22
onnx>=1.12.0,<1.16.2
openvino==2024.4
----
@@ -3,3 +3,4 @@ openvino==2024.4
scikit-learn
torch==2.4.0
torchvision==0.19.0
+setuptools<=72.1.0
----
@@ -6,3 +6,4 @@ torch==2.4.0
torchmetrics==1.0.1
torchvision==0.19.0
numpy<2
+setuptools<=72.1.0
13 changes: 12 additions & 1 deletion examples/post_training_quantization/torch_fx/resnet18/main.py
@@ -24,11 +24,13 @@
import torchvision.models as models
import torchvision.transforms as transforms
from fastdownload import FastDownload
+from torch._dynamo.exc import BackendCompilerFailed

import nncf
import nncf.torch
from nncf.common.logging.track_progress import track
from nncf.common.utils.helpers import create_table
+from nncf.common.utils.os import is_windows
from nncf.torch import disable_patching

IMAGE_SIZE = 64
@@ -205,7 +207,16 @@ def transform_fn(data_item):
print("Benchmark FP32 model compiled with default backend ...")
with disable_patching():
compiled_model = torch.compile(model)
-fp32_latency = measure_latency(compiled_model, example_inputs=example_input)
+try:
+    fp32_latency = measure_latency(compiled_model, example_inputs=example_input)
+except BackendCompilerFailed as exp:
+    if not is_windows():
+        raise exp
+    print(
+        "WARNING: Torch Inductor is currently unavailable on Windows. "
+        "For more information, visit https://github.com/pytorch/pytorch/issues/135954"
+    )
+    fp32_latency = float("nan")
print(f"{fp32_latency:.3f} ms")

print("Benchmark FP32 model compiled with openvino backend ...")
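
The guard above only concerns the default Inductor backend; the OpenVINO torch.compile path benchmarked next is unaffected by the Windows limitation. A hedged sketch of the two compile paths; the model and input shape follow the example, the rest is illustrative.

import torch
import torchvision.models as models

import openvino.torch  # noqa: F401  (registers the "openvino" torch.compile backend)

model = models.resnet18(weights=None).eval()
example_input = torch.randn(1, 3, 64, 64)  # IMAGE_SIZE = 64 in this example

compiled_default = torch.compile(model)  # Inductor; currently unavailable on Windows
compiled_ov = torch.compile(model, backend="openvino")

with torch.no_grad():
    _ = compiled_ov(example_input)
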
----
@@ -1 +1,2 @@
-anomalib[core,openvino]==1.0.0
+anomalib[core,openvino]==1.0.1
+setuptools<=72.1.0
----
@@ -2,3 +2,4 @@ fastdownload==0.0.7
openvino==2024.4
torch==2.4.0
torchvision==0.19.0
+setuptools<=72.1.0
1 change: 1 addition & 0 deletions nncf/common/quantization/quantizer_propagation/graph.py
@@ -719,6 +719,7 @@ def remove_propagating_quantizer(
if prop_quantizer.unified_scale_type is not None:
gid = self._unified_scale_group_manager.get_group_id_by_propagating_quantizer_id(prop_quantizer.id)
self._unified_scale_group_manager.remove_from_group(gid, prop_quantizer)
+self._pqs_after_weight_dependent_output_quantized_nodes.pop(prop_quantizer, None)

def propagate_quantizer_via_path(
self, prop_quantizer: PropagatingQuantizer, path: PropagationPath
68 changes: 68 additions & 0 deletions nncf/common/tensor_statistics/aggregator.py
@@ -8,22 +8,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC
from abc import abstractmethod
from itertools import islice
from typing import Any, Dict, Optional, TypeVar

import nncf
+import nncf.common.tensor_statistics.statistics_serializer as statistics_serializer
+import nncf.common.tensor_statistics.statistics_validator as statistics_validator
from nncf.common import factory
from nncf.common.graph.graph import NNCFGraph
+from nncf.common.graph.transformations.commands import TargetPoint
from nncf.common.graph.transformations.layout import TransformationLayout
+from nncf.common.logging import nncf_logger
from nncf.common.logging.track_progress import track
from nncf.common.tensor import NNCFTensor
from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
+from nncf.common.utils.backend import BackendType
from nncf.data.dataset import DataItem
from nncf.data.dataset import Dataset
from nncf.data.dataset import ModelInput
+from nncf.experimental.common.tensor_statistics.statistics import TensorStatistic

TensorType = TypeVar("TensorType")
TModel = TypeVar("TModel")
@@ -38,6 +44,8 @@ class StatisticsAggregator(ABC):
Base class for statistics collection.
"""

+BACKEND: BackendType
+
def __init__(self, dataset: Dataset[DataItem, ModelInput]):
self.dataset = dataset
self.stat_subset_size = None
@@ -88,6 +96,56 @@ def collect_statistics(self, model: TModel, graph: NNCFGraph) -> None:
f"smaller than the requested subset size {self.stat_subset_size}."
)

+def load_statistics_from_dir(self, dir_path: str) -> None:
+    """
+    Loads statistics from a directory and populates the statistic points with the loaded data.
+
+    :param dir_path: The name of the directory from which to load the statistics.
+    """
+    loaded_data, metadata = statistics_serializer.load_from_dir(dir_path)
+    statistics_validator.validate_backend(metadata, self.BACKEND)
+    self._load_statistics(loaded_data)
+    nncf_logger.info(f"Statistics were successfully loaded from a directory {dir_path}.")
+
+def _load_statistics(self, data: Dict[str, Any]) -> None:
+    """
+    Loads statistics into the registered statistic points from the given data.
+
+    :param data: A dictionary containing the statistics loaded from a file.
+    """
+    for _, statistic_point, tensor_collector in self.statistic_points.get_tensor_collectors():
+        statistics = tensor_collector.get_statistics()
+        statistics_key = self._get_statistics_key(statistics, statistic_point.target_point)
+        if statistics_key not in data:
+            raise nncf.ValidationError(f"Not found statistics for {statistics_key}")
+        statistics_container = tensor_collector.create_statistics_container(data[statistics_key])
+        tensor_collector.set_cache(statistics_container)
+
+def dump_statistics(self, dir_path: str) -> None:
+    """
+    Dumps the current statistics to a directory in a compressed format.
+
+    :param dir_path: The path of the directory where the statistics will be saved.
+    """
+    data_to_dump = self._prepare_statistics()
+    metadata = {"backend": self.BACKEND.value, "subset_size": self.stat_subset_size}
+    statistics_serializer.dump_to_dir(data_to_dump, dir_path, metadata)
+    nncf_logger.info(f"Statistics were successfully saved to a directory {dir_path}.")
+
+def _prepare_statistics(self) -> Dict[str, Any]:
+    """
+    Prepares the statistics data for dumping into a directory.
+
+    :return: A dictionary containing the statistics data to be dumped.
+    """
+    data_to_dump = {}
+    for _, statistic_point, tensor_collector in self.statistic_points.get_tensor_collectors():
+        statistics = tensor_collector.get_statistics()
+        statistics_key = self._get_statistics_key(statistics, statistic_point.target_point)
+        data = statistics.get_data()
+        data_to_dump[statistics_key] = data
+    return data_to_dump

def register_statistic_points(self, statistic_points: StatisticPointsContainer) -> None:
"""
Register statistic points for statistics collection and recalculates the maximum number samples
@@ -154,3 +212,13 @@ def _process_outputs(outputs: Any) -> Dict[str, NNCFTensor]:
:param outputs: raw model outputs
:return: processed model outputs in Dict[str, Tensor] format
"""

+@abstractmethod
+def _get_statistics_key(self, statistics: TensorStatistic, target_point: TargetPoint) -> str:
+    """
+    Returns key of statistics.
+
+    :param statistics: Statistics value.
+    :param target_point: Statistics target point.
+    :return: Statistics key.
+    """
4 changes: 2 additions & 2 deletions nncf/common/tensor_statistics/collectors.py
@@ -59,14 +59,14 @@ def register_input(self, x: TensorType) -> TensorType:
def _register_input(self, x: TensorType) -> None:
pass

-def get_statistics(self) -> None:
+def get_statistics(self) -> Any:
"""Returns collected statistics, if present."""
if self._collected_samples == 0:
raise StatisticsNotCollectedError()
return self._get_statistics()

@abstractmethod
-def _get_statistics(self) -> None:
+def _get_statistics(self) -> Any:
pass

def enable(self) -> None:
14 changes: 4 additions & 10 deletions nncf/common/tensor_statistics/statistic_point.py
@@ -13,8 +13,7 @@
from typing import Any, Callable, Generator, Optional, Tuple, cast

from nncf.common.graph.transformations.commands import TargetPoint
-from nncf.common.tensor import NNCFTensor
-from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
+from nncf.experimental.common.tensor_statistics.collectors import TensorCollector


class StatisticPoint:
@@ -25,7 +24,7 @@ class StatisticPoint:
algorithm implies on what algorithm nedeed this statistics.
"""

-def __init__(self, target_point: TargetPoint, tensor_collector: TensorStatisticCollectorBase, algorithm: str):
+def __init__(self, target_point: TargetPoint, tensor_collector: TensorCollector, algorithm: str):
self.target_point = target_point
self.algorithm_to_tensor_collectors = {algorithm: [tensor_collector]}

@@ -36,11 +35,6 @@ def __eq__(self, other: Any) -> bool:
and self.algorithm_to_tensor_collectors == other.self.algorithm_to_tensor_collectors,
)

-def register_tensor(self, x: NNCFTensor) -> None:
-    for tensor_collectors in self.algorithm_to_tensor_collectors.values():
-        for tensor_collector in tensor_collectors:
-            tensor_collector.register_input(x)
-

class StatisticPointsContainer(UserDict): # type: ignore
"""
@@ -88,7 +82,7 @@ def iter_through_statistic_points_in_target_node(

def get_tensor_collectors(
self, filter_fn: Optional[Callable[[StatisticPoint], bool]] = None
-) -> Generator[Tuple[str, StatisticPoint, TensorStatisticCollectorBase], None, None]:
+) -> Generator[Tuple[str, StatisticPoint, TensorCollector], None, None]:
"""
Returns iterable through all tensor collectors.
@@ -115,7 +109,7 @@ def get_algo_statistics_for_node(
target_node_name: str,
filter_fn: Callable[[StatisticPoint], bool],
algorithm: str,
-) -> Generator[TensorStatisticCollectorBase, None, None]:
+) -> Generator[TensorCollector, None, None]:
"""
Returns iterable through all statistic collectors in node with target_node_name.
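
After this change StatisticPoint is typed against the experimental TensorCollector. A hedged construction sketch; the target point and algorithm name are illustrative placeholders.

from nncf.common.tensor_statistics.statistic_point import StatisticPoint
from nncf.experimental.common.tensor_statistics.collectors import TensorCollector

collector = TensorCollector()
point = StatisticPoint(
    target_point=target_point,  # a backend-specific TargetPoint, assumed available
    tensor_collector=collector,
    algorithm="min_max",  # illustrative algorithm name
)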