From d113c2ab6089d1ab4e12d38307f438ed63d5660e Mon Sep 17 00:00:00 2001 From: Alexander Dokuchaev Date: Tue, 16 Jul 2024 13:26:49 +0300 Subject: [PATCH] Fix pytest warnings (#2813) ### Changes Fix warnings in tests: - PytestCollectionWarning: cannot collect test class 'Test' because it has a __init__ constructor - UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead. - UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. - Add `pytest-dependency` to requirements for common tests - Remove xfail for expected exception in `tests/common/quantization/test_minmax.py::test_mode_against_default_map` - Fix marks for onnx tests ### Reason for changes Reduce noise in the test log output. ### Related tickets 138690 --- .../accuracy_control/test_calculate_drop.py | 20 +++--- tests/common/quantization/test_minmax.py | 33 +++++----- tests/common/quantization/test_passes.py | 13 ++-- .../quantization/test_quantizer_removal.py | 34 +++++----- tests/common/requirements.txt | 1 + tests/onnx/pytest.ini | 2 +- .../test_classification_models_graph.py | 62 +++++++++++++++---- .../onnx/quantization/test_opset_versions.py | 2 +- tests/onnx/test_nncf_graph_builder.py | 50 ++++++++++++--- tests/onnx/test_weightless_model.py | 5 +- .../test_templates/test_quantizer_config.py | 18 +++--- tests/tensorflow/pytest.ini | 2 + tests/torch/nas/test_state.py | 2 +- .../torch/nncf_network/test_hook_handlers.py | 10 +-- tests/torch/nncf_network/test_nncf_network.py | 4 +- .../experimental/test_nodes_grouping.py | 4 +- tests/torch/quantization/test_tracing.py | 6 +- tests/torch/test_algo_common.py | 11 +++- tests/torch/test_no_nncf_trace_patching.py | 8 +-- 19 files changed, 187 insertions(+), 100 deletions(-) diff --git a/tests/common/accuracy_control/test_calculate_drop.py b/tests/common/accuracy_control/test_calculate_drop.py index 50899d3eb3d..f31da6a2ef6 100644 --- a/tests/common/accuracy_control/test_calculate_drop.py +++ b/tests/common/accuracy_control/test_calculate_drop.py @@ -18,7 +18,7 @@ @dataclass -class TestCase: +class AccuracyDropTestCase: initial_metric: float quantized_metric: float drop_type: DropType @@ -31,28 +31,28 @@ class TestCase: "ts", [ # ABSOLUTE - TestCase( + AccuracyDropTestCase( initial_metric=0.2923, quantized_metric=0.3185, drop_type=DropType.ABSOLUTE, expected_should_terminate=True, expected_accuracy_drop=-0.0262, ), - TestCase( + AccuracyDropTestCase( initial_metric=0.3185, quantized_metric=0.2923, drop_type=DropType.ABSOLUTE, expected_should_terminate=False, expected_accuracy_drop=0.0262, ), - TestCase( + AccuracyDropTestCase( initial_metric=-0.2923, quantized_metric=-0.3185, drop_type=DropType.ABSOLUTE, expected_should_terminate=False, expected_accuracy_drop=0.0262, ), - TestCase( + AccuracyDropTestCase( initial_metric=-0.3185, quantized_metric=-0.2923, drop_type=DropType.ABSOLUTE, @@ -60,28 +60,28 @@ class TestCase: expected_accuracy_drop=-0.0262, ), # RELATIVE - TestCase( + AccuracyDropTestCase( initial_metric=0.2923, quantized_metric=0.3185, drop_type=DropType.RELATIVE, expected_should_terminate=True, expected_accuracy_drop=None, ), - TestCase( + AccuracyDropTestCase( initial_metric=0.3185, quantized_metric=0.2923, drop_type=DropType.RELATIVE, expected_should_terminate=False, expected_accuracy_drop=0.08226059, ), - TestCase( + AccuracyDropTestCase( initial_metric=-0.2923, quantized_metric=-0.3185, drop_type=DropType.RELATIVE, expected_should_terminate=False,
expected_accuracy_drop=0.0896339, ), - TestCase( + AccuracyDropTestCase( initial_metric=-0.3185, quantized_metric=-0.2923, drop_type=DropType.RELATIVE, @@ -90,7 +90,7 @@ class TestCase: ), ], ) -def test_calculate_accuracy_drop(ts: TestCase): +def test_calculate_accuracy_drop(ts: AccuracyDropTestCase): should_terminate, accuracy_drop = calculate_accuracy_drop( ts.initial_metric, ts.quantized_metric, ts.max_drop, ts.drop_type ) diff --git a/tests/common/quantization/test_minmax.py b/tests/common/quantization/test_minmax.py index 5cec1ea20ab..2a63d3c7ac8 100644 --- a/tests/common/quantization/test_minmax.py +++ b/tests/common/quantization/test_minmax.py @@ -134,24 +134,23 @@ def test_mode_against_default_map(algo_params, is_error): qconf_attr_vs_constraint_dict_to_compare = {"mode": QuantizationScheme.SYMMETRIC} if is_error: - try: + with pytest.raises(nncf.ParameterNotSupportedError): minmax = MinMaxQuantization(**algo_params) - except nncf.ParameterNotSupportedError: - pytest.xfail("Caught expected error") - minmax = MinMaxQuantization(**algo_params) - for ref_parameter_name, ref_parameter_value in default_values_to_compare[mode_param].items(): - parameter_value = getattr(minmax, ref_parameter_name) - assert parameter_value == ref_parameter_value - - global_quantizer_constraints = getattr(minmax, "_global_quantizer_constraints") - assert ( - global_quantizer_constraints[QuantizerGroup.ACTIVATIONS].qconf_attr_vs_constraint_dict - == qconf_attr_vs_constraint_dict_to_compare - ) - assert ( - global_quantizer_constraints[QuantizerGroup.WEIGHTS].qconf_attr_vs_constraint_dict - == qconf_attr_vs_constraint_dict_to_compare - ) + else: + minmax = MinMaxQuantization(**algo_params) + for ref_parameter_name, ref_parameter_value in default_values_to_compare[mode_param].items(): + parameter_value = getattr(minmax, ref_parameter_name) + assert parameter_value == ref_parameter_value + + global_quantizer_constraints = getattr(minmax, "_global_quantizer_constraints") + assert ( + global_quantizer_constraints[QuantizerGroup.ACTIVATIONS].qconf_attr_vs_constraint_dict + == qconf_attr_vs_constraint_dict_to_compare + ) + assert ( + global_quantizer_constraints[QuantizerGroup.WEIGHTS].qconf_attr_vs_constraint_dict + == qconf_attr_vs_constraint_dict_to_compare + ) @pytest.mark.parametrize( diff --git a/tests/common/quantization/test_passes.py b/tests/common/quantization/test_passes.py index d737debcf8d..3c10f196bf0 100644 --- a/tests/common/quantization/test_passes.py +++ b/tests/common/quantization/test_passes.py @@ -26,11 +26,14 @@ DATA_ROOT = TEST_ROOT / "common" / "data" / "reference_graphs" -class TestModes(Enum): +class ParameterTestModes(Enum): VALID = "valid" WRONG_TENSOR_SHAPE = "wrong_dropout_node" WRONG_PARALLEL_EDGES = "wrong_parallel_edges" + def __str__(self): + return self.value + def _check_graphs(dot_file_name, nncf_graph) -> None: nx_graph = nncf_graph.get_graph_for_structure_analysis() @@ -38,18 +41,18 @@ def _check_graphs(dot_file_name, nncf_graph) -> None: compare_nx_graph_with_reference(nx_graph, path_to_dot, check_edge_attrs=True) -@pytest.mark.parametrize("mode", [TestModes.VALID, TestModes.WRONG_TENSOR_SHAPE, TestModes.WRONG_PARALLEL_EDGES]) -def test_remove_nodes_and_reconnect_graph(mode: TestModes): +@pytest.mark.parametrize("mode", ParameterTestModes) +def test_remove_nodes_and_reconnect_graph(mode: ParameterTestModes): dot_reference_path_before = Path("passes") / "dropout_synthetic_model_before.dot" dot_reference_path_after = Path("passes") / "dropout_synthetic_model_after.dot" 
dropout_metatype = "DROPOUT_METATYPE" kwargs = {} - if mode != TestModes.VALID: + if mode != ParameterTestModes.VALID: kwargs.update({mode.value: True}) nncf_graph = NNCFGraphDropoutRemovingCase(dropout_metatype, **kwargs).nncf_graph - if mode != TestModes.VALID: + if mode != ParameterTestModes.VALID: with pytest.raises(AssertionError): remove_nodes_and_reconnect_graph(nncf_graph, [dropout_metatype]) return diff --git a/tests/common/quantization/test_quantizer_removal.py b/tests/common/quantization/test_quantizer_removal.py index 13596083e66..46ee43e189c 100644 --- a/tests/common/quantization/test_quantizer_removal.py +++ b/tests/common/quantization/test_quantizer_removal.py @@ -194,7 +194,7 @@ class Graph: @dataclass -class TestCase: +class ParameterTestCase: """ :param node_name: Quantizer node's name. We want to remove this quantizer from the model. @@ -212,42 +212,46 @@ class TestCase: TEST_CASES = { "simple_graph": [ - TestCase( + ParameterTestCase( "fake_quantize_119", ["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"], ["add_117", "conv2d_161"], ), - TestCase("fake_quantize_128", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]), - TestCase("fake_quantize_134", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]), - TestCase( + ParameterTestCase("fake_quantize_128", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]), + ParameterTestCase("fake_quantize_134", ["fake_quantize_134", "fake_quantize_128"], ["conv2d_127"]), + ParameterTestCase( "fake_quantize_139", ["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"], ["add_117", "conv2d_161"], ), - TestCase("fake_quantize_147", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]), - TestCase("fake_quantize_153", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]), - TestCase( + ParameterTestCase("fake_quantize_147", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]), + ParameterTestCase("fake_quantize_153", ["fake_quantize_153", "fake_quantize_147"], ["conv2d_146"]), + ParameterTestCase( "fake_quantize_162", ["fake_quantize_139", "fake_quantize_162", "fake_quantize_119"], ["add_117", "conv2d_161"], ), ], - "graph_with_shapeof": [TestCase("fake_quantize_105", ["fake_quantize_105"], ["interpolate_115"])], + "graph_with_shapeof": [ParameterTestCase("fake_quantize_105", ["fake_quantize_105"], ["interpolate_115"])], "simple_graph_quantize_dequantize": [ - TestCase("quantize_37", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]), - TestCase("quantize_39", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"]), + ParameterTestCase( + "quantize_37", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"] + ), + ParameterTestCase( + "quantize_39", ["quantize_37", "dequantize_38", "quantize_39", "dequantize_40"], ["conv2d_41"] + ), # - TestCase( + ParameterTestCase( "quantize_42", ["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"], ["conv2d_46", "add_65"], ), - TestCase( + ParameterTestCase( "quantize_44", ["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"], ["conv2d_46", "add_65"], ), - TestCase( + ParameterTestCase( "quantize_63", ["quantize_42", "dequantize_43", "quantize_44", "dequantize_45", "quantize_63", "dequantize_64"], ["conv2d_46", "add_65"], @@ -295,7 +299,7 @@ def create_test_params(): @pytest.mark.parametrize("nncf_graph,test_case", TEST_PARAMS, ids=IDS) -def 
test_find_quantizer_nodes_to_cut(nncf_graph: NNCFGraph, test_case: TestCase): +def test_find_quantizer_nodes_to_cut(nncf_graph: NNCFGraph, test_case: ParameterTestCase): quantizer_node = nncf_graph.get_node_by_name(test_case.node_name) # As test graphs are fully connected and does not have readvariable metatype, # this should work diff --git a/tests/common/requirements.txt b/tests/common/requirements.txt index 6ee495bde6e..7ba0333267b 100644 --- a/tests/common/requirements.txt +++ b/tests/common/requirements.txt @@ -1,5 +1,6 @@ -c ../../constraints.txt pytest pytest-cov +pytest-dependency pytest-mock pytest-xdist diff --git a/tests/onnx/pytest.ini b/tests/onnx/pytest.ini index 5d4ced66df5..7659952faf6 100644 --- a/tests/onnx/pytest.ini +++ b/tests/onnx/pytest.ini @@ -1,7 +1,7 @@ [pytest] markers = e2e_ptq: e2e ptq tests - e2e_eval_original_model: original model evaluation + e2e_eval_reference_model: original model evaluation python_files = test_* xfail_strict = true diff --git a/tests/onnx/quantization/test_classification_models_graph.py b/tests/onnx/quantization/test_classification_models_graph.py index fb678852e7a..bb5c8232fb5 100644 --- a/tests/onnx/quantization/test_classification_models_graph.py +++ b/tests/onnx/quantization/test_classification_models_graph.py @@ -23,21 +23,61 @@ from tests.onnx.weightless_model import load_model_topology_with_zeros_weights TORCHVISION_TEST_DATA = [ - (ModelToTest("resnet18", [1, 3, 224, 224]), models.resnet18(pretrained=True), {}), + ( + ModelToTest("resnet18", [1, 3, 224, 224]), + models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1), + {}, + ), ( ModelToTest("resnet50_cpu_spr", [1, 3, 224, 224]), - models.resnet50(pretrained=True), + models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1), {"target_device": TargetDevice.CPU_SPR}, ), - (ModelToTest("mobilenet_v2", [1, 3, 224, 224]), models.mobilenet_v2(pretrained=True), {}), - (ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]), models.mobilenet_v3_small(pretrained=True), {}), - (ModelToTest("inception_v3", [1, 3, 224, 224]), models.inception_v3(pretrained=True), {}), - (ModelToTest("googlenet", [1, 3, 224, 224]), models.googlenet(pretrained=True), {}), - (ModelToTest("vgg16", [1, 3, 224, 224]), models.vgg16(pretrained=True), {}), - (ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]), models.shufflenet_v2_x1_0(pretrained=True), {}), - (ModelToTest("squeezenet1_0", [1, 3, 224, 224]), models.squeezenet1_0(pretrained=True), {}), - (ModelToTest("densenet121", [1, 3, 224, 224]), models.densenet121(pretrained=True), {}), - (ModelToTest("mnasnet0_5", [1, 3, 224, 224]), models.mnasnet0_5(pretrained=True), {}), + ( + ModelToTest("mobilenet_v2", [1, 3, 224, 224]), + models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]), + models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("inception_v3", [1, 3, 224, 224]), + models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("googlenet", [1, 3, 224, 224]), + models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("vgg16", [1, 3, 224, 224]), + models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]), + models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("squeezenet1_0", [1, 3, 224, 224]), + 
models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("densenet121", [1, 3, 224, 224]), + models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1), + {}, + ), + ( + ModelToTest("mnasnet0_5", [1, 3, 224, 224]), + models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1), + {}, + ), ] diff --git a/tests/onnx/quantization/test_opset_versions.py b/tests/onnx/quantization/test_opset_versions.py index 37ca24e8fc7..c31a04f74cb 100644 --- a/tests/onnx/quantization/test_opset_versions.py +++ b/tests/onnx/quantization/test_opset_versions.py @@ -22,7 +22,7 @@ @pytest.mark.parametrize("opset_version", TEST_OPSETS) def test_model_opset_version(tmp_path, opset_version): - model = models.mobilenet_v2(pretrained=True) + model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1) input_shape = [1, 3, 224, 224] x = torch.randn(input_shape, requires_grad=False) torch.onnx.export(model, x, tmp_path / "model.onnx", opset_version=opset_version) diff --git a/tests/onnx/test_nncf_graph_builder.py b/tests/onnx/test_nncf_graph_builder.py index 576aa55067c..8194f6858fb 100644 --- a/tests/onnx/test_nncf_graph_builder.py +++ b/tests/onnx/test_nncf_graph_builder.py @@ -42,16 +42,46 @@ def test_compare_nncf_graph_synthetic_models(model_cls_to_test): CLASSIFICATION_MODEL_DEF_AND_OBJ = [ - (ModelToTest("resnet18", [1, 3, 224, 224]), models.resnet18(pretrained=True)), - (ModelToTest("mobilenet_v2", [1, 3, 224, 224]), models.mobilenet_v2(pretrained=True)), - (ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]), models.mobilenet_v3_small(pretrained=True)), - (ModelToTest("inception_v3", [1, 3, 224, 224]), models.inception_v3(pretrained=True)), - (ModelToTest("googlenet", [1, 3, 224, 224]), models.googlenet(pretrained=True)), - (ModelToTest("vgg16", [1, 3, 224, 224]), models.vgg16(pretrained=True)), - (ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]), models.shufflenet_v2_x1_0(pretrained=True)), - (ModelToTest("squeezenet1_0", [1, 3, 224, 224]), models.squeezenet1_0(pretrained=True)), - (ModelToTest("densenet121", [1, 3, 224, 224]), models.densenet121(pretrained=True)), - (ModelToTest("mnasnet0_5", [1, 3, 224, 224]), models.mnasnet0_5(pretrained=True)), + ( + ModelToTest("resnet18", [1, 3, 224, 224]), + models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("mobilenet_v2", [1, 3, 224, 224]), + models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]), + models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("inception_v3", [1, 3, 224, 224]), + models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("googlenet", [1, 3, 224, 224]), + models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("vgg16", [1, 3, 224, 224]), + models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]), + models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("squeezenet1_0", [1, 3, 224, 224]), + models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("densenet121", [1, 3, 224, 224]), + models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1), + ), + ( + ModelToTest("mnasnet0_5", [1, 3, 224, 224]), + models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1), + ), 
] diff --git a/tests/onnx/test_weightless_model.py b/tests/onnx/test_weightless_model.py index 04968c875f9..4116da98ac2 100644 --- a/tests/onnx/test_weightless_model.py +++ b/tests/onnx/test_weightless_model.py @@ -21,7 +21,10 @@ @pytest.mark.parametrize( - ("model_to_test", "model"), [(ModelToTest("resnet18", [1, 3, 224, 224]), models.resnet18(pretrained=True))] + ("model_to_test", "model"), + [ + (ModelToTest("resnet18", [1, 3, 224, 224]), models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)), + ], ) def test_save_weightless_model(tmp_path, model_to_test, model): onnx_model_path = tmp_path / (model_to_test.model_name + ".onnx") diff --git a/tests/post_training/test_templates/test_quantizer_config.py b/tests/post_training/test_templates/test_quantizer_config.py index a037ccf9ea9..4b92a2d0f79 100644 --- a/tests/post_training/test_templates/test_quantizer_config.py +++ b/tests/post_training/test_templates/test_quantizer_config.py @@ -79,7 +79,7 @@ def conv_sum_aggregation_nncf_graph(self) -> NNCFGraphToTestSumAggregation: pass @dataclass - class TestGetStatisticsCollectorParameters: + class GetStatisticsCollectorParameters: target_type: TargetType target_node_name: str batchwise_statistics: bool @@ -89,31 +89,31 @@ class TestGetStatisticsCollectorParameters: @pytest.fixture( params=[ pytest.param( - TestGetStatisticsCollectorParameters(TargetType.PRE_LAYER_OPERATION, "/Sum_1_0", True, (2,), (1, 2)), + GetStatisticsCollectorParameters(TargetType.PRE_LAYER_OPERATION, "/Sum_1_0", True, (2,), (1, 2)), ), - TestGetStatisticsCollectorParameters( + GetStatisticsCollectorParameters( TargetType.POST_LAYER_OPERATION, "/Conv_1_0", True, (2, 3), (1, 2, 3), ), - TestGetStatisticsCollectorParameters( + GetStatisticsCollectorParameters( TargetType.OPERATION_WITH_WEIGHTS, "/Conv_1_0", True, (1, 2, 3), (0, 1, 2, 3), ), - TestGetStatisticsCollectorParameters(TargetType.PRE_LAYER_OPERATION, "/Sum_1_0", False, (0, 2), (0, 1, 2)), - TestGetStatisticsCollectorParameters( + GetStatisticsCollectorParameters(TargetType.PRE_LAYER_OPERATION, "/Sum_1_0", False, (0, 2), (0, 1, 2)), + GetStatisticsCollectorParameters( TargetType.POST_LAYER_OPERATION, "/Conv_1_0", False, (0, 2, 3), (0, 1, 2, 3), ), - TestGetStatisticsCollectorParameters( + GetStatisticsCollectorParameters( TargetType.OPERATION_WITH_WEIGHTS, "/Conv_1_0", False, @@ -122,7 +122,7 @@ class TestGetStatisticsCollectorParameters: ), ] ) - def statistic_collector_parameters(self, request) -> TestGetStatisticsCollectorParameters: + def statistic_collector_parameters(self, request) -> GetStatisticsCollectorParameters: return request.param def test_default_quantizer_config(self, single_conv_nncf_graph): @@ -263,7 +263,7 @@ def test_get_stat_collector( q_config_per_channel, num_samples, conv_sum_aggregation_nncf_graph, - statistic_collector_parameters: TestGetStatisticsCollectorParameters, + statistic_collector_parameters: GetStatisticsCollectorParameters, ): params = statistic_collector_parameters min_max_algo = MinMaxQuantization( diff --git a/tests/tensorflow/pytest.ini b/tests/tensorflow/pytest.ini index 46562c67779..ecccea3c4d8 100644 --- a/tests/tensorflow/pytest.ini +++ b/tests/tensorflow/pytest.ini @@ -2,3 +2,5 @@ markers = install nightly + eval + oveval diff --git a/tests/torch/nas/test_state.py b/tests/torch/nas/test_state.py index 3c2cc24d9fb..d78f11f72df 100644 --- a/tests/torch/nas/test_state.py +++ b/tests/torch/nas/test_state.py @@ -37,7 +37,7 @@ from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel 
-@pytest.yield_fixture() +@pytest.fixture def _nncf_caplog(caplog): nncf_logger.propagate = True yield caplog diff --git a/tests/torch/nncf_network/test_hook_handlers.py b/tests/torch/nncf_network/test_hook_handlers.py index 06c22d24a84..9f38c637c25 100644 --- a/tests/torch/nncf_network/test_hook_handlers.py +++ b/tests/torch/nncf_network/test_hook_handlers.py @@ -33,7 +33,7 @@ ], ) class TestHookHandles: - class TestHook(torch.nn.Module): + class HookTest(torch.nn.Module): def __init__(self): super().__init__() self._p = torch.nn.Parameter(torch.zeros((1,))) @@ -66,7 +66,7 @@ def test_temporary_insert_at_point_by_hook_group_name( self, target_type: TargetType, target_node_name: str, input_port_id: int ): nncf_model, ip, _check = self._prepare_hook_handles_test(target_type, target_node_name, input_port_id) - permanent_hook = self.TestHook() + permanent_hook = self.HookTest() TEMPORARY_HOOK_GROUP_NAME = "tmp" # Make temporary hook a ref to the permanent hook # to check tmp hooks are not removed by their id() @@ -76,7 +76,7 @@ def test_temporary_insert_at_point_by_hook_group_name( _check(ref_hooks) for _ in range(2): - temporary_hook = self.TestHook() + temporary_hook = self.HookTest() nncf_model.nncf.insert_at_point(ip, temporary_hook, TEMPORARY_HOOK_GROUP_NAME) ref_hooks.append(temporary_hook) _check(ref_hooks) @@ -92,7 +92,7 @@ def test_temporary_insert_at_point_by_hook_group_name( def test_insert_at_point_hook_handles(self, target_type: TargetType, target_node_name: str, input_port_id: int): nncf_model, ip, _check = self._prepare_hook_handles_test(target_type, target_node_name, input_port_id) - permanent_hook = self.TestHook() + permanent_hook = self.HookTest() # Make temporary hook a ref to the permanent hook # to check tmp hooks are not removed by their id() temporary_hook = permanent_hook @@ -103,7 +103,7 @@ def test_insert_at_point_hook_handles(self, target_type: TargetType, target_node _check(ref_hooks) for _ in range(2): - temporary_hook = self.TestHook() + temporary_hook = self.HookTest() tmp_hh.append(nncf_model.nncf.insert_at_point(ip, temporary_hook)) ref_hooks.append(temporary_hook) _check(ref_hooks) diff --git a/tests/torch/nncf_network/test_nncf_network.py b/tests/torch/nncf_network/test_nncf_network.py index 02b1d4adf0b..cbe125cbd3b 100644 --- a/tests/torch/nncf_network/test_nncf_network.py +++ b/tests/torch/nncf_network/test_nncf_network.py @@ -883,7 +883,7 @@ def fn_to_check_input_type(input_): assert visited_times == 1 -class TestWhisperDecoderModel(torch.nn.Module): +class WhisperDecoderTestModel(torch.nn.Module): def __init__(self): super().__init__() self.embedding = torch.nn.Embedding(10, 3) @@ -905,7 +905,7 @@ def forward(self, x): def test_insert_hook_after_parameter(): - model = TestWhisperDecoderModel() + model = WhisperDecoderTestModel() example_input = torch.randint(0, 9, (2,)) nncf_model = wrap_model(model, example_input, trace_parameters=True) result = nncf_model(example_input) diff --git a/tests/torch/pruning/experimental/test_nodes_grouping.py b/tests/torch/pruning/experimental/test_nodes_grouping.py index 931bc3e42a8..d906e1e8eda 100644 --- a/tests/torch/pruning/experimental/test_nodes_grouping.py +++ b/tests/torch/pruning/experimental/test_nodes_grouping.py @@ -488,14 +488,14 @@ def test_groups(desc: GroupTestDesc, mocker, tmp_path): BIG_MODEL_DESCS = [ GroupTestDesc( model_desc=GeneralModelDesc( - model_name="MobileBERT big", + model_name="MobileBERT_big", input_info=[dict(sample_size=[1, 128], type="long")] * 4, model_builder=get_mobile_bert_big_model, 
), ), GroupTestDesc( model_desc=GeneralModelDesc( - model_name="Swin big", + model_name="Swin_big", input_info=dict(sample_size=[1, 3, 224, 224]), model_builder=get_swin_tiny_model, ) diff --git a/tests/torch/quantization/test_tracing.py b/tests/torch/quantization/test_tracing.py index 578664f75a1..74521338fea 100644 --- a/tests/torch/quantization/test_tracing.py +++ b/tests/torch/quantization/test_tracing.py @@ -17,7 +17,7 @@ from nncf.torch.quantization.layers import SymmetricQuantizer -class TestModel(nn.Module): +class SimpleModel(nn.Module): def __init__(self, fq) -> None: super().__init__() self.fq = fq @@ -64,7 +64,7 @@ def test_trace_asymmetric_quantizer(is_per_channel): quantizer.input_low.data = input_low quantizer.input_range.data = input_range - model = TestModel(quantizer) + model = SimpleModel(quantizer) traced = torch.jit.trace(model, torch.ones(1, 2, 1, 1)) check_fq_op(traced, is_per_channel) @@ -89,6 +89,6 @@ def test_trace_symmetric_quantizer(is_per_channel, is_signed): quantizer.scale.data = scale quantizer.signed = is_signed - model = TestModel(quantizer) + model = SimpleModel(quantizer) traced = torch.jit.trace(model, torch.ones(1, 2, 1, 1)) check_fq_op(traced, is_per_channel) diff --git a/tests/torch/test_algo_common.py b/tests/torch/test_algo_common.py index 60c38faa448..b41446052ad 100644 --- a/tests/torch/test_algo_common.py +++ b/tests/torch/test_algo_common.py @@ -452,15 +452,20 @@ def test_compressed_model_has_controller_references(algos: List[str]): ALGOS_SUPPORTING_SINGLE_LINE_CONFIGS = [ x for x in sorted(PT_COMPRESSION_ALGORITHMS.registry_dict.keys()) - if x not in ["knowledge_distillation", "movement_sparsity", "elasticity", "progressive_shrinking"] + if x + not in [ + "knowledge_distillation", + "movement_sparsity", + "elasticity", + "progressive_shrinking", + "NoCompressionAlgorithm", + ] ] @pytest.mark.parametrize("algo_name", ALGOS_SUPPORTING_SINGLE_LINE_CONFIGS) def test_can_apply_algo_with_single_line(algo_name, nncf_caplog): model = BasicLinearTestModel() - if algo_name == "NoCompressionAlgorithm": - pytest.skip() config = ConfigCreator().add_algo(algo_name).create() with nncf_caplog.at_level(logging.INFO): create_compressed_model_and_algo_for_test(model, config) diff --git a/tests/torch/test_no_nncf_trace_patching.py b/tests/torch/test_no_nncf_trace_patching.py index c39687ca51e..3c1b3c88053 100644 --- a/tests/torch/test_no_nncf_trace_patching.py +++ b/tests/torch/test_no_nncf_trace_patching.py @@ -20,7 +20,7 @@ from tests.torch.helpers import get_empty_config -class TestModel(nn.Module): +class SimpleModel(nn.Module): """ A test model with an operation resulting in an ambiguous graph. Ambiguous operation output is put into the model output for testing convenience. 
@@ -50,10 +50,10 @@ def test_no_trace_model_patching(): config["input_info"] = {"sample_size": [1, 1, 4, 4], "filler": "random"} # Not patching anything: all output nodes are traced - _, compressed_model = create_compressed_model(TestModel(True), config) + _, compressed_model = create_compressed_model(SimpleModel(True), config) assert len(compressed_model.nncf.get_original_graph().get_output_nodes()) == 2 # Patching a function results with no_nncf_trace in method not producing an output node - disable_tracing(TestModel.ambiguous_op) - _, compressed_model = create_compressed_model(TestModel(False), get_empty_config()) + disable_tracing(SimpleModel.ambiguous_op) + _, compressed_model = create_compressed_model(SimpleModel(False), get_empty_config()) assert len(compressed_model.nncf.get_original_graph().get_output_nodes()) == 1
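
For reference, the recurring migration applied across the ONNX test files above follows this pattern (a minimal sketch for illustration, not part of the patch itself):

```python
import torchvision.models as models

# Deprecated since torchvision 0.13; emits the UserWarning this PR removes:
#   model = models.resnet18(pretrained=True)

# Warning-free replacement: pass an explicit weights enum (or None):
model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
```

Similarly, the xfail removal in `test_minmax.py` swaps a try/except that downgraded the expected exception into an xfail for a strict assertion, so the test now fails if the error is *not* raised (sketch reusing `algo_params` from the test):

```python
import pytest
import nncf

with pytest.raises(nncf.ParameterNotSupportedError):
    MinMaxQuantization(**algo_params)
```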