Skip to content

Commit

Permalink
Add --no-eval arg to test_quantize_conformance.py (#2179)
Browse files Browse the repository at this point in the history
### Changes

Add a `--no-eval` argument that skips the validation step, to speed up debugging runs
  • Loading branch information
AlexanderDokuchaev authored Oct 13, 2023
1 parent bd9e843 commit 4d47869
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 3 deletions.
5 changes: 3 additions & 2 deletions tests/post_training/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,9 @@


def pytest_addoption(parser):
    """Register command-line options for the post-training conformance tests.

    :param parser: pytest's option parser (``_pytest.config.argparsing.Parser``).

    Options:
        --data: Data directory.
        --output: Directory to store artifacts (defaults to ``./tmp/``).
        --no-eval: Skip the validation step (flag; for debugging runs).
    """
    # NOTE: each option must be registered exactly once — pytest raises
    # "option names already added" on duplicates.
    parser.addoption("--data", action="store", help="Data directory")
    parser.addoption("--output", action="store", default="./tmp/", help="Directory to store artifacts")
    parser.addoption("--no-eval", action="store_true", help="Skip validation step")


def pytest_configure(config):
Expand Down
6 changes: 6 additions & 0 deletions tests/post_training/pipelines/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ def __init__(
output_dir: Path,
data_dir: Path,
reference_data: dict,
no_eval: bool,
params: dict = None,
) -> None:
self.reported_name = reported_name
Expand All @@ -118,6 +119,7 @@ def __init__(
self.data_dir = Path(data_dir)
self.reference_data = reference_data
self.params = params or {}
self.no_eval = no_eval

self.output_model_dir = self.output_dir / self.reported_name / self.backend.value
self.output_model_dir.mkdir(parents=True, exist_ok=True)
Expand Down Expand Up @@ -250,7 +252,11 @@ def validate(self) -> None:
"""
Validate and compare result with reference
"""
if self.no_eval:
print("Validation skipped")
return
print("Validation...")

self._validate()

metric_value = self.run_info.metric_value
Expand Down
8 changes: 7 additions & 1 deletion tests/post_training/test_quantize_conformance.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,11 @@ def fixture_result(pytestconfig):
return pytestconfig.test_results


@pytest.fixture(scope="session", name="no_eval")
def fixture_no_eval(pytestconfig):
    """Session-scoped fixture exposing the ``--no-eval`` command-line flag.

    :param pytestconfig: pytest's built-in config fixture.
    :return: True when ``--no-eval`` was passed on the command line
        (``store_true`` option), False otherwise.
    """
    return pytestconfig.getoption("no_eval")


def read_reference_data():
path_reference = Path(__file__).parent / "reference_data.yaml"
with path_reference.open() as f:
Expand All @@ -48,7 +53,7 @@ def read_reference_data():


@pytest.mark.parametrize("test_case_name", TEST_CASES.keys())
def test_ptq_quantization(test_case_name, data, output, result):
def test_ptq_quantization(test_case_name, data, output, result, no_eval):
pipeline = None
err_msg = None
test_model_param = None
Expand All @@ -75,6 +80,7 @@ def test_ptq_quantization(test_case_name, data, output, result):
"output_dir": output,
"data_dir": data,
"reference_data": REFERENCE_DATA[test_case_name],
"no_eval": no_eval,
}

pipeline = pipeline_cls(**pipeline_kwargs)
Expand Down

0 comments on commit 4d47869

Please sign in to comment.