Skip to content

Commit

Permalink
Attempt to quantize an Anomalib model
Browse files Browse the repository at this point in the history
  • Loading branch information
daniil-lyakhov committed Jun 4, 2024
1 parent a40c281 commit 4ded876
Showing 1 changed file with 5 additions and 0 deletions.
5 changes: 5 additions & 0 deletions examples/quantization_aware_training/torch/anomalib/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
from anomalib.deploy import ExportType
from anomalib.engine import Engine
from anomalib.models import Stfpm
from torch._export import capture_pre_autograd_graph

import nncf

Expand Down Expand Up @@ -124,6 +125,10 @@ def transform_fn(data_item):

# Quantize the inference model using Post-Training Quantization
inference_model = model.model

example_input = torch.ones((1, 3, 255, 255))
with torch.no_grad():
capture_pre_autograd_graph(inference_model, example_input)
quantized_inference_model = nncf.quantize(model=inference_model, calibration_dataset=calibration_dataset)

# Deepcopy the original model and set the quantized inference model
Expand Down

0 comments on commit 4ded876

Please sign in to comment.