
Commit

remove testing
Giuseppe5 committed Jun 18, 2024
1 parent 8c5985c commit d342b93
Showing 2 changed files with 2 additions and 5 deletions.
5 changes: 1 addition & 4 deletions tests/brevitas_ort/common.py
@@ -18,8 +18,6 @@
from brevitas.nn import QuantConvTranspose2d
from brevitas.nn import QuantConvTranspose3d
from brevitas.nn import QuantLinear
- from brevitas.quant.experimental.float_quant_fnuz import Fp8e4m3FNUZActPerTensorFloat
- from brevitas.quant.experimental.float_quant_fnuz import Fp8e4m3FNUZWeightPerTensorFloat
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPActPerTensorFloat
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPWeightPerTensorFloat
from brevitas.quant.fixed_point import Int8ActPerTensorFixedPoint
@@ -64,8 +62,7 @@ class A2QWeightQuantizerForTests(Int8AccumulatorAwareWeightQuant):
(Int8WeightPerChannelFixedPoint, Int8ActPerTensorFixedPoint),
'weight_symmetric_activation_dynamic_asymmetric_per_tensor_float':
(Int8WeightPerTensorFloat, ShiftedUint8DynamicActPerTensorFloat),
-     'fp8_ocp_per_tensor_float': (Fp8e4m3OCPWeightPerTensorFloat, Fp8e4m3OCPActPerTensorFloat),
-     'fp8_fnuz_per_tensor_float': (Fp8e4m3FNUZWeightPerTensorFloat, Fp8e4m3FNUZActPerTensorFloat)}
+     'fp8_ocp_per_tensor_float': (Fp8e4m3OCPWeightPerTensorFloat, Fp8e4m3OCPActPerTensorFloat)}
LSTM_QUANTIZERS = {
'asymmetric_per_tensor_float':
(ShiftedUint8WeightPerTensorFloat, ShiftedUint8ActPerTensorFloat),
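For context on the remaining entry: the WBIOL_QUANTIZERS pairs above each map a test id to a (weight quantizer, activation quantizer) tuple that the test cases apply to brevitas layers. Below is a minimal sketch of how the surviving 'fp8_ocp_per_tensor_float' pair would typically be applied to a QuantLinear layer; the layer sizes and keyword arguments are illustrative assumptions, not part of this commit:

import torch

from brevitas.nn import QuantLinear
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPActPerTensorFloat
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPWeightPerTensorFloat

# Pair taken from the 'fp8_ocp_per_tensor_float' entry kept by this commit.
weight_quant, io_quant = Fp8e4m3OCPWeightPerTensorFloat, Fp8e4m3OCPActPerTensorFloat

# Hypothetical layer configuration: an FP8-quantized linear layer with
# quantized input and output activations.
layer = QuantLinear(
    in_features=16,
    out_features=8,
    bias=True,
    weight_quant=weight_quant,
    input_quant=io_quant,
    output_quant=io_quant)

out = layer(torch.randn(2, 16))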
2 changes: 1 addition & 1 deletion tests/brevitas_ort/quant_module_cases.py
@@ -27,7 +27,7 @@ def case_quant_wbiol(
set_case_id(request.node.callspec.id, QuantWBIOLCases.case_quant_wbiol)

weight_quant, io_quant = quantizers
-     is_fp8 = weight_quant == Fp8e4m3OCPWeightPerTensorFloat or weight_quant == Fp8e4m3FNUZWeightPerTensorFloat
+     is_fp8 = weight_quant == Fp8e4m3OCPWeightPerTensorFloat
if is_fp8:
if weight_bit_width < 8 or input_bit_width < 8 or output_bit_width < 8:
pytest.skip('FP8 export requires total bitwidth equal to 8')
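With the FNUZ quantizers dropped, the only FP8 path these ORT tests still exercise is the OCP variant, and the skip guard above applies the 8-bit constraint to it alone. Below is a minimal sketch of the export flow the brevitas_ort tests cover, assuming the export_onnx_qcdq helper from brevitas.export; the input shape and output file name are hypothetical:

import torch

from brevitas.export import export_onnx_qcdq
from brevitas.nn import QuantLinear
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPActPerTensorFloat
from brevitas.quant.experimental.float_quant_ocp import Fp8e4m3OCPWeightPerTensorFloat

# FP8 OCP quantized layer; per the skip message above, FP8 export expects the
# full 8-bit format for weights and activations.
layer = QuantLinear(
    16, 8,
    weight_quant=Fp8e4m3OCPWeightPerTensorFloat,
    input_quant=Fp8e4m3OCPActPerTensorFloat)
layer.eval()

# Export to the standard ONNX QCDQ representation for validation against onnxruntime.
export_onnx_qcdq(layer, args=torch.randn(1, 16), export_path='quant_linear_fp8_ocp.onnx')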
