Skip to content

Commit

Permalink
Merge pull request #76 from fastmachinelearning/feature/test_rect_dwi…
Browse files Browse the repository at this point in the history
…se_dilated_conv_lowering

Add extra conv lowering tests + fix linter issues
  • Loading branch information
maltanar authored Oct 23, 2023
2 parents 47e4357 + 2a425ec commit be72cb2
Show file tree
Hide file tree
Showing 5 changed files with 48 additions and 30 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
exclude: '^docs/conf.py'

default_language_version:
python: python3.8
python: python3.10

repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
Expand Down Expand Up @@ -29,7 +29,7 @@ repos:
rev: 23.1.0
hooks:
- id: black
language_version: python3
language_version: python3.10
args: [--line-length=125]

- repo: https://github.com/PyCQA/flake8
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ git clone https://github.com/fastmachinelearning/qonnx
cd qonnx
virtualenv -p python3.8 venv
source venv/bin/activate
pip install -e .[qkeras,testing,docs]
pip install -e .[qkeras,testing]
```

Run entire test suite, parallelized across CPU cores:
Expand Down
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ install_requires =
bitstring>=3.1.7
numpy>=1.24.1
onnx>=1.13.0
onnxruntime>=1.15.0
onnxruntime>=1.16.1
sigtools>=4.0.1
toposort>=1.7.0

Expand Down
69 changes: 44 additions & 25 deletions tests/transformation/test_conv_lowering.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,31 +68,7 @@ def test_conv_lowering_convmnist():
assert np.isclose(produced, expected).all()


# input datatype
@pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]])
# kernel size
@pytest.mark.parametrize("k_h", [2, 3])
@pytest.mark.parametrize("k_w", [2, 3, 1])
# input dimension
@pytest.mark.parametrize("ifm_dim_h", [9, 11])
@pytest.mark.parametrize("ifm_dim_w", [9, 11, 1])
# input channels
@pytest.mark.parametrize("ifm_ch", [2, 3])
# stride
@pytest.mark.parametrize("stride", [[1, 1], [1, 2], [2, 1], [2, 2]])
# padding
@pytest.mark.parametrize("padding", [[0, 0, 0, 0], [1, 1, 1, 1]])
# dilations
@pytest.mark.parametrize("dilations", [[1, 1], [2, 2], [3, 3]])
# depthwise or channelwise
@pytest.mark.parametrize("dw", [True, False])
# conv bias
@pytest.mark.parametrize("bias", [True, False])
def test_dws_reg_conv_lowering(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias):
if k_h > ifm_dim_h:
pytest.skip("Kernel height must be smaller than image height")
if k_w > ifm_dim_w:
pytest.skip("Kernel width must be smaller than image height")
def run_conv_lowering_test(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias):
# Ensure the right padding parameters are set
if ifm_dim_w == 1:
dilations[1] = 1
Expand Down Expand Up @@ -191,6 +167,34 @@ def test_dws_reg_conv_lowering(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stri
assert im2col_node.get_nodeattr("depthwise") == 1


# input datatype
@pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]])
# kernel size
@pytest.mark.parametrize("k_h", [2, 3])
@pytest.mark.parametrize("k_w", [2, 3, 1])
# input dimension
@pytest.mark.parametrize("ifm_dim_h", [9, 11])
@pytest.mark.parametrize("ifm_dim_w", [9, 11, 1])
# input channels
@pytest.mark.parametrize("ifm_ch", [2, 3])
# stride
@pytest.mark.parametrize("stride", [[1, 1], [1, 2], [2, 1], [2, 2]])
# padding
@pytest.mark.parametrize("padding", [[0, 0, 0, 0], [1, 1, 1, 1]])
# dilations
@pytest.mark.parametrize("dilations", [[1, 1], [2, 2], [3, 3]])
# depthwise or channelwise
@pytest.mark.parametrize("dw", [True, False])
# conv bias
@pytest.mark.parametrize("bias", [True, False])
def test_dws_reg_conv_lowering(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias):
    """Parametrized sweep of conv lowering over datatype, kernel and input
    geometry, stride, padding, dilation, depthwise/regular mode, and bias.

    Delegates the actual model construction and checking to
    run_conv_lowering_test; configurations where the kernel does not fit
    inside the input image are skipped.
    """
    if k_h > ifm_dim_h:
        pytest.skip("Kernel height must be smaller than image height")
    if k_w > ifm_dim_w:
        # fix copy-paste bug: this guard compares against the image *width*
        pytest.skip("Kernel width must be smaller than image width")
    run_conv_lowering_test(idt, k_h, k_w, ifm_dim_h, ifm_dim_w, ifm_ch, stride, padding, dilations, dw, bias)


# input datatype
@pytest.mark.parametrize("idt", [DataType["INT2"], DataType["INT4"]])
# kernel size
Expand Down Expand Up @@ -336,3 +340,18 @@ def test_conv_lowering_conv_1x1():
assert new_model.graph.node[1].op_type == "MatMul"
assert new_model.graph.node[2].op_type == "Transpose"
assert len(new_model.graph.node) == 3


def test_rect_dwise_dilated_conv_lowering():
    """Regression test: lower a depthwise conv with a rectangular input,
    asymmetric padding and unequal (rectangular) dilations, without bias.
    """
    run_conv_lowering_test(
        idt=DataType["INT4"],
        k_h=3,
        k_w=3,
        ifm_dim_h=192,
        ifm_dim_w=14,
        ifm_ch=64,
        stride=[1, 1],
        padding=[2, 3, 2, 3],
        dilations=[2, 3],
        dw=True,
        bias=False,
    )
1 change: 0 additions & 1 deletion tests/transformation/test_expose_intermediate.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,5 +51,4 @@ def test_expose_intermediate(model_name):
# break out all dynamic (non-weight) quantizer outputs
pattern_list = ["Quant"]
model = model.transform(ExposeIntermediateTensorsPatternList(pattern_list, dynamic_only=True))
model.save(model_name + "_dbg.onnx")
assert len(model.graph.output) == model_details_expint[model_name]["n_quant_outputs"] + 1

0 comments on commit be72cb2

Please sign in to comment.