Skip to content

Commit

Permalink
dynMM: set rtlsim backend
Browse files Browse the repository at this point in the history
  • Loading branch information
aziz bahri committed Dec 13, 2024
1 parent 01c4628 commit 0816e62
Show file tree
Hide file tree
Showing 2 changed files with 44 additions and 37 deletions.
21 changes: 21 additions & 0 deletions src/finn/custom_op/fpgadataflow/rtl/dynmvau_rtl.py
Original file line number Diff line number Diff line change
Expand Up @@ -316,3 +316,24 @@ def _resolve_impl_style(self, dsp_block):
return "mvu_4sx4u_dsp48e2"
else:
return "mvu_8sx8u_dsp48"

def get_rtl_file_list(self, abspath=False):
    """Return the Verilog/SystemVerilog sources required to simulate this node.

    Parameters
    ----------
    abspath : bool
        When True, prefix the generated wrapper with the node's
        ``code_gen_dir_ipgen`` directory and the rtllib sources with
        ``$FINN_ROOT/finn-rtllib/mvu/``; when False, return bare file names.

    Returns
    -------
    list of str
        The generated wrapper first, followed by the static rtllib sources.
    """
    if abspath:
        # Generated-code dir comes from the node attribute; rtllib ships
        # with the FINN source tree rooted at $FINN_ROOT.
        code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen") + "/"
        rtllib_dir = os.path.join(os.environ["FINN_ROOT"], "finn-rtllib/mvu/")
    else:
        code_gen_dir = rtllib_dir = ""
    # Static rtllib sources shared by all MVU/VVU RTL variants.
    rtllib_sources = [
        "mv_matrix_load.sv",
        "mv_matrix_load_wide.sv",
        "mvu_4sx4u.sv",
        "mvu_8sx8u_dsp48.sv",
        "mvu_dyn_axi.sv",
        "mvu_vvu_8sx9_dsp58.sv",
        "mvu_vvu_axi.sv",
        "ram_p_c.sv",
        "replay_buffer.sv",
    ]
    files = [code_gen_dir + self.get_nodeattr("gen_top_module") + "_wrapper_sim.v"]
    files.extend(rtllib_dir + src for src in rtllib_sources)
    return files
60 changes: 23 additions & 37 deletions tests/fpgadataflow/test_fpgadataflow_dynamic_mvau.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,56 +29,36 @@
import pytest

import numpy as np
import qonnx.custom_op.general.xnorpopcount as xp
from onnx import TensorProto, helper
from qonnx.core.datatype import DataType
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.custom_op.general.multithreshold import multithreshold
from qonnx.custom_op.registry import getCustomOp
from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO

from qonnx.transformation.general import (
ApplyConfig,
GiveReadableTensorNames,
GiveUniqueNodeNames,
)
from qonnx.transformation.general import ApplyConfig, GiveUniqueNodeNames
from qonnx.transformation.infer_datatypes import InferDataTypes
from qonnx.util.basic import (
calculate_signed_dot_prod_range,
gen_finn_dt_tensor,
qonnx_make_model,
)
from qonnx.util.basic import gen_finn_dt_tensor, qonnx_make_model

import finn.core.onnx_exec as oxe
import finn.transformation.fpgadataflow.convert_to_hw_layers as to_hw
from finn.analysis.fpgadataflow.exp_cycles_per_layer import exp_cycles_per_layer
from finn.analysis.fpgadataflow.hls_synth_res_estimation import hls_synth_res_estimation
from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
from finn.transformation.fpgadataflow.create_stitched_ip import CreateStitchedIP
from finn.transformation.fpgadataflow.derive_characteristic import DeriveCharacteristic
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.minimize_accumulator_width import (
MinimizeAccumulatorWidth,
)
from finn.transformation.fpgadataflow.minimize_weight_bit_width import (
MinimizeWeightBitWidth,
)
from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
from finn.transformation.fpgadataflow.prepare_rtlsim import PrepareRTLSim
from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
from finn.transformation.fpgadataflow.set_fifo_depths import InsertAndSetFIFODepths
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

import pdb

def save_model(model, name):
    """Write *model* to ``dynMM_debug_<name>.onnx`` in the working directory (debug aid)."""
    model.save(f"dynMM_debug_{name}.onnx")


#
# Init
#


def make_dynamic_matmul_modelwrapper(M, N, K, A_dtype, B_dtype):
inp_A = [1, M, N]
inp_B = [1, N, K]
Expand All @@ -93,18 +73,18 @@ def make_dynamic_matmul_modelwrapper(M, N, K, A_dtype, B_dtype):
nodes=[matmul_node],
name="matmul_graph_2_inputs",
inputs=[A_vi, B_vi],
outputs=[outp_tensor_value_info])
outputs=[outp_tensor_value_info],
)

model = qonnx_make_model(graph, producer_name="fclayer-model")
model = ModelWrapper(model)
model.set_tensor_datatype("inp_A", A_dtype)
model.set_tensor_datatype("inp_B", B_dtype)
model.set_tensor_datatype(
"outp", DataType["INT32"]
)
model.set_tensor_datatype("outp", DataType["INT32"])

return model


# matrix size [MxN] * [NxK]
@pytest.mark.parametrize("M", [128])
@pytest.mark.parametrize("N", [32])
Expand Down Expand Up @@ -134,7 +114,10 @@ def test_fpgadataflow_infer_dyn_mvau(M, N, K, A_dtype, B_dtype):
model = model.transform(to_hw.InferQuantizedMatrixVectorActivation())
output_mvau = oxe.execute_onnx(model, input_dict)["outp"]

assert np.allclose(output_matmul, output_mvau), "Output of ONNX model not matching output of MVAU!"
assert np.allclose(
output_matmul, output_mvau
), "Output of ONNX model not matching output of MVAU!"


# matrix size [MxN] * [NxK]
@pytest.mark.parametrize("M", [128])
Expand All @@ -152,7 +135,6 @@ def test_fpgadataflow_dynamic_mvau_cppsim(M, N, K, pe, simd, A_dtype, B_dtype):
This test generates a MatMul Onnx graph, and then applies transformations
"""
part = "xcvc1902-vsva2197-2MP-e-S"
clk_ns = 4

# Folding
assert K % pe == 0
Expand Down Expand Up @@ -181,7 +163,7 @@ def test_fpgadataflow_dynamic_mvau_cppsim(M, N, K, pe, simd, A_dtype, B_dtype):
inst.set_nodeattr("preferred_impl_style", "rtl")
inst.set_nodeattr("mem_mode", "external")
inst.set_nodeattr("rtlsim_trace", "MVAU_dyn.vcd")
inst.set_nodeattr("inFIFODepths", [16,16])
inst.set_nodeattr("inFIFODepths", [16, 16])
# Apply convert-to-rtl step
model = model.transform(SpecializeLayers(part))
model = model.transform(GiveUniqueNodeNames())
Expand Down Expand Up @@ -210,6 +192,7 @@ def test_fpgadataflow_dynamic_mvau_cppsim(M, N, K, pe, simd, A_dtype, B_dtype):
output_matmul == output_mvau_cppsim
).all(), "Output of ONNX model not matching output of node-by-node CPPsim!"


# matrix size [MxN] * [NxK]
@pytest.mark.parametrize("M", [128])
@pytest.mark.parametrize("N", [32])
Expand Down Expand Up @@ -254,8 +237,8 @@ def test_fpgadataflow_dynamic_mvau_rtlsim(M, N, K, pe, simd, A_dtype, B_dtype):
inst = getCustomOp(node)
inst.set_nodeattr("preferred_impl_style", "rtl")
inst.set_nodeattr("mem_mode", "external")
inst.set_nodeattr("rtlsim_trace", "MVAU_dyn.vcd")
inst.set_nodeattr("inFIFODepths", [16,16])
inst.set_nodeattr("rtlsim_backend", "pyverilator")
inst.set_nodeattr("inFIFODepths", [16, 16])
# Apply convert-to-rtl step
model = model.transform(SpecializeLayers(part))
model = model.transform(GiveUniqueNodeNames())
Expand All @@ -276,6 +259,8 @@ def test_fpgadataflow_dynamic_mvau_rtlsim(M, N, K, pe, simd, A_dtype, B_dtype):
model = model.transform(PrepareIP(part, clk_ns))
model = model.transform(HLSSynthIP())
model = model.transform(PrepareRTLSim())
model.set_metadata_prop("rtlsim_backend", "pyverilator")

output_mvau_rtl = oxe.execute_onnx(model, input_dict)["outp"]
assert (
output_matmul == output_mvau_rtl
Expand Down Expand Up @@ -327,8 +312,8 @@ def test_fpgadataflow_rtl_dynamic_mvau(M, N, K, pe, simd, A_dtype, B_dtype):
inst = getCustomOp(node)
inst.set_nodeattr("preferred_impl_style", "rtl")
inst.set_nodeattr("mem_mode", "external")
inst.set_nodeattr("rtlsim_trace", "MVAU_dyn.vcd")
inst.set_nodeattr("inFIFODepths", [16,16])
inst.set_nodeattr("rtlsim_backend", "pyverilator")
inst.set_nodeattr("inFIFODepths", [16, 16])
# Apply convert-to-rtl step
model = model.transform(SpecializeLayers(part))
model = model.transform(GiveUniqueNodeNames())
Expand All @@ -355,11 +340,12 @@ def test_fpgadataflow_rtl_dynamic_mvau(M, N, K, pe, simd, A_dtype, B_dtype):
model = model.transform(PrepareRTLSim())
model = model.transform(SetExecMode("rtlsim"))
model.set_metadata_prop("exec_mode", "rtlsim")
model.set_metadata_prop("rtlsim_backend", "pyverilator")

output_mvau_rtl_stitch = oxe.execute_onnx(model, input_dict)["outp"]

assert (
output_matmul == output_mvau_rtl_stitch
).all(), "Output of ONNX model not matching output of stitched-IP RTL model!"

return 0
return 0

0 comments on commit 0816e62

Please sign in to comment.