From ad4678a4d460814444a6368a206b7ff5559876d0 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Mon, 3 Apr 2023 15:35:47 +0100
Subject: [PATCH 01/77] [jenkins] add node label to Jenkinsfile

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index e3e5b5f7f9..2e195d105e 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -1,4 +1,4 @@
-node {
+node('finn-build') {
     def app
     stage('Clone repository') {
         /* Let's make sure we have the repository cloned to our workspace */

From a14bf7ea9e41c1dc5b21bf18bcb8e04105803b41 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 4 Apr 2023 15:31:09 +0100
Subject: [PATCH 02/77] [jenkins] introduce basic declarative pipeline

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/Jenkinsfile | 67 +++++++++++++------------------------
 1 file changed, 22 insertions(+), 45 deletions(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index 2e195d105e..fee116da3a 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -1,46 +1,23 @@
-node('finn-build') {
-    def app
-    stage('Clone repository') {
-        /* Let's make sure we have the repository cloned to our workspace */
-        checkout scm
+pipeline {
+    agent { node { label 'finn-build' } }
+    environment {
+        FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64"
+        FINN_XILINX_VERSION="2022.1"
+        FINN_DOCKER_TAG="xilinx/finn:jenkins"
+        FINN_HOST_BUILD_DIR="/scratch/users/finn_ci"
+        PLATFORM_REPO_PATHS="/opt/xilinx/platforms"
+    }
+    stages {
+        stage('Quicktest') {
+            steps {
+                sh 'echo "Hello FINN"'
+                sh 'hostname'
+                sh 'whoami'
+                sh 'pwd'
+                sh 'docker login'
+                sh 'printenv | sort'
+                sh 'run-docker.sh quicktest'
+            }
         }
-    withEnv([
-        "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64",
-        "FINN_XILINX_VERSION=2022.1",
-        "FINN_DOCKER_TAG=xilinx/finn:jenkins",
-        "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci",
-        "PLATFORM_REPO_PATHS=/opt/xilinx/platforms"
-    ]){
-        parallel firstBranch: {
-            stage('Brevitas export') {
-                dir("${env.WORKSPACE}") {
-                    sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export")
-                }
-            }
-        }, secondBranch: {
-            stage('Streamlining transformations') {
-                dir("${env.WORKSPACE}") {
-                    sh("bash run-docker.sh python setup.py test --addopts -mstreamline")
-                }
-            }
-        }, thirdBranch: {
-            stage('Util functions') {
-                dir("${env.WORKSPACE}") {
-                    sh("bash run-docker.sh python setup.py test --addopts -mutil")
-                }
-            }
-        }, fourthBranch: {
-            stage('General transformations') {
-                dir("${env.WORKSPACE}") {
-                    sh("bash run-docker.sh python setup.py test --addopts -mtransform")
-                }
-            }
-        }, fifthBranch: {
-            stage('Fpgadataflow transformations and simulations') {
-                dir("${env.WORKSPACE}") {
-                    sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow")
-                }
-            }
-        }
-    }
-}
+    }
+}
\ No newline at end of file

From 60033063c3ca57542120e8b49b1f2baf374ebfe3 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 4 Apr 2023 16:08:35 +0100
Subject: [PATCH 03/77] [jenkins] move into the test dir before running quicktest

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/Jenkinsfile | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index fee116da3a..dfe8b42f58 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -10,13 +10,15 @@ pipeline {
     stages {
         stage('Quicktest') {
             steps {
-                sh 
'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh 'run-docker.sh quicktest' + dir("finn") { + sh 'echo "Hello FINN"' + sh 'hostname' + sh 'whoami' + sh 'pwd' + sh 'docker login' + sh 'printenv | sort' + sh './run-docker.sh quicktest' + } } } } From da54e373e3be095804533648e39273366b42aef8 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:09:44 +0100 Subject: [PATCH 04/77] [jenkins] keep 30 builds in build history Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index dfe8b42f58..db0bf15815 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,4 +1,7 @@ pipeline { + options { + buildDiscarder(logRotator(numToKeepStr: '30', artifactNumToKeepStr: '30')) + } agent { node { label 'finn-build' } } environment { FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" From da57f9bcdbfac83e8b2f9545a74582ccdb2c2d4c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:19:49 +0100 Subject: [PATCH 05/77] Revert "[jenkins] move into the test dir before running quicktest" This reverts commit 60033063c3ca57542120e8b49b1f2baf374ebfe3. --- docker/jenkins/Jenkinsfile | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index db0bf15815..1497c5f843 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -13,15 +13,13 @@ pipeline { stages { stage('Quicktest') { steps { - dir("finn") { - sh 'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh './run-docker.sh quicktest' - } + sh 'echo "Hello FINN"' + sh 'hostname' + sh 'whoami' + sh 'pwd' + sh 'docker login' + sh 'printenv | sort' + sh 'run-docker.sh quicktest' } } } From 0d3d69228b5d816a156e30acc11b9e1b48d220a0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 4 Apr 2023 16:22:24 +0100 Subject: [PATCH 06/77] [jenkins] the './' was necessary to run the test, not moving into a new directory Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1497c5f843..2107524169 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -19,7 +19,7 @@ pipeline { sh 'pwd' sh 'docker login' sh 'printenv | sort' - sh 'run-docker.sh quicktest' + sh './run-docker.sh quicktest' } } } From 5180d1f987f11a27471a86a0ee061b333f9d7fba Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 07:51:41 +0100 Subject: [PATCH 07/77] Change end2end tests to use board type as opposed to kind to allow for more boards to be parameterized Signed-off-by: Fionn O'Donohoe --- src/finn/util/test.py | 22 +++---- tests/end2end/test_end2end_bnn_pynq.py | 84 +++++++++++------------- tests/end2end/test_end2end_cybsec_mlp.py | 6 +- tests/end2end/test_ext_weights.py | 6 +- 4 files changed, 54 insertions(+), 64 deletions(-) diff --git a/src/finn/util/test.py b/src/finn/util/test.py index 4250079ef3..3545e2be8e 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -106,26 +106,26 @@ def load_test_checkpoint_or_skip(filename): pytest.skip(filename + " not found from previous test step, skipping") -def get_build_env(kind, target_clk_ns): +def get_build_env(board, target_clk_ns): """Get board-related build 
environment for testing. - - kind = either zynq or alveo. + - board = any from pynq_part_map or alveo_part_map """ ret = {} - if kind == "zynq": - ret["board"] = os.getenv("PYNQ_BOARD", default="Pynq-Z1") - ret["part"] = pynq_part_map[ret["board"]] - ret["build_fxn"] = ZynqBuild(ret["board"], target_clk_ns) - elif kind == "alveo": - ret["board"] = os.getenv("ALVEO_BOARD", default="U250") - ret["part"] = alveo_part_map[ret["board"]] + if board in pynq_part_map: + ret["kind"] = "zynq" + ret["part"] = pynq_part_map[board] + ret["build_fxn"] = ZynqBuild(board, target_clk_ns) + elif board in alveo_part_map: + ret["kind"] = "alveo" + ret["part"] = alveo_part_map[board] ret["build_fxn"] = VitisBuild( ret["part"], target_clk_ns, - alveo_default_platform[ret["board"]], + alveo_default_platform[board], strategy=VitisOptStrategy.BUILD_SPEED, ) else: - raise Exception("Unknown test build environment spec") + raise Exception("Unknown board specified") return ret diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 27aaa1986d..5274d923c1 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -296,7 +296,7 @@ def topology2dataset(topology): @pytest.mark.parametrize("QONNX_export", [False, True]) @pytest.mark.end2end class TestEnd2End: - def test_export(self, topology, wbits, abits, QONNX_export): + def test_export(self, topology, wbits, abits, QONNX_export, board): if wbits > abits: pytest.skip("No wbits > abits end2end network configs for now") if topology == "lfc" and not (wbits == 1 and abits == 1): @@ -313,7 +313,7 @@ def test_export(self, topology, wbits, abits, QONNX_export): export_finn_onnx(model, torch.randn(ishape), chkpt_name) assert os.path.isfile(chkpt_name) - def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): + def test_import_and_tidy(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "export" ) @@ -329,7 +329,7 @@ def test_import_and_tidy(self, topology, wbits, abits, QONNX_export): ) model.save(chkpt) - def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): + def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "import_and_tidy" ) @@ -366,7 +366,7 @@ def test_add_pre_and_postproc(self, topology, wbits, abits, QONNX_export): model.save(chkpt_name) assert os.path.isfile(chkpt_name) - def test_streamline(self, topology, wbits, abits, QONNX_export): + def test_streamline(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "pre_post" ) @@ -389,7 +389,7 @@ def test_streamline(self, topology, wbits, abits, QONNX_export): get_checkpoint_name(topology, wbits, abits, QONNX_export, "streamline") ) - def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): + def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "streamline" ) @@ -455,7 +455,7 @@ def test_convert_to_hls_layers(self, topology, wbits, abits, QONNX_export): for (op_type, exp_count) in exp_layer_counts: assert len(model.get_nodes_by_op_type(op_type)) == exp_count - def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): + def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export, board): 
prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "convert_to_hls_layers" ) @@ -474,7 +474,7 @@ def test_create_dataflow_partition(self, topology, wbits, abits, QONNX_export): ) dataflow_model.save(dataflow_model_chkpt) - def test_fold(self, topology, wbits, abits, QONNX_export): + def test_fold(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "dataflow_model" ) @@ -483,7 +483,7 @@ def test_fold(self, topology, wbits, abits, QONNX_export): model = folding_fxn(model) model.save(get_checkpoint_name(topology, wbits, abits, QONNX_export, "fold")) - def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): + def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "fold" ) @@ -497,7 +497,7 @@ def test_minimize_bit_width(self, topology, wbits, abits, QONNX_export): @pytest.mark.slow @pytest.mark.vivado - def test_cppsim(self, topology, wbits, abits, QONNX_export): + def test_cppsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "minimize_bit_width" ) @@ -520,49 +520,46 @@ def test_cppsim(self, topology, wbits, abits, QONNX_export): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_ipgen(self, topology, wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_ipgen(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( topology, wbits, abits, QONNX_export, "fold" ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] model = model.transform(GiveUniqueNodeNames()) - model = model.transform(PrepareIP(test_fpga_part, target_clk_ns)) + model = model.transform(PrepareIP(build_data["part"], target_clk_ns)) model = model.transform(HLSSynthIP()) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "ipgen_" + board) ) @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, kind): + def test_set_fifo_depths(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipgen_" + kind + topology, wbits, abits, QONNX_export, "ipgen_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] + test_fpga_part = get_build_env(board, target_clk_ns)["part"] model = model.transform(InsertAndSetFIFODepths(test_fpga_part, target_clk_ns)) fifo_layers = model.get_nodes_by_op_type("StreamingFIFO") assert len(fifo_layers) > 0 model.save( get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) ) @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): + def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = 
get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - test_fpga_part = get_build_env(kind, target_clk_ns)["part"] + test_fpga_part = get_build_env(board, target_clk_ns)["part"] model = model.transform(InsertDWC()) model = model.transform(GiveUniqueNodeNames()) model = model.transform(AnnotateCycles()) @@ -582,7 +579,7 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): ) os.environ["RTLSIM_TRACE_DEPTH"] = "3" rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) model.save(rtlsim_chkpt) parent_chkpt = get_checkpoint_name( @@ -596,10 +593,9 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): + def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, board): prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) n_nodes = len(model.graph.node) @@ -615,8 +611,7 @@ def test_throughput_rtlsim(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado - @pytest.mark.parametrize("kind", ["zynq"]) - def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): + def test_validate_top1(self, topology, wbits, abits, QONNX_export, board): if "TEST_END2END_VALIDATE_TOP1" not in os.environ: pytest.skip("TEST_END2END_VALIDATE_TOP1 not set") prepostproc_chkpt = get_checkpoint_name( @@ -632,7 +627,7 @@ def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): topology, wbits, abits, QONNX_export, "cppsim" ) rtlsim_chkpt = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + kind + topology, wbits, abits, QONNX_export, "ipstitch_rtlsim_" + board ) dataset = topology2dataset(topology) assert measure_top1_accuracy(prepostproc_chkpt, dataset) > 80 @@ -643,34 +638,33 @@ def test_validate_top1(self, topology, wbits, abits, QONNX_export, kind): @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) - def test_build(self, topology, wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_build(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "fifodepth_" + kind + topology, wbits, abits, QONNX_export, "fifodepth_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - cfg = get_build_env(kind, target_clk_ns) - model = model.transform(cfg["build_fxn"]) + model = model.transform(build_data["build_fxn"]) model = model.transform(AnnotateResources("synth")) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "build_" + board) ) @pytest.mark.slow @pytest.mark.vivado @pytest.mark.vitis - @pytest.mark.parametrize("kind", ["zynq", "alveo"]) 
- def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, kind): - if kind == "alveo" and ("VITIS_PATH" not in os.environ): + def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, board): + build_data = get_build_env(board, target_clk_ns) + if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, QONNX_export, "build_" + kind + topology, wbits, abits, QONNX_export, "build_" + board ) model = load_test_checkpoint_or_skip(prev_chkpt_name) - kind_to_driver_platform = {"zynq": "zynq-iodma", "alveo": "alveo"} - model = model.transform(MakePYNQDriver(kind_to_driver_platform[kind])) + board_to_driver_platform = "alveo" if build_data["kind"] == "alveo" else "zynq-iodma" + model = model.transform(MakePYNQDriver(board_to_driver_platform)) model.save( - get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + kind) + get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + board) ) diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 5e402bdeb4..ba1de29735 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -48,10 +48,9 @@ import finn.builder.build_dataflow_config as build_cfg from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN from finn.util.basic import make_build_dir -from finn.util.test import get_build_env, load_test_checkpoint_or_skip +from finn.util.test import load_test_checkpoint_or_skip target_clk_ns = 10 -build_kind = "zynq" build_dir = os.environ["FINN_BUILD_DIR"] @@ -183,14 +182,13 @@ def test_end2end_cybsec_mlp_export(QONNX_export): def test_end2end_cybsec_mlp_build(QONNX_export): model_file = get_checkpoint_name("export", QONNX_export) load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) output_dir = make_build_dir(f"test_end2end_cybsec_mlp_build_QONNX-{QONNX_export}") cfg = build.DataflowBuildConfig( output_dir=output_dir, target_fps=1000000, synth_clk_period_ns=target_clk_ns, - board=build_env["board"], + board="Pynq-Z1", shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index bef2e0ffa7..8bbfb4be9a 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -38,10 +38,9 @@ import finn.builder.build_dataflow as build import finn.builder.build_dataflow_config as build_cfg from finn.util.basic import make_build_dir -from finn.util.test import get_build_env, load_test_checkpoint_or_skip +from finn.util.test import load_test_checkpoint_or_skip target_clk_ns = 10 -build_kind = "zynq" build_dir = os.environ["FINN_BUILD_DIR"] onnx_zip_url = "https://github.com/Xilinx/finn-examples" onnx_zip_url += "/releases/download/v0.0.1a/onnx-models-bnn-pynq.zip" @@ -83,7 +82,6 @@ def test_end2end_ext_weights_download(): def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) folding_config_file = pk.resource_filename( "finn.qnn-data", "test_ext_weights/tfc-w1a1-extw.json" ) @@ -93,7 +91,7 @@ def test_end2end_ext_weights_build(): verbose=True, folding_config_file=folding_config_file, synth_clk_period_ns=target_clk_ns, - board=build_env["board"], + board="Pynq-Z1", 
shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ, generate_outputs=[ build_cfg.DataflowOutputType.ESTIMATE_REPORTS, From 56e43152931207189741034659b34e626da63705 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 07:56:08 +0100 Subject: [PATCH 08/77] Add test_deploy method for BNN end2end tests for Jenkins setup Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 52 +++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 5274d923c1..02ea7c24ff 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -38,6 +38,8 @@ import warnings from brevitas.export import export_finn_onnx, export_qonnx from dataset_loading import cifar, mnist +from distutils.dir_util import copy_tree +from shutil import copy from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -89,7 +91,7 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.util.basic import get_finn_root +from finn.util.basic import get_finn_root, make_build_dir from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -290,6 +292,42 @@ def topology2dataset(topology): raise Exception("Unrecognized topology") +def deploy_based_on_board(model, model_title, topology, wbits, abits, board): + if os.environ.get('FINN_DEPLOY_DIR') is not None: + deploy_dir_root = os.environ["FINN_DEPLOY_DIR"] + else: + deploy_dir_root = make_build_dir(prefix="hw_deployment_" + board + "_") + # Set it for the next round if multiple bitstreams are selected for generation + os.environ["FINN_DEPLOY_DIR"] = deploy_dir_root + + # create directory for deployment files + deployment_dir = deploy_dir_root + "/" + board + "/" + model_title + os.makedirs(deployment_dir) + model.set_metadata_prop("pynq_deployment_dir", deployment_dir) + + # get and copy necessary files + # .bit and .hwh file + bitfile = model.get_metadata_prop("bitfile") + hwh_file = model.get_metadata_prop("hw_handoff") + deploy_files = [bitfile, hwh_file] + + for dfile in deploy_files: + if dfile is not None: + copy(dfile, deployment_dir) + + # create input and output test files + (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( + topology, wbits, abits, return_topk=1 + ) + np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy) + np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy) + + # driver.py and python libraries + pynq_driver_dir = model.get_metadata_prop("pynq_driver_dir") + copy_tree(pynq_driver_dir, deployment_dir) + model.set_metadata_prop("pynq_deploy_dir", deployment_dir) + + @pytest.mark.parametrize("wbits", [1, 2]) @pytest.mark.parametrize("abits", [1, 2]) @pytest.mark.parametrize("topology", ["lfc", "tfc", "cnv"]) @@ -668,3 +706,15 @@ def test_make_pynq_driver(self, topology, wbits, abits, QONNX_export, board): model.save( get_checkpoint_name(topology, wbits, abits, QONNX_export, "driver_" + board) ) + + def test_deploy(self, topology, wbits, abits, QONNX_export, board): + prev_chkpt_name = get_checkpoint_name( + topology, wbits, abits, QONNX_export, "driver_" + board + ) + model = load_test_checkpoint_or_skip(prev_chkpt_name) + model_title = "%s_w%d_a%d_%s_QE-%s" % ("bnn", wbits, abits, topology, QONNX_export) + deploy_based_on_board(model, model_title, topology, wbits, abits, board) + # save the model to be able to link 
it to the parent + model.save( + get_checkpoint_name(topology, wbits, abits, QONNX_export, "deploy_" + board) + ) From 5c03333b923194a1f3c4d2359b7f8701aa2f4410 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 07:59:58 +0100 Subject: [PATCH 09/77] Add parameterized tests for all supported boards. Split test matrix by board marker Signed-off-by: Fionn O'Donohoe --- src/finn/util/basic.py | 3 ++ tests/end2end/test_end2end_bnn_pynq.py | 69 ++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 3bc5b803db..abbf85d37d 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -31,6 +31,9 @@ import sys import tempfile +# supported boards +test_support_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] + # mapping from PYNQ board names to FPGA part names pynq_part_map = dict() pynq_part_map["Ultra96"] = "xczu3eg-sbva484-1-e" diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 02ea7c24ff..30bbadb6fc 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -328,11 +328,70 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): model.set_metadata_prop("pynq_deploy_dir", deployment_dir) -@pytest.mark.parametrize("wbits", [1, 2]) -@pytest.mark.parametrize("abits", [1, 2]) -@pytest.mark.parametrize("topology", ["lfc", "tfc", "cnv"]) -@pytest.mark.parametrize("QONNX_export", [False, True]) -@pytest.mark.end2end +# parameters that make up inputs to test case(s) +def get_full_parameterized_test_list(marker, wbits_list, abits_list, topology_list, QONNX_export_list, board_list): + test_cases = [ + (f'{marker}_w{param1}_a{param2}_{param3}_QE{param4}_{param5}', { + 'wbits': param1, + 'abits': param2, + 'topology': param3, + 'QONNX_export': param4, + 'board': param5 + }) + for param1, param2, param3, param4, param5 in itertools.product( + wbits_list, + abits_list, + topology_list, + QONNX_export_list, + board_list, + ) + ] + return test_cases + + +def pytest_generate_tests(metafunc): + idlist = [] + argvalues = [] + scenarios = [] + + # Full set of test parameters + wbits = [1, 2] + abits = [1, 2] + topology = ["lfc", "tfc", "cnv"] + QONNX_export = [False, True] + + # Separate the full list of markers used on command line. 
+    # This allows a user to select multiple markers
+    all_markers_used = metafunc.config.getoption("-m").split(" ")
+
+    for marker in all_markers_used:
+        if "sanity_bnn" in marker:
+            # Define a set of sanity tests that target each of the supported boards with fixed parameters
+            scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[1], topology_list=["lfc"], QONNX_export_list=[False], board_list=[test_support_board_map[0]]))
+            scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[2], topology_list=["cnv"], QONNX_export_list=[True], board_list=[test_support_board_map[1]]))
+            scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["tfc"], QONNX_export_list=[False], board_list=[test_support_board_map[2]]))
+            scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["cnv"], QONNX_export_list=[True], board_list=[test_support_board_map[3]]))
+
+        if "bnn_" in marker:
+            # Target the full set of parameters for a single board
+            # Extract the board name from the marker used, as it is in the form of 'bnn_<board>'
+            bnn_board = next((element for element in test_support_board_map if marker.split("_")[1] in element.lower()), None)
+            test_cases = get_full_parameterized_test_list("bnn", wbits, abits, topology, QONNX_export, [bnn_board])
+            scenarios.extend(test_cases)
+
+    if len(scenarios) > 0:
+        for scenario in scenarios:
+            idlist.append(scenario[0])
+            items = scenario[1].items()
+            argnames = [x[0] for x in items]
+            argvalues.append([x[1] for x in items])
+        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
+
+@pytest.mark.sanity_bnn
+@pytest.mark.bnn_pynq
+@pytest.mark.bnn_zcu104
+@pytest.mark.bnn_kv260
+@pytest.mark.bnn_u250
 class TestEnd2End:
     def test_export(self, topology, wbits, abits, QONNX_export, board):
         if wbits > abits:

From 8c98882a1609f5c5cbd6aa853756806132ed545b Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 27 Jun 2023 08:03:06 +0100
Subject: [PATCH 10/77] Add scripts used by Jenkins to test bnn end2end hardware tests

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/hack_driver_script.py |  53 ++++++++
 docker/jenkins/test_bnn_hw_pytest.py | 177 +++++++++++++++++++++++++++
 2 files changed, 230 insertions(+)
 create mode 100755 docker/jenkins/hack_driver_script.py
 create mode 100755 docker/jenkins/test_bnn_hw_pytest.py

diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py
new file mode 100755
index 0000000000..cd3becf7cf
--- /dev/null
+++ b/docker/jenkins/hack_driver_script.py
@@ -0,0 +1,53 @@
+import os
+
+def remove_cache_dirs(dir_list):
+    tmp_list = list(dir_list)
+    for i in range(len(tmp_list)-1, -1, -1):
+        if ".pytest_cache" in tmp_list[i]:
+            del tmp_list[i]
+        elif "__pycache__" in tmp_list[i]:
+            del tmp_list[i]
+    return tmp_list
+
+def hack_driver_script(board, test_dir):
+    test_script_file = "driver.py"
+    # Read the contents of the test script file
+    with open(test_script_file, "r") as f:
+        lines = f.readlines()
+
+    # Specify the line to be replaced and the new line
+    line_to_replace = "ishape_normal"
+    if "cnv" in test_dir:
+        new_line = " \"ishape_normal\" : [(1, 32, 32, 3)],"
+    else:
+        # Usually a size of (1, 784) to begin with
+        if board == "Pynq-Z1":
+            new_line = " \"ishape_normal\" : [(1, 28, 28, 1)],"
+        else:
+            new_line = " \"ishape_normal\" : [(1, 1, 28, 28)],"
+
+    # Iterate over the lines and replace the specified line
+    for i in 
range(len(lines)): + if line_to_replace in lines[i]: + lines[i] = new_line + "\n" + break # Only replace the first occurrence + + # Write the modified contents back to the test script file + with open(test_script_file, "w") as f: + f.writelines(lines) + +if __name__ == "__main__": + current_dir = os.getcwd() + board = os.path.basename(current_dir) + + # Get list of local directories - removing the Python cache directories + local_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + local_dirs = remove_cache_dirs(local_dirs) + + # Now create the full paths for each relative path + local_dirs_full_path = [os.path.join(current_dir, name) for name in local_dirs if os.path.isdir(os.path.join(current_dir, name))] + + # Change the driver.py script for each of the test directories + for dir in local_dirs_full_path: + os.chdir(dir) + hack_driver_script(board, dir) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py new file mode 100755 index 0000000000..09e62fd1d9 --- /dev/null +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -0,0 +1,177 @@ +import os +import numpy as np +from scipy.stats import linregress +import subprocess +import pytest +import itertools +import logging + +# no __init__ constructors allowed in Pytest - so use global variables instead +base_dir_global = os.getcwd() +default_test_run_timeout = 30 # seconds +output_execute_results_file = "output.npy" +execute_results_reference_file = "output_reference.npy" +output_throughput_results_file = "nw_metrics.txt" +throughput_results_formatted_file = "throughput_metrics_formatted.txt" +logger = logging.getLogger(__name__) + + +def remove_cache_dirs(dir_list): + tmp_list = list(dir_list) + for i in range(len(tmp_list)-1, -1, -1): + if ".pytest_cache" in tmp_list[i]: + del tmp_list[i] + elif "__pycache__" in tmp_list[i]: + del tmp_list[i] + return tmp_list + +def remove_destructive_board_tests(board, test_list): + tmp_list = list(test_list) + if "Pynq" in board: + # both tests are destructive to the Pynq-Z1 board and require a board reboot + for i in range(len(tmp_list)-1, -1, -1): + if "bnn_w2_a2_cnv_QE-True" in tmp_list[i]: + del tmp_list[i] + elif "bnn_w1_a1_tfc_QE-True" in tmp_list[i]: + del tmp_list[i] + return tmp_list + +def delete_file(file_path): + # Check if the file exists before deleting it + if os.path.exists(file_path): + try: + os.remove(file_path) + logger.info(f"File '{file_path}' deleted successfully.") + except Exception as e: + logger.error(f"An error occurred while deleting the file: {e}") + else: + logger.info(f"File '{file_path}' does not exist. Continuing with the script.") + +def get_platform(board_str): + return "alveo" if "U250" in board_str else "zynq-iodma" + +def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, platform_list): + test_cases = [ + (f'{marker}_{param1}_batchSize-{param2}_platform-{param3}', { + 'test_dir': param1, + 'batch_size': param2, + 'platform': param3, + }) + for param1, param2, param3 in itertools.product( + test_dir_list, + batch_size_list, + platform_list, + ) + ] + return test_cases + +def pytest_generate_tests(metafunc): + idlist = [] + argvalues = [] + scenarios = [] + + # Separate the full list of markers used on command line. 
+ # This allows a user to select multiple markers + all_markers_used = metafunc.config.getoption("-m").split(" ") + current_dir = os.getcwd() + test_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + test_dirs = remove_cache_dirs(test_dirs) + + for marker in all_markers_used: + platform = get_platform(marker) + if "Pynq" in marker: + remove_destructive_board_tests("Pynq", test_dirs) + scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + elif "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: + scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + + if len(scenarios) > 0: + for scenario in scenarios: + idlist.append(scenario[0]) + items = scenario[1].items() + argnames = [x[0] for x in items] + argvalues.append([x[1] for x in items]) + metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") + + +@pytest.mark.Pynq +@pytest.mark.U250 +@pytest.mark.ZCU104 +@pytest.mark.KV260_SOM +class TestBnn: + def test_type_execute(self, test_dir, batch_size, platform): + # Enter into test directory and clean any files from a potential previous run + os.chdir(os.path.join(base_dir_global, test_dir)) + delete_file(output_execute_results_file) + + # Run test option: execute + result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + assert result.returncode == 0 + + # Load the output and reference arrays + output_array = np.load(output_execute_results_file) + reference_array = np.load(execute_results_reference_file) + + # Compare the arrays + try: + assert np.isclose(output_array, reference_array).all() + except AssertionError as e: + logger.error("AssertionError occurred: %s", e, exc_info=True) + raise + + def test_type_throughput(self, test_dir, batch_size, platform): + os.chdir(os.path.join(base_dir_global, test_dir)) + delete_file(output_throughput_results_file) + + result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + assert result.returncode == 0 + + # Check if nw_metrics.txt now exists after test run + assert os.path.exists(output_throughput_results_file) + + with open(output_throughput_results_file, "r") as file: + res = eval(file.read()) + + # try a range of batch sizes, some may fail due to insufficient DMA + # buffers + bsize_range_in = [8**i for i in range(5)] + bsize_range = [] + ret = dict() + for bsize in bsize_range_in: + if res is not None: + ret[bsize] = res + bsize_range.append(bsize) + else: + # assume we reached largest possible N + break + + y = [ret[key]["runtime[ms]"] for key in bsize_range] + lrret = linregress(bsize_range, y) + ret_str = "" + ret_str += "\n" + "%s Throughput Test Results" % test_dir + ret_str += "\n" + "-----------------------------" + ret_str += "\n" + "From linear regression:" + ret_str += "\n" + "Invocation overhead: %f ms" % lrret.intercept + ret_str += "\n" + "Time per sample: %f ms" % lrret.slope + ret_str += "\n" + "Raw data:" + + ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} 
{:<16}".format( + "N", "runtime[ms]", "fclk[mhz]", "fps", "DRAM rd[MB/s]", "DRAM wr[MB/s]" + ) + for k in bsize_range: + v = ret[k] + ret_str += "\n" + "{:<8} {:<16} {:<16} {:<16} {:<16} {:<16}".format( + k, + np.round(v["runtime[ms]"], 4), + v["fclk[mhz]"], + np.round(v["throughput[images/s]"], 2), + np.round(v["DRAM_in_bandwidth[MB/s]"], 2), + np.round(v["DRAM_out_bandwidth[MB/s]"], 2), + ) + ret_str += "\n" + "-----------------------------" + largest_bsize = bsize_range[-1] + + # Dump the metrics to a text file + with open(throughput_results_formatted_file, "w") as f: + f.write(ret_str) + assert os.path.exists(throughput_results_formatted_file) \ No newline at end of file From b3166e4548253afa9b780d6643998e983a213b10 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Tue, 27 Jun 2023 09:37:17 +0100 Subject: [PATCH 11/77] Add U250 xclbin for end2end bnn testing Signed-off-by: Fionn O'Donohoe --- docker/jenkins/test_bnn_hw_pytest.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index 09e62fd1d9..f2b437e800 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -105,7 +105,8 @@ def test_type_execute(self, test_dir, batch_size, platform): delete_file(output_execute_results_file) # Run test option: execute - result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" + result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) assert result.returncode == 0 # Load the output and reference arrays @@ -123,7 +124,9 @@ def test_type_throughput(self, test_dir, batch_size, platform): os.chdir(os.path.join(base_dir_global, test_dir)) delete_file(output_throughput_results_file) - result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", "--bitfile=resizer.bit", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + # Run test option: throughput + bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" + result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) assert result.returncode == 0 # Check if nw_metrics.txt now exists after test run From b355a6cb530a2a7c0687b5164ac1417564f2a239 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 28 Jun 2023 16:22:54 +0100 Subject: [PATCH 12/77] Forgot to add test_support_board_map inclusion into test file Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 30bbadb6fc..14616522ec 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -91,7 +91,7 @@ 
 MakeMaxPoolNHWC,
     MoveScalarLinearPastInvariants,
 )
-from finn.util.basic import get_finn_root, make_build_dir
+from finn.util.basic import get_finn_root, make_build_dir, test_support_board_map
 from finn.util.pytorch import ToTensor
 from finn.util.test import (
     execute_parent,

From f2872c7fbe3ced9c9441da7e5a07383e2b757fcf Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Thu, 29 Jun 2023 11:42:14 +0100
Subject: [PATCH 13/77] Add missing itertools library import

Signed-off-by: Fionn O'Donohoe
---
 tests/end2end/test_end2end_bnn_pynq.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index 14616522ec..564a1ee7cb 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -30,6 +30,8 @@

 import numpy as np

+import itertools
+
 # as of Feb'20 there is a bug that segfaults ONNX shape inference if we
 # import pytorch before onnx, so we make sure to import onnx first
 import onnx  # NOQA

From 1d6d5ee3d45deacc9700fb188af6284b94a136e1 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 11 Jul 2023 11:40:52 +0100
Subject: [PATCH 14/77] Remove reference to get_build_env

Signed-off-by: Fionn O'Donohoe
---
 tests/end2end/test_end2end_cybsec_mlp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py
index a1681dc6fa..e31c86c985 100644
--- a/tests/end2end/test_end2end_cybsec_mlp.py
+++ b/tests/end2end/test_end2end_cybsec_mlp.py
@@ -50,6 +50,7 @@
 from finn.util.test import load_test_checkpoint_or_skip

 target_clk_ns = 10
+build_board = "Pynq-Z1"
 build_dir = os.environ["FINN_BUILD_DIR"]

@@ -150,14 +151,13 @@ def test_end2end_cybsec_mlp_export():
 def test_end2end_cybsec_mlp_build():
     model_file = get_checkpoint_name("export")
     load_test_checkpoint_or_skip(model_file)
-    build_env = get_build_env(build_kind, target_clk_ns)
     output_dir = make_build_dir("test_end2end_cybsec_mlp_build")

     cfg = build.DataflowBuildConfig(
         output_dir=output_dir,
         target_fps=1000000,
         synth_clk_period_ns=target_clk_ns,
-        board="Pynq-Z1",
+        board=build_board,
         shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
         generate_outputs=[
             build_cfg.DataflowOutputType.ESTIMATE_REPORTS,

From 1e898d83737331a1346dcd9b802a20ef5ba8c58d Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Mon, 17 Jul 2023 21:22:52 +0100
Subject: [PATCH 15/77] Adjust how deployment dirs are created for sanity_bnn
 suite

Signed-off-by: Fionn O'Donohoe
---
 tests/end2end/test_end2end_bnn_pynq.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py
index 8198538388..6b288bd382 100644
--- a/tests/end2end/test_end2end_bnn_pynq.py
+++ b/tests/end2end/test_end2end_bnn_pynq.py
@@ -294,7 +294,8 @@ def topology2dataset(topology):


 def deploy_based_on_board(model, model_title, topology, wbits, abits, board):
-    if os.environ.get('FINN_DEPLOY_DIR') is not None:
+    # Check if a deployment directory for this board type already exists
+    if ("FINN_DEPLOY_DIR" in os.environ) and (board in os.environ["FINN_DEPLOY_DIR"]):
         deploy_dir_root = os.environ["FINN_DEPLOY_DIR"]
     else:
         deploy_dir_root = make_build_dir(prefix="hw_deployment_" + board + "_")

From a641f011945d79a4a0028b1ea40a1b169ef15efe Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Mon, 17 Jul 2023 21:24:02 +0100
Subject: [PATCH 16/77] Latest dev changes have affected what tests pass or are
 destructive, adjust test 
scripts to work around these changes

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/hack_driver_script.py |  8 ++------
 docker/jenkins/test_bnn_hw_pytest.py | 18 ++----------------
 2 files changed, 4 insertions(+), 22 deletions(-)

diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py
index cd3becf7cf..568c62150d 100755
--- a/docker/jenkins/hack_driver_script.py
+++ b/docker/jenkins/hack_driver_script.py
@@ -18,13 +18,9 @@ def hack_driver_script(board, test_dir):
     # Specify the line to be replaced and the new line
     line_to_replace = "ishape_normal"
     if "cnv" in test_dir:
-        new_line = " \"ishape_normal\" : [(1, 32, 32, 3)],"
+        new_line = " \"ishape_normal\" : [(1, 3, 32, 32)],"
     else:
-        # Usually a size of (1, 784) to begin with
-        if board == "Pynq-Z1":
-            new_line = " \"ishape_normal\" : [(1, 28, 28, 1)],"
-        else:
-            new_line = " \"ishape_normal\" : [(1, 1, 28, 28)],"
+        new_line = " \"ishape_normal\" : [(1, 1, 28, 28)],"

     # Iterate over the lines and replace the specified line
     for i in range(len(lines)):
diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py
index f2b437e800..1d1e22ed2c 100755
--- a/docker/jenkins/test_bnn_hw_pytest.py
+++ b/docker/jenkins/test_bnn_hw_pytest.py
@@ -25,17 +25,6 @@ def remove_cache_dirs(dir_list):
             del tmp_list[i]
     return tmp_list

-def remove_destructive_board_tests(board, test_list):
-    tmp_list = list(test_list)
-    if "Pynq" in board:
-        # both tests are destructive to the Pynq-Z1 board and require a board reboot
-        for i in range(len(tmp_list)-1, -1, -1):
-            if "bnn_w2_a2_cnv_QE-True" in tmp_list[i]:
-                del tmp_list[i]
-            elif "bnn_w1_a1_tfc_QE-True" in tmp_list[i]:
-                del tmp_list[i]
-    return tmp_list
-
 def delete_file(file_path):
     # Check if the file exists before deleting it
     if os.path.exists(file_path):
@@ -78,11 +67,8 @@ def pytest_generate_tests(metafunc):
     test_dirs = remove_cache_dirs(test_dirs)

     for marker in all_markers_used:
-        platform = get_platform(marker)
-        if "Pynq" in marker:
-            remove_destructive_board_tests("Pynq", test_dirs)
-            scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform]))
-        elif "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker:
+        if "Pynq" in marker or "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker:
+            platform = get_platform(marker)
             scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform]))

     if len(scenarios) > 0:

From ba0d58f6cbe671adcee74c1bc83d775d9f201e9a Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Fri, 21 Jul 2023 14:47:50 +0100
Subject: [PATCH 17/77] remove additional spacing

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/test_bnn_hw_pytest.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py
index 1d1e22ed2c..961efd1cc1 100755
--- a/docker/jenkins/test_bnn_hw_pytest.py
+++ b/docker/jenkins/test_bnn_hw_pytest.py
@@ -94,7 +94,7 @@ def test_type_execute(self, test_dir, batch_size, platform):
         bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit"
         result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout)
         assert result.returncode == 0
-
+
         # Load the output and reference arrays 
output_array = np.load(output_execute_results_file) reference_array = np.load(execute_results_reference_file) @@ -159,8 +159,8 @@ def test_type_throughput(self, test_dir, batch_size, platform): ) ret_str += "\n" + "-----------------------------" largest_bsize = bsize_range[-1] - + # Dump the metrics to a text file with open(throughput_results_formatted_file, "w") as f: f.write(ret_str) - assert os.path.exists(throughput_results_formatted_file) \ No newline at end of file + assert os.path.exists(throughput_results_formatted_file) From 111c873027cf7eb918b8da93cc8e41c350fab0b6 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:06:41 +0100 Subject: [PATCH 18/77] No need for buildDiscarder function in the pipeline itself Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 12725594df..f73fd78baa 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,7 +1,4 @@ pipeline { - options { - buildDiscarder(logRotator(numToKeepStr: '30', artifactNumToKeepStr: '30')) - } agent { node { label 'finn-build' } } environment { FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" From 746315c3c533df717b7def757aee0e186d7f9562 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:08:08 +0100 Subject: [PATCH 19/77] Env variables are controlled by external CI system and can be removed from the pipeline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f73fd78baa..9d9d6ebabb 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,12 +1,5 @@ pipeline { agent { node { label 'finn-build' } } - environment { - FINN_XILINX_PATH="/proj/xbuilds/SWIP/2022.1_0420_0327/installs/lin64" - FINN_XILINX_VERSION="2022.1" - FINN_DOCKER_TAG="xilinx/finn:jenkins" - FINN_HOST_BUILD_DIR="/scratch/users/finn_ci" - PLATFORM_REPO_PATHS="/opt/xilinx/platforms" - } stages { stage('Quicktest') { steps { From 91a5437fdbc06bbd9fc63c3a6fddda04e3b6f865 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:08:54 +0100 Subject: [PATCH 20/77] Specific agent not required when setting up pipeline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 9d9d6ebabb..6f01b06e55 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,5 +1,5 @@ pipeline { - agent { node { label 'finn-build' } } + agent none stages { stage('Quicktest') { steps { From 8b7d7812292f98e23083c866c0f2352bdda6b153 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:10:41 +0100 Subject: [PATCH 21/77] Add boolean build parameters in order to select tests Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 6f01b06e55..9100e3ed0d 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,5 +1,9 @@ pipeline { agent none + parameters { + booleanParam(name: 'fpgadataflow', defaultValue: true, description: 'Run fpgadataflow tests') + booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') + } stages { stage('Quicktest') { steps { From 
88462e1f8d3138b109d10ede04a1fc5acec96095 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:22:08 +0100 Subject: [PATCH 22/77] Add sanity suite unit and fpgadataflow tests The fpgadataflow tests were placed in their own stage with their own build parameter as the test takes longer than a day to run. This means that this suite cannot sensibly be used in daily CI test runs. Some notes on the stages and their setup: - the when{} block is used as an 'if' statement, checking if a certain input parameter to the pipeline has been set. By default - the fpgadataflow stage will not run unless explicitly set to true by the tester/CI system - FINN_HOST_BUILD_DIR is set to a unique directory per stage for ease of use/test cleanup - catchError is used in order to allow the pipeline to continue to possible future stages if a stage along the way fails. Otherwise the first failed stage found would end the test run Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 79 +++++++++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 9100e3ed0d..eb94885362 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -1,20 +1,79 @@ pipeline { agent none parameters { - booleanParam(name: 'fpgadataflow', defaultValue: true, description: 'Run fpgadataflow tests') + booleanParam(name: 'fpgadataflow', defaultValue: false, description: 'Run fpgadataflow tests') booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') } stages { - stage('Quicktest') { - steps { - sh 'echo "Hello FINN"' - sh 'hostname' - sh 'whoami' - sh 'pwd' - sh 'docker login' - sh 'printenv | sort' - sh './run-docker.sh quicktest' + stage('Sanity Tests') { + parallel { + stage('Sanity - Unit Tests') { + when { + expression { params['sanity'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "sanity_ut" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Multiple markers with pytest needs its own script + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") + sh './run-docker.sh ./run-tests.sh' + } + } + } + } + stage('Sanity - fpgadataflow Tests') { + when { + expression { params['fpgadataflow'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "fpgadataflow" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + } + } + } + } } } } } + +void cleanPreviousBuildFiles(String buildDir) { + // Delete any build files from a previous build + // Previous build folders affect findCopyZip() and can cause the stage to fail + sh "rm -rf ${buildDir}/*" +} + +void createMultiMarkerScript(String markers, String testResultsFilename) { + // Passing multiple markers when running ./run-docker.sh does not work with bash. 
+ // Therefore, create a script to maintain the single quotes that surround the markers + sh """echo "#!/bin/bash +python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tests.sh + """ + + // Give permissions to script + sh 'chmod 777 run-tests.sh' +} + +void runDockerPytestWithMarker(String marker, String testResultsFilename) { + sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" +} From 80029f1b0a603e76e855d96c21009d0ec6ad886c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:30:50 +0100 Subject: [PATCH 23/77] Add sanity bitstream build tests Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index eb94885362..1b1f4fc92e 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -7,6 +7,30 @@ pipeline { stages { stage('Sanity Tests') { parallel { + stage('Sanity - Build Hardware') { + when { + expression { return params['sanity'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "bnn_build_sanity" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + } + } + } + } stage('Sanity - Unit Tests') { when { expression { params['sanity'] } From 3900428317634ee06f5fee549e46047057ecab78 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:35:31 +0100 Subject: [PATCH 24/77] Collect all files needed for HW testing, adding a stage to collect test scripts Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 39 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1b1f4fc92e..06a1910b16 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,6 +27,12 @@ pipeline { // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + + // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves + findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") + findCopyZip("ZCU104", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_ZCU104_zip") + findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") + findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") } } } @@ -78,6 +84,22 @@ pipeline { } } } + stage('Sanity - Setup Hardware Tests') { + when { + expression { return params['sanity'] } + } + agent { + label 'finn-build' + } + steps { + script { + // Stash the HW test scripts to be used on slave nodes + dir('docker/jenkins') { + stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' + } + } + } + } } } @@ -101,3 +123,20 @@ python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tes void runDockerPytestWithMarker(String marker, String testResultsFilename) { sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" } + +void findBoardBuildFiles(String board, String 
searchDir, String dirToFind) { + def result = sh(script: "find $searchDir -type d -name \"$dirToFind*\"", returnStdout: true).trim() + if (result.empty) { + error "Directory containing '$dirToFind' not found." + } + return result +} + +void findCopyZip(String board, String findDir, String copyDir, String stashName) { + def buildDir = findBoardBuildFiles(board, findDir, "hw_deployment_${board}") + sh "cp -r ${buildDir}/${board} ${copyDir}/" + dir(copyDir) { + sh "zip -r ${board}.zip ${board}/" + stash name: stashName, includes: "${board}.zip" + } +} From 31ef8d616047bc601f278821c92c0b920b58cebc Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:42:26 +0100 Subject: [PATCH 25/77] Add hw testing stages - only run if build stage was successful Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 175 +++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 06a1910b16..2b2a5786c6 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -33,6 +33,8 @@ pipeline { findCopyZip("ZCU104", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_ZCU104_zip") findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") + + env.BNN_BUILD_SANITY = "SUCCESS" } } } @@ -100,6 +102,159 @@ pipeline { } } } + stage('Sanity - Run Hardware Tests') { + parallel { + stage('BNN Sanity - U250') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (&& params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-u250' + } + environment { + BOARD = 'U250' + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script + sh './run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - Pynq-Z1') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-pynq' + } + environment { + BOARD = 'Pynq-Z1' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_PynqZ1_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + // The marker here omits the '-Z1' as '-' is a special character + // that will not work with Pytest + createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - ZCU104') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return 
(params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-zcu104' + } + environment { + BOARD = 'ZCU104' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + stage('BNN Sanity - KV260_SOM') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + } + agent { + label 'finn-kv260' + } + environment { + BOARD = 'KV260_SOM' + USER_CREDENTIALS = credentials('user-ubuntu-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "sanity_${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + } + } + } } } @@ -140,3 +295,23 @@ void findCopyZip(String board, String findDir, String copyDir, String stashName) stash name: stashName, includes: "${board}.zip" } } + +void createTestScript(String board, String marker, String testResultsFilename) { + if(board == "U250") + sh """echo "#!/bin/bash +. /opt/xilinx/xrt/setup.sh +. ${CONDA_ENV_ACTIVATE} +python hack_driver_script.py +python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh + """ + else + sh """echo "#!/bin/bash +. /etc/profile.d/pynq_venv.sh +. 
/etc/profile.d/xrt_setup.sh +python hack_driver_script.py +python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh + """ + + // Give permissions to script + sh 'chmod 777 run-tests.sh' +} From 674ef2669feedfa68bd84cd822d7a714971446b3 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:45:36 +0100 Subject: [PATCH 26/77] Only run HW tests if board is online first, fail the pipeline if board is offline Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 72 +++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2b2a5786c6..60c9e47370 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -95,6 +95,12 @@ pipeline { } steps { script { + // Check which boards are online before running HW tests + env.ALVEO_HOST_ONLINE = isNodeOnline('finn-u250') + env.PYNQ_ONLINE = isNodeOnline('finn-pynq') + env.ZCU104_ONLINE = isNodeOnline('finn-zcu104') + env.KV260_ONLINE = isNodeOnline('finn-kv260') + // Stash the HW test scripts to be used on slave nodes dir('docker/jenkins') { stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' @@ -108,7 +114,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (&& params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.ALVEO_HOST_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-u250' @@ -144,7 +150,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.PYNQ_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-pynq' @@ -183,7 +189,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.ZCU104_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-zcu104' @@ -220,7 +226,7 @@ pipeline { when { // beforeAgent set to 'true' to prevent an offline agent hanging the stage beforeAgent true - expression { return (params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } + expression { return (env.KV260_ONLINE == 'true' && params['sanity'] && env.BNN_BUILD_SANITY == 'SUCCESS') } } agent { label 'finn-kv260' @@ -255,6 +261,18 @@ pipeline { } } } + stage('Check Stage Results') { + agent { + label 'finn-build' + } + steps { + catchError(buildResult: 'SUCCESS') { + script { + checkAllBoards() + } + } + } + } } } @@ -315,3 +333,49 @@ python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh // Give permissions to script sh 'chmod 777 run-tests.sh' } + +void isNodeOnline(String labelName) { + Label label = Jenkins.instance.getLabel(labelName) + def agentOnline = false + + if (label) { + List nodes = Jenkins.instance.getNodes() + + nodes.each { node -> + if (node.getAssignedLabels().contains(label)) { + def computer = node.toComputer() + if (computer && computer.isOnline()) { + agentOnline = true + } else { + echo """Agent ${node.displayName} is offline""" + } + } + } + } else { + echo """Node with label ${labelName} not found""" + } + + return agentOnline +} + +def 
checkAllBoards() { + def overallResult = true + + if (env.PYNQ_ONLINE == 'false') { + overallResult = false + } + + if (env.ALVEO_HOST_ONLINE == 'false') { + overallResult = false + } + + if (env.KV260_ONLINE == 'false') { + overallResult = false + } + + if (env.ZCU104_ONLINE == 'false') { + overallResult = false + } + + return overallResult +} From 507a97bdca4f3b2f202295e4bb9225e57cec7ea1 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:58:24 +0100 Subject: [PATCH 27/77] Collect test result files in final stage and plot with JUnit plugin - only if that test stage ran successfully Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 86 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 60c9e47370..6402fcde6c 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -34,6 +34,10 @@ pipeline { findCopyZip("KV260_SOM", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_KV260_SOM_zip") findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") + // Stash the test results file(s) + stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_SANITY = "SUCCESS" } } @@ -58,6 +62,12 @@ pipeline { // Multiple markers with pytest needs its own script createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") sh './run-docker.sh ./run-tests.sh' + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.SANITY_UT = "SUCCESS" } } } @@ -80,6 +90,12 @@ pipeline { // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.FPGADATAFLOW = "SUCCESS" } } } @@ -139,12 +155,23 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_U250 = "SUCCESS" + // Execute the script sh './run-tests.sh' } } } } + post { + always { + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - Pynq-Z1') { when { @@ -178,12 +205,24 @@ pipeline { // that will not work with Pytest createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_PYNQZ1 = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - ZCU104') { when { @@ -215,12 +254,24 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable 
to help collect test results later in pipeline + env.SANITY_BNN_TEST_ZCU104 = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } stage('BNN Sanity - KV260_SOM') { when { @@ -252,12 +303,24 @@ pipeline { // Create test script createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + // Use an env variable to help collect test results later in pipeline + env.SANITY_BNN_TEST_KV260_SOM = "SUCCESS" + // Execute the script as the root user - needed for zynq platforms sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' } } } } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + } + } + } } } } @@ -272,6 +335,23 @@ pipeline { } } } + post { + always { + script { + // Only unstash for stages that ran + unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") + unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") + unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") + unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") + unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") + unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + + // Plot what XML files were created during the test run + junit '**/*.xml' + } + } + } } } } @@ -379,3 +459,9 @@ def checkAllBoards() { return overallResult } + +void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) { + if (stageEnvVariableSet) { + unstash stashName + } +} From 06a6b3d5c58f97fbfcbb9e93744807c9cedeabf9 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 15:59:55 +0100 Subject: [PATCH 28/77] Add post success/failure stage messages Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 6402fcde6c..e757cb7710 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -171,6 +171,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - Pynq-Z1') { @@ -222,6 +228,12 @@ pipeline { stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - ZCU104') { @@ -271,6 +283,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } } } stage('BNN Sanity - KV260_SOM') { @@ -320,6 +338,12 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" } } + success { + postSuccess(env.BOARD) + } + failure { + 
postFailure(env.BOARD) + } } } } @@ -465,3 +489,11 @@ void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) { unstash stashName } } + +void postFailure(String board) { + echo "Failed to run ${board} tests" +} + +void postSuccess(String board) { + echo "${board} tests passed" +} From be6ed941c76370b20b54e44db4e717920ba9ae0c Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:20:01 +0100 Subject: [PATCH 29/77] Add file archiving - for XML test result files Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index e757cb7710..2f7eab1190 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -373,6 +373,9 @@ pipeline { // Plot what XML files were created during the test run junit '**/*.xml' + + // Archive the XML test results + archiveArtifacts artifacts: "*.xml" } } } From d31ffcaef305d7d099f227cedb3d64061acfaa9d Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:28:55 +0100 Subject: [PATCH 30/77] Add end2end build tests - collecting results as well Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 159 +++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2f7eab1190..c15e686d16 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -3,6 +3,7 @@ pipeline { parameters { booleanParam(name: 'fpgadataflow', defaultValue: false, description: 'Run fpgadataflow tests') booleanParam(name: 'sanity', defaultValue: true, description: 'Run sanity hardware and unit tests') + booleanParam(name: 'end2end', defaultValue: false, description: 'Run end2end tests') } stages { stage('Sanity Tests') { @@ -102,6 +103,159 @@ pipeline { } } } + stage('End2end - Build Hardware') { + parallel { + stage('End2end') { + when { + expression { params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + TEST_NAME = "end2end" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}" + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Delete any build files from a previous build + sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}.xml") + + // Stash the test results file(s) + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + + // Use an env variable to help collect test results later in pipeline + env.END2END = "SUCCESS" + } + } + } + } + stage('BNN end2end - U250') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "U250" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + 
env.BNN_BUILD_U250 = "SUCCESS" + } + } + } + stage('BNN end2end - Pynq-Z1') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "Pynq-Z1" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_PYNQZ1 = "SUCCESS" + } + } + } + stage('BNN end2end - ZCU104') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "ZCU104" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_ZCU104 = "SUCCESS" + } + } + } + stage('BNN end2end - KV260_SOM') { + when { + expression { return params['end2end'] } + } + agent { + label 'finn-build' + } + environment { + BOARD = "KV260_SOM" + TEST_NAME = "bnn_build_full" + FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" + } + steps { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_KV260_SOM = "SUCCESS" + } + } + } + } + } stage('Sanity - Setup Hardware Tests') { when { expression { return params['sanity'] } @@ -370,6 +524,11 @@ pipeline { unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") + unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") + unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") + unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, 
"bnn_build_full_KV260_SOM") // Plot what XML files were created during the test run junit '**/*.xml' From df81b048314d5d2e5ad6db4e9b580edcdf6bb34f Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:40:21 +0100 Subject: [PATCH 31/77] Add end2end hardware tests - collecting results as well Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 230 ++++++++++++++++++++++++++++++++++++- 1 file changed, 229 insertions(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index c15e686d16..a117625230 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -256,7 +256,7 @@ pipeline { } } } - stage('Sanity - Setup Hardware Tests') { + stage('Sanity & BNN end2end - Setup Hardware Tests') { when { expression { return params['sanity'] } } @@ -502,6 +502,230 @@ pipeline { } } } + stage('End2end - Run Hardware Tests') { + parallel { + stage('BNN end2end - U250') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.ALVEO_HOST_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_U250 == 'SUCCESS') } + } + agent { + label 'finn-u250' + } + environment { + BOARD = 'U250' + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_U250 = "SUCCESS" + + // Execute the script + sh './run-tests.sh' + } + } + } + } + post { + always { + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - Pynq-Z1') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.PYNQ_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_PYNQZ1 == 'SUCCESS') } + } + agent { + label 'finn-pynq' + } + environment { + BOARD = 'Pynq-Z1' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "PynqZ1_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + // The marker here omits the '-Z1' as '-' is a special character + // that will not work with Pytest + createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_PYNQZ1 = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_PynqZ1", includes: 
"bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - ZCU104') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.ZCU104_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_ZCU104 == 'SUCCESS') } + } + agent { + label 'finn-zcu104' + } + environment { + BOARD = 'ZCU104' + USER_CREDENTIALS = credentials('pynq-z1-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_ZCU104 = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + stage('BNN end2end - KV260_SOM') { + when { + // beforeAgent set to 'true' to prevent an offline agent hanging the stage + beforeAgent true + expression { return (env.KV260_ONLINE == 'true' && params['end2end'] && env.BNN_BUILD_KV260_SOM == 'SUCCESS') } + } + agent { + label 'finn-kv260' + } + environment { + BOARD = 'KV260_SOM' + USER_CREDENTIALS = credentials('user-ubuntu-credentials') + } + steps { + catchError(stageResult: 'FAILURE') { + script { + // Clean any files from a previous run + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + + // Get the test files + unstash name: "${env.BOARD}_zip" + sh "unzip -o ${env.BOARD}.zip" + + dir(env.BOARD) { + // Get the scripts necessary for running hw tests + unstash name: 'bnn_test_files' + + // Create test script + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + + // Use an env variable to help collect test results later in pipeline + env.BNN_TEST_KV260_SOM = "SUCCESS" + + // Execute the script as the root user - needed for zynq platforms + sh 'echo $USER_CREDENTIALS_PSW | sudo -S ./run-tests.sh' + } + } + } + } + post { + always { + // Get test result file and delete test files on the board + dir(env.BOARD) { + // Collect the results file on the slave node by stashing + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + } + } + success { + postSuccess(env.BOARD) + } + failure { + postFailure(env.BOARD) + } + } + } + } + } stage('Check Stage Results') { agent { label 'finn-build' @@ -529,6 +753,10 @@ pipeline { unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") + unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") + unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") + unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") 
+ unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") // Plot what XML files were created during the test run junit '**/*.xml' From feb4b277c679c96e1528e8753e85431a336881cb Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 16:42:33 +0100 Subject: [PATCH 32/77] Add catchError for end2end bnn build stages to allow pipeline to continue on error Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 96 +++++++++++++++++++++----------------- 1 file changed, 52 insertions(+), 44 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index a117625230..1fc80a6feb 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -147,20 +147,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_U250 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_U250 = "SUCCESS" + } } } } @@ -177,20 +179,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_PYNQZ1 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_PYNQZ1 = 
"SUCCESS" + } } } } @@ -207,20 +211,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_ZCU104 = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_ZCU104 = "SUCCESS" + } } } } @@ -237,20 +243,22 @@ pipeline { FINN_HOST_BUILD_DIR = "${env.FINN_HOST_BUILD_DIR}/${env.TEST_NAME}_${env.BOARD}" } steps { - script { - // Creates dir in finn clone to store build files for stashing - sh "mkdir -p ${env.TEST_NAME}" - cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) + catchError(stageResult: 'FAILURE') { + script { + // Creates dir in finn clone to store build files for stashing + sh "mkdir -p ${env.TEST_NAME}" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) - // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") - findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") + // Pass in the marker to run with pytest and the XML test results filename + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") - // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + // Stash the test results file(s) + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" - // Use an env variable to help collect test results later in pipeline - env.BNN_BUILD_KV260_SOM = "SUCCESS" + // Use an env variable to help collect test results later in pipeline + env.BNN_BUILD_KV260_SOM = "SUCCESS" + } } } } From 7e258a84e79980484156f29701a768d835597524 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:19:35 +0100 Subject: [PATCH 33/77] Add pytest-html library and add to all tests in Jenkinsfile. 
Archive the results as well Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 76 +++++++++++++++++++------------------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index d69ccc9725..69425df1ee 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -102,6 +102,7 @@ RUN pip install pandas==1.5.3 RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading +RUN pip install pytest-html==3.2.0 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1fc80a6feb..d8869eeb5b 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,7 +27,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}.xml") + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}") // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") @@ -36,7 +36,7 @@ pipeline { findCopyZip("U250", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_U250_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml" + stash name: "${env.TEST_NAME}", includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_SANITY = "SUCCESS" @@ -61,11 +61,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Multiple markers with pytest needs its own script - createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}.xml") + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}") sh './run-docker.sh ./run-tests.sh' // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.SANITY_UT = "SUCCESS" @@ -90,10 +90,10 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}.xml") + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}") // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.FPGADATAFLOW = "SUCCESS" @@ -123,10 +123,10 @@ pipeline { sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}.xml") + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}") // Stash the test results file(s) - stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml" + stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline env.END2END = "SUCCESS" @@ 
-154,11 +154,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_U250 = "SUCCESS" @@ -186,11 +186,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_PynqZ1", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_PYNQZ1 = "SUCCESS" @@ -218,11 +218,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_ZCU104 = "SUCCESS" @@ -250,11 +250,11 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}.xml") + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}") findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) - stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml" + stash name: "${env.TEST_NAME}_${env.BOARD}", includes: "${env.TEST_NAME}_${env.BOARD}.xml,${env.TEST_NAME}_${env.BOARD}.html" // Use an env variable to help collect test results later in pipeline env.BNN_BUILD_KV260_SOM = "SUCCESS" @@ -315,7 +315,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_U250 = "SUCCESS" @@ -330,7 +330,7 @@ pipeline { always { dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: 
"xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -371,7 +371,7 @@ pipeline { // Create test script // The marker here omits the '-Z1' as '-' is a special character // that will not work with Pytest - createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, 'Pynq', "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_PYNQZ1 = "SUCCESS" @@ -387,7 +387,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -426,7 +426,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_ZCU104 = "SUCCESS" @@ -442,7 +442,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -481,7 +481,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "sanity_bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.SANITY_BNN_TEST_KV260_SOM = "SUCCESS" @@ -497,7 +497,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } success { @@ -539,7 +539,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_U250 = "SUCCESS" @@ -554,7 +554,7 @@ pipeline { always { dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -595,7 +595,7 @@ pipeline { // Create test script // The marker here omits the '-Z1' as '-' is a special character // that will not work with Pytest - createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, 'Pynq', "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_PYNQZ1 = "SUCCESS" @@ -611,7 +611,7 @@ pipeline { // Get 
test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -650,7 +650,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_ZCU104 = "SUCCESS" @@ -666,7 +666,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -705,7 +705,7 @@ pipeline { unstash name: 'bnn_test_files' // Create test script - createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}.xml") + createTestScript(env.BOARD, env.BOARD, "bnn_test_hw_${env.BOARD}") // Use an env variable to help collect test results later in pipeline env.BNN_TEST_KV260_SOM = "SUCCESS" @@ -721,7 +721,7 @@ pipeline { // Get test result file and delete test files on the board dir(env.BOARD) { // Collect the results file on the slave node by stashing - stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml" + stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } success { @@ -769,8 +769,8 @@ pipeline { // Plot what XML files were created during the test run junit '**/*.xml' - // Archive the XML test results - archiveArtifacts artifacts: "*.xml" + // Archive the XML & HTML test results + archiveArtifacts artifacts: "*.xml *.html" } } } @@ -788,7 +788,7 @@ void createMultiMarkerScript(String markers, String testResultsFilename) { // Passing multiple markers when running ./run-docker.sh does not work with bash. // Therefore, create a script to maintain the single quotes that surround the markers sh """echo "#!/bin/bash -python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ // Give permissions to script @@ -796,7 +796,7 @@ python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}" >> run-tes } void runDockerPytestWithMarker(String marker, String testResultsFilename) { - sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}""" + sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html""" } void findBoardBuildFiles(String board, String searchDir, String dirToFind) { @@ -822,14 +822,14 @@ void createTestScript(String board, String marker, String testResultsFilename) { . /opt/xilinx/xrt/setup.sh . ${CONDA_ENV_ACTIVATE} python hack_driver_script.py -python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ else sh """echo "#!/bin/bash . /etc/profile.d/pynq_venv.sh . 
/etc/profile.d/xrt_setup.sh python hack_driver_script.py -python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh +python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ // Give permissions to script From 6b5e7680781fde8fbbcd7a529cbf1ca6c52f1b58 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:39:59 +0100 Subject: [PATCH 34/77] Add pytest-html-merger library to combine individual HTML files created in the jenkins pipeline Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 50 ++++++++++++++++++++++---------------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 69425df1ee..91a22952ff 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -103,6 +103,7 @@ RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading RUN pip install pytest-html==3.2.0 +RUN pip install pytest-html-merger==0.0.8 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index d8869eeb5b..f782569643 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -748,29 +748,37 @@ pipeline { post { always { script { - // Only unstash for stages that ran - unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") - unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") - unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") - unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") - unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") - unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") - unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") - unstashSuccessfulStage(env.END2END, "end2end") - unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") - unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") - unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") - unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") - unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") - unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") - unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") - unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") - - // Plot what XML files were created during the test run - junit '**/*.xml' + sh 'mkdir -p reports' + cleanPreviousBuildFiles('reports') + dir('reports') { + // Only unstash for stages that ran + unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") + unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") + unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") + unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") + unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") + unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") + unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") + unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, 
"bnn_build_full_PynqZ1") + unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") + unstashSuccessfulStage(env.BNN_BUILD_KV260_SOM, "bnn_build_full_KV260_SOM") + unstashSuccessfulStage(env.BNN_TEST_U250, "xml_bnn_test_U250") + unstashSuccessfulStage(env.BNN_TEST_PYNQZ1, "xml_bnn_test_PynqZ1") + unstashSuccessfulStage(env.BNN_TEST_ZCU104, "xml_bnn_test_ZCU104") + unstashSuccessfulStage(env.BNN_TEST_KV260_SOM, "xml_bnn_test_KV260_SOM") + } + + // Combine individual HTML files to one single report + sh './run-docker.sh pytest_html_merger -i reports/ -o reports/test_report_final.html' // Archive the XML & HTML test results - archiveArtifacts artifacts: "*.xml *.html" + archiveArtifacts artifacts: "reports/*.xml" + archiveArtifacts artifacts: "reports/*.html" + + // Plot what XML files were created during the test run + junit 'reports/*.xml' } } } From fb9218e15b8ad0b8bacf4af610c5df1fb50e52c0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Fri, 21 Jul 2023 17:56:08 +0100 Subject: [PATCH 35/77] Add code coverage for sanity unit tests and for fpgadataflow tests. Archive the results Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 1 + docker/jenkins/Jenkinsfile | 31 ++++++++++++++++++++----------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index 91a22952ff..e11e8136fd 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -104,6 +104,7 @@ RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading RUN pip install pytest-html==3.2.0 RUN pip install pytest-html-merger==0.0.8 +RUN pip install pytest-cov==4.1.0 # extra dependencies from other FINN deps # installed in Docker image to make entrypoint script go faster diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f782569643..b7998ae5b9 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -27,7 +27,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}") + runDockerPytestWithMarker("sanity_bnn", "${env.TEST_NAME}", '') // Find the board's build files (bitstreams/xclbins) and zip for use on the boards themselves findCopyZip("Pynq-Z1", env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "sanity_PynqZ1_zip") @@ -61,7 +61,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Multiple markers with pytest needs its own script - createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}") + createMultiMarkerScript("util or brevitas_export or streamline or transform or notebooks", "${env.TEST_NAME}", "--cov --cov-report=html:coverage_sanity_ut") sh './run-docker.sh ./run-tests.sh' // Stash the test results file(s) @@ -90,7 +90,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}") + runDockerPytestWithMarker("fpgadataflow", "${env.TEST_NAME}", "--cov --cov-report=html:coverage_fpgadataflow") // Stash the test results file(s) stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" @@ -123,7 +123,7 @@ pipeline { sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker(env.TEST_NAME, 
"${env.TEST_NAME}") + runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}", '') // Stash the test results file(s) stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" @@ -154,7 +154,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_u250", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -186,7 +186,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_pynq", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "PynqZ1_zip") // Stash the test results file(s) @@ -218,7 +218,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_zcu104", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -250,7 +250,7 @@ pipeline { cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename - runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}") + runDockerPytestWithMarker("bnn_kv260", "${env.TEST_NAME}_${env.BOARD}", '') findCopyZip(env.BOARD, env.FINN_HOST_BUILD_DIR, env.TEST_NAME, "${env.BOARD}_zip") // Stash the test results file(s) @@ -777,6 +777,9 @@ pipeline { archiveArtifacts artifacts: "reports/*.xml" archiveArtifacts artifacts: "reports/*.html" + archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") + archiveSuccessfulStage(env.FPGADATAFLOW, "coverage_fpgadataflow") + // Plot what XML files were created during the test run junit 'reports/*.xml' } @@ -796,15 +799,15 @@ void createMultiMarkerScript(String markers, String testResultsFilename) { // Passing multiple markers when running ./run-docker.sh does not work with bash. 
// Therefore, create a script to maintain the single quotes that surround the markers
     sh """echo "#!/bin/bash
-python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh
+python -m pytest -m \'${markers}\' --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}" >> run-tests.sh
 """

     // Give permissions to script
     sh 'chmod 777 run-tests.sh'
 }

-void runDockerPytestWithMarker(String marker, String testResultsFilename) {
-    sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html"""
+void runDockerPytestWithMarker(String marker, String testResultsFilename, String additionalOptions) {
+    sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}"""
 }

 void findBoardBuildFiles(String board, String searchDir, String dirToFind) {
@@ -896,6 +899,12 @@ void unstashSuccessfulStage(String stageEnvVariableSet, String stashName) {
   }
 }

+void archiveSuccessfulStage(String stageEnvVariableSet, String folder) {
+  if (stageEnvVariableSet) {
+    archiveArtifacts artifacts: "${folder}/**/*"
+  }
+}
+
 void postFailure(String board) {
   echo "Failed to run ${board} tests"
 }

From c28e8f026d64b871dc4cedf349b4f990b5ddc4df Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 25 Jul 2023 13:09:15 +0100
Subject: [PATCH 36/77] Forgot to add additionalOptions as a function input

Signed-off-by: Fionn O'Donohoe
---
 docker/jenkins/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile
index b7998ae5b9..98baad74ec 100644
--- a/docker/jenkins/Jenkinsfile
+++ b/docker/jenkins/Jenkinsfile
@@ -795,7 +795,7 @@ void cleanPreviousBuildFiles(String buildDir) {
   sh "rm -rf ${buildDir}/*"
 }

-void createMultiMarkerScript(String markers, String testResultsFilename) {
+void createMultiMarkerScript(String markers, String testResultsFilename, String additionalOptions) {
   // Passing multiple markers when running ./run-docker.sh does not work with bash.
   // Therefore, create a script to maintain the single quotes that surround the markers
   sh """echo "#!/bin/bash

From 0a2b850da0b957db687615565de8f44f98ef4718 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Tue, 25 Jul 2023 13:18:09 +0100
Subject: [PATCH 37/77] Remove postFailure() and postSuccess() functions. This
 is an attempt to reduce the method count used in the pipeline as the current
 size causes the "groovyjarjarasm.asm.MethodTooLargeException: Method too
 large" error. As a result, the pipeline does not run at all. This is a
 well-known limitation. Removing unnecessary functions shrinks the method
 count and allows the Jenkinsfile to run.
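For reference, one mitigation that avoids deleting helpers altogether (not
applied in this patch) is to move shared functions into a Jenkins shared
library, since library code is compiled separately and does not count against
the pipeline script's method size. A minimal sketch, assuming a shared library
is configured on the Jenkins controller under the hypothetical name
'finn-jenkins-lib':

    // vars/runDockerPytestWithMarker.groovy (in the shared library repository)
    def call(String marker, String testResultsFilename, String additionalOptions) {
        // run the marked tests inside the FINN Docker wrapper, as the pipeline does
        sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml ${additionalOptions}"""
    }

    // Jenkinsfile
    @Library('finn-jenkins-lib') _
    // the helper is then available as a global step, e.g.:
    // runDockerPytestWithMarker('fpgadataflow', 'fpgadataflow', '')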
Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 56 -------------------------------------- 1 file changed, 56 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 98baad74ec..1ab8e81f46 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -333,12 +333,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - Pynq-Z1') { @@ -390,12 +384,6 @@ pipeline { stash name: "xml_sanity_bnn_test_PynqZ1", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - ZCU104') { @@ -445,12 +433,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN Sanity - KV260_SOM') { @@ -500,12 +482,6 @@ pipeline { stash name: "xml_sanity_bnn_test_${env.BOARD}", includes: "sanity_bnn_test_hw_${env.BOARD}.xml,sanity_bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } } @@ -557,12 +533,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - Pynq-Z1') { @@ -614,12 +584,6 @@ pipeline { stash name: "xml_bnn_test_PynqZ1", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - ZCU104') { @@ -669,12 +633,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } stage('BNN end2end - KV260_SOM') { @@ -724,12 +682,6 @@ pipeline { stash name: "xml_bnn_test_${env.BOARD}", includes: "bnn_test_hw_${env.BOARD}.xml,bnn_test_hw_${env.BOARD}.html" } } - success { - postSuccess(env.BOARD) - } - failure { - postFailure(env.BOARD) - } } } } @@ -904,11 +856,3 @@ void archiveSuccessfulStage(String stageEnvVariableSet, String folder) { archiveArtifacts artifacts: "${folder}/**/*" } } - -void postFailure(String board) { - echo "Failed to run ${board} tests" -} - -void postSuccess(String board) { - echo "${board} tests passed" -} From 61cba651c155c258fe5a529a2be5d2b3fdf2d3d0 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 26 Jul 2023 15:46:15 +0100 Subject: [PATCH 38/77] Remove driver hack from BNN testing Signed-off-by: Fionn O'Donohoe --- docker/jenkins/Jenkinsfile | 4 +-- docker/jenkins/hack_driver_script.py | 49 ---------------------------- 2 files changed, 1 insertion(+), 52 deletions(-) delete mode 100755 docker/jenkins/hack_driver_script.py diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2f7eab1190..c19cb97dec 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -119,7 +119,7 @@ pipeline { // Stash the HW test scripts to be used on slave nodes dir('docker/jenkins') { - stash name: 'bnn_test_files', includes: 'hack_driver_script.py,test_bnn_hw_pytest.py' + 
stash name: 'bnn_test_files', includes: 'test_bnn_hw_pytest.py' } } } @@ -426,14 +426,12 @@ void createTestScript(String board, String marker, String testResultsFilename) { sh """echo "#!/bin/bash . /opt/xilinx/xrt/setup.sh . ${CONDA_ENV_ACTIVATE} -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh """ else sh """echo "#!/bin/bash . /etc/profile.d/pynq_venv.sh . /etc/profile.d/xrt_setup.sh -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}" >> run-tests.sh """ diff --git a/docker/jenkins/hack_driver_script.py b/docker/jenkins/hack_driver_script.py deleted file mode 100755 index 568c62150d..0000000000 --- a/docker/jenkins/hack_driver_script.py +++ /dev/null @@ -1,49 +0,0 @@ -import os - -def remove_cache_dirs(dir_list): - tmp_list = list(dir_list) - for i in range(len(tmp_list)-1, -1, -1): - if ".pytest_cache" in tmp_list[i]: - del tmp_list[i] - elif "__pycache__" in tmp_list[i]: - del tmp_list[i] - return tmp_list - -def hack_driver_script(board, test_dir): - test_script_file = "driver.py" - # Read the contents of the test script file - with open(test_script_file, "r") as f: - lines = f.readlines() - - # Specify the line to be replaced and the new line - line_to_replace = "ishape_normal" - if "cnv" in test_dir: - new_line = " \"ishape_normal\" : [(1, 3, 32, 32)]," - else: - new_line = " \"ishape_normal\" : [(1, 1, 28, 28)]," - - # Iterate over the lines and replace the specified line - for i in range(len(lines)): - if line_to_replace in lines[i]: - lines[i] = new_line + "\n" - break # Only replace the first occurrence - - # Write the modified contents back to the test script file - with open(test_script_file, "w") as f: - f.writelines(lines) - -if __name__ == "__main__": - current_dir = os.getcwd() - board = os.path.basename(current_dir) - - # Get list of local directories - removing the Python cache directories - local_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] - local_dirs = remove_cache_dirs(local_dirs) - - # Now create the full paths for each relative path - local_dirs_full_path = [os.path.join(current_dir, name) for name in local_dirs if os.path.isdir(os.path.join(current_dir, name))] - - # Change the driver.py script for each of the test directories - for dir in local_dirs_full_path: - os.chdir(dir) - hack_driver_script(board, dir) From 10d34b5fea8904f949c4ddab98cd5c0a1321fa10 Mon Sep 17 00:00:00 2001 From: Fionn O'Donohoe Date: Wed, 26 Jul 2023 15:49:32 +0100 Subject: [PATCH 39/77] Add input tensor data reshaping and transposing for BNN networks Signed-off-by: Fionn O'Donohoe --- tests/end2end/test_end2end_bnn_pynq.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 6b288bd382..59fbb0c1cb 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -321,7 +321,22 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): (input_tensor_npy, output_tensor_npy) = get_golden_io_pair( topology, wbits, abits, return_topk=1 ) - np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy) + + # Some changes are required in order to prepare the input tensor data for hardware + # testing. The ONNX graphs for these models contain nodes that manipulate the input + # tensor shape which FINN considers when creating the model. 
The same input tensor
+    # shaping needs to be done here on the input data.
+    # For the convolutional models, the graph contains the Transpose node. The Brevitas
+    # model works in NCHW layout but the FINN kernels are optimized for NHWC.
+    # The FC models contain a Reshape node, which FINN uses, so we therefore have to
+    # reshape the input tensor data to match the reshaping in the model
+    if topology == "cnv":
+        input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1)
+    else:
+        input_shape = input_tensor_npy.shape
+        input_tensor_npy = (input_shape[0], np.prod(input_shape[1:]))
+
+    np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy.copy())
     np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy)
 
     # driver.py and python libraries

From fe0915258bc9278a7d83ddbe27fc811ce604ae67 Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Fri, 28 Jul 2023 15:10:26 +0100
Subject: [PATCH 40/77] Add markers for BNN test suites to quiesce warnings
 when running pytest

Signed-off-by: Fionn O'Donohoe
---
 setup.cfg | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/setup.cfg b/setup.cfg
index fb070a436e..a70eaeb2f3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -130,6 +130,11 @@ markers =
     fpgadataflow: mark tests related to hls layers
     end2end: mark tests that run the end2end flow
     notebooks: mark tests that execute all Jupyter notebooks
+    sanity_bnn: mark tests that execute the sanity BNN test
+    bnn_u250: mark tests that execute U250 BNN tests
+    bnn_kv260: mark tests that execute KV260 BNN tests
+    bnn_pynq: mark tests that execute Pynq-Z1 BNN tests
+    bnn_zcu104: mark tests that execute ZCU104 BNN tests
 norecursedirs =
     dist
     build

From d7370db4fcde4cfb41af7a34e2494a7482fee6af Mon Sep 17 00:00:00 2001
From: Fionn O'Donohoe
Date: Fri, 28 Jul 2023 15:49:12 +0100
Subject: [PATCH 41/77] Add pytest library version and associated plugins for
 HTML report capturing

A newer version of pytest caused an issue when gathering HTML reports:

ModuleNotFoundError: No module named 'py.xml'; 'py' is not a package

Apparently this is not a pytest bug; it is caused by a related plugin that
depends on the py package without declaring it as a dependency. The exact
versions of the libraries specified in this commit allow for HTML report
gathering.
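For anyone recreating this environment outside the FINN Docker image, the
known-good combination pinned across this series is, as a sketch (assuming a
plain pip-based environment):

    pip install pytest==6.2.5 pytest-metadata==1.7.0 pytest-html==3.0.0 \
        pytest-html-merger==0.0.8 pytest-cov==4.1.0

With an incompatible combination installed, any of the pipeline's
"python -m pytest ... --html=<report>.html" invocations fails with the
ModuleNotFoundError above at report generation time.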
This was tested in docker and on hardware in 2 virtual environments: virtual_env and conda (zynq and alveo environments respectively) Signed-off-by: Fionn O'Donohoe --- docker/Dockerfile.finn | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.finn b/docker/Dockerfile.finn index e11e8136fd..06dc109808 100644 --- a/docker/Dockerfile.finn +++ b/docker/Dockerfile.finn @@ -102,7 +102,11 @@ RUN pip install pandas==1.5.3 RUN pip install scikit-learn==1.2.1 RUN pip install tqdm==4.64.1 RUN pip install -e git+https://github.com/fbcotter/dataset_loading.git@0.0.4#egg=dataset_loading -RUN pip install pytest-html==3.2.0 +# these versions of pytest and associated plugins allow for stable collection of +# test reports and code coverage reports in HTML +RUN pip install pytest==6.2.5 +RUN pip install pytest-metadata==1.7.0 +RUN pip install pytest-html==3.0.0 RUN pip install pytest-html-merger==0.0.8 RUN pip install pytest-cov==4.1.0 From 8357c102633a1ce25666f600d30b66ad6f94dfdf Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 8 Aug 2023 10:42:52 +0100 Subject: [PATCH 42/77] [Lint] Run pre-commit over files --- docker/jenkins/test_bnn_hw_pytest.py | 76 +++++++++++++++++------ tests/end2end/test_end2end_bnn_pynq.py | 83 +++++++++++++++++++------- 2 files changed, 122 insertions(+), 37 deletions(-) diff --git a/docker/jenkins/test_bnn_hw_pytest.py b/docker/jenkins/test_bnn_hw_pytest.py index 961efd1cc1..c8f4fbf74d 100755 --- a/docker/jenkins/test_bnn_hw_pytest.py +++ b/docker/jenkins/test_bnn_hw_pytest.py @@ -1,14 +1,15 @@ -import os -import numpy as np -from scipy.stats import linregress -import subprocess import pytest + import itertools import logging +import numpy as np +import os +import subprocess +from scipy.stats import linregress # no __init__ constructors allowed in Pytest - so use global variables instead base_dir_global = os.getcwd() -default_test_run_timeout = 30 # seconds +default_test_run_timeout = 30 # seconds output_execute_results_file = "output.npy" execute_results_reference_file = "output_reference.npy" output_throughput_results_file = "nw_metrics.txt" @@ -18,13 +19,14 @@ def remove_cache_dirs(dir_list): tmp_list = list(dir_list) - for i in range(len(tmp_list)-1, -1, -1): + for i in range(len(tmp_list) - 1, -1, -1): if ".pytest_cache" in tmp_list[i]: del tmp_list[i] elif "__pycache__" in tmp_list[i]: del tmp_list[i] return tmp_list + def delete_file(file_path): # Check if the file exists before deleting it if os.path.exists(file_path): @@ -36,16 +38,21 @@ def delete_file(file_path): else: logger.info(f"File '{file_path}' does not exist. Continuing with the script.") + def get_platform(board_str): return "alveo" if "U250" in board_str else "zynq-iodma" + def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, platform_list): test_cases = [ - (f'{marker}_{param1}_batchSize-{param2}_platform-{param3}', { - 'test_dir': param1, - 'batch_size': param2, - 'platform': param3, - }) + ( + f"{marker}_{param1}_batchSize-{param2}_platform-{param3}", + { + "test_dir": param1, + "batch_size": param2, + "platform": param3, + }, + ) for param1, param2, param3 in itertools.product( test_dir_list, batch_size_list, @@ -54,6 +61,7 @@ def get_full_parameterized_test_list(marker, test_dir_list, batch_size_list, pla ] return test_cases + def pytest_generate_tests(metafunc): idlist = [] argvalues = [] @@ -61,15 +69,21 @@ def pytest_generate_tests(metafunc): # Separate the full list of markers used on command line. 
# This allows a user to select multiple markers - all_markers_used = metafunc.config.getoption("-m").split(" ") + all_markers_used = metafunc.config.getoption("-m").split(" ") current_dir = os.getcwd() - test_dirs = [name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name))] + test_dirs = [ + name for name in os.listdir(current_dir) if os.path.isdir(os.path.join(current_dir, name)) + ] test_dirs = remove_cache_dirs(test_dirs) for marker in all_markers_used: if "Pynq" in marker or "U250" in marker or "ZCU104" in marker or "KV260_SOM" in marker: platform = get_platform(marker) - scenarios.extend(get_full_parameterized_test_list(marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform])) + scenarios.extend( + get_full_parameterized_test_list( + marker, test_dir_list=test_dirs, batch_size_list=[1], platform_list=[platform] + ) + ) if len(scenarios) > 0: for scenario in scenarios: @@ -92,7 +106,21 @@ def test_type_execute(self, test_dir, batch_size, platform): # Run test option: execute bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" - result = subprocess.run(["python", "driver.py", "--exec_mode=execute", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + result = subprocess.run( + [ + "python", + "driver.py", + "--exec_mode=execute", + f"--batchsize={batch_size}", + f"--bitfile={bitfile}", + "--inputfile=input.npy", + "--outputfile=output.npy", + f"--platform={platform}", + ], + capture_output=True, + text=True, + timeout=default_test_run_timeout, + ) assert result.returncode == 0 # Load the output and reference arrays @@ -112,7 +140,21 @@ def test_type_throughput(self, test_dir, batch_size, platform): # Run test option: throughput bitfile = "a.xclbin" if platform == "alveo" else "resizer.bit" - result = subprocess.run(["python", "driver.py", "--exec_mode=throughput_test", f"--batchsize={batch_size}", f"--bitfile={bitfile}", "--inputfile=input.npy", "--outputfile=output.npy", f"--platform={platform}"], capture_output=True, text=True, timeout=default_test_run_timeout) + result = subprocess.run( + [ + "python", + "driver.py", + "--exec_mode=throughput_test", + f"--batchsize={batch_size}", + f"--bitfile={bitfile}", + "--inputfile=input.npy", + "--outputfile=output.npy", + f"--platform={platform}", + ], + capture_output=True, + text=True, + timeout=default_test_run_timeout, + ) assert result.returncode == 0 # Check if nw_metrics.txt now exists after test run @@ -158,7 +200,7 @@ def test_type_throughput(self, test_dir, batch_size, platform): np.round(v["DRAM_out_bandwidth[MB/s]"], 2), ) ret_str += "\n" + "-----------------------------" - largest_bsize = bsize_range[-1] + # largest_bsize = bsize_range[-1] # Dump the metrics to a text file with open(throughput_results_formatted_file, "w") as f: diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 59fbb0c1cb..07e977a266 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -28,9 +28,8 @@ import pytest -import numpy as np - import itertools +import numpy as np # as of Feb'20 there is a bug that segfaults ONNX shape inference if we # import pytorch before onnx, so we make sure to import onnx first @@ -41,7 +40,6 @@ from brevitas.export import export_qonnx from dataset_loading import cifar, mnist from distutils.dir_util import copy_tree -from shutil 
import copy from qonnx.core.datatype import DataType from qonnx.core.modelwrapper import ModelWrapper from qonnx.custom_op.registry import getCustomOp @@ -60,6 +58,7 @@ from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul from qonnx.transformation.merge_onnx_models import MergeONNXModels from qonnx.util.cleanup import cleanup as qonnx_cleanup +from shutil import copy import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls import finn.transformation.streamline.absorb as absorb @@ -348,12 +347,15 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): # parameters that make up inputs to test case(s) def get_full_parameterized_test_list(marker, wbits_list, abits_list, topology_list, board_list): test_cases = [ - (f'{marker}_w{param1}_a{param2}_{param3}_{param4}', { - 'wbits': param1, - 'abits': param2, - 'topology': param3, - 'board': param4, - }) + ( + f"{marker}_w{param1}_a{param2}_{param3}_{param4}", + { + "wbits": param1, + "abits": param2, + "topology": param3, + "board": param4, + }, + ) for param1, param2, param3, param4 in itertools.product( wbits_list, abits_list, @@ -376,21 +378,63 @@ def pytest_generate_tests(metafunc): # Separate the full list of markers used on command line. # This allows a user to select multiple markers - all_markers_used = metafunc.config.getoption("-m").split(" ") + all_markers_used = metafunc.config.getoption("-m").split(" ") for marker in all_markers_used: if "sanity_bnn" in marker: - # Define a set of sanity tests that target each of the supported boards with fixed parameters - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[1], topology_list=["lfc"], board_list=[test_support_board_map[0]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[1], abits_list=[2], topology_list=["cnv"], board_list=[test_support_board_map[1]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["tfc"], board_list=[test_support_board_map[2]])) - scenarios.extend(get_full_parameterized_test_list("sanity_bnn", wbits_list=[2], abits_list=[2], topology_list=["cnv"], board_list=[test_support_board_map[3]])) + # Define a set of sanity tests that target each of + # the supported boards with fixed parameters + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[1], + abits_list=[1], + topology_list=["lfc"], + board_list=[test_support_board_map[0]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[1], + abits_list=[2], + topology_list=["cnv"], + board_list=[test_support_board_map[1]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[2], + abits_list=[2], + topology_list=["tfc"], + board_list=[test_support_board_map[2]], + ) + ) + scenarios.extend( + get_full_parameterized_test_list( + "sanity_bnn", + wbits_list=[2], + abits_list=[2], + topology_list=["cnv"], + board_list=[test_support_board_map[3]], + ) + ) if "bnn_" in marker: # Target the full set of parameters for a single board # Extract the board name from the marker used, as it is in the form of 'bnn_' - bnn_board = next((element for element in test_support_board_map if marker.split("_")[1] in element.lower()), None) - test_cases = get_full_parameterized_test_list("bnn", wbits, abits, topology, [bnn_board]) + bnn_board = next( + ( + element + for element in test_support_board_map + if marker.split("_")[1] in 
element.lower() + ), + None, + ) + test_cases = get_full_parameterized_test_list( + "bnn", wbits, abits, topology, [bnn_board] + ) scenarios.extend(test_cases) if len(scenarios) > 0: @@ -401,6 +445,7 @@ def pytest_generate_tests(metafunc): argvalues.append([x[1] for x in items]) metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class") + @pytest.mark.sanity_bnn @pytest.mark.bnn_pynq @pytest.mark.bnn_zcu104 @@ -706,9 +751,7 @@ def test_make_pynq_driver(self, topology, wbits, abits, board): model.save(get_checkpoint_name(topology, wbits, abits, "driver_" + board)) def test_deploy(self, topology, wbits, abits, board): - prev_chkpt_name = get_checkpoint_name( - topology, wbits, abits, "driver_" + board - ) + prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "driver_" + board) model = load_test_checkpoint_or_skip(prev_chkpt_name) model_title = "%s_w%d_a%d_%s" % ("bnn", wbits, abits, topology) deploy_based_on_board(model, model_title, topology, wbits, abits, board) From 3df0c17191cafd2c5e90f0aa2f310626b1297e67 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 8 Aug 2023 11:39:08 +0100 Subject: [PATCH 43/77] [GHA] exclude bnn_pynq from quicktest --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index 466fcfb09d..814cec03d1 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,7 +6,7 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL" + python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks or bnn_pynq)' --dist=loadfile -n $PYTEST_PARALLEL" elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL" From 066d0277ff70bb7cf990baacc563788467a5c836 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 14:34:29 +0100 Subject: [PATCH 44/77] [CI] Split Jenkinsfiles into CI and testing --- docker/jenkins/Jenkinsfile_CI | 46 +++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 docker/jenkins/Jenkinsfile_CI diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI new file mode 100644 index 0000000000..2954877c2a --- /dev/null +++ b/docker/jenkins/Jenkinsfile_CI @@ -0,0 +1,46 @@ +node { + def app + stage('Clone repository') { + /* Let's make sure we have the repository cloned to our workspace */ + checkout scm + } + withEnv([ + "FINN_XILINX_PATH=/proj/xbuilds/SWIP/2022.2_1014_8888/installs/lin64", + "FINN_XILINX_VERSION=2022.2", + "FINN_DOCKER_TAG=xilinx/finn:jenkins", + "FINN_HOST_BUILD_DIR=/scratch/users/finn_ci", + "PLATFORM_REPO_PATHS=/opt/xilinx/platforms" + ]){ + parallel firstBranch: { + stage('Brevitas export') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") + } + } + }, secondBranch: { + stage('Streamlining transformations') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mstreamline") + } + } + }, thirdBranch: { + stage('Util functions') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mutil") + } + } + }, fourthBranch: { + stage('General transformations') { + dir("${env.WORKSPACE}") { 
+ sh("bash run-docker.sh python setup.py test --addopts -mtransform") + } + } + }, fifthBranch: { + stage('Fpgadataflow transformations and simulations') { + dir("${env.WORKSPACE}") { + sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") + } + } + } + } +} From 6f84ed9466f5cc8ef1041236c0c3369b786a90dc Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 14:44:51 +0100 Subject: [PATCH 45/77] [Tests] Rename board map for tests --- src/finn/util/basic.py | 4 ++-- tests/end2end/test_end2end_bnn_pynq.py | 16 ++++++---------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/finn/util/basic.py b/src/finn/util/basic.py index 7dd04996ba..a184a53862 100644 --- a/src/finn/util/basic.py +++ b/src/finn/util/basic.py @@ -31,8 +31,8 @@ import sys import tempfile -# supported boards -test_support_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] +# test boards +test_board_map = ["Pynq-Z1", "KV260_SOM", "ZCU104", "U250"] # mapping from PYNQ board names to FPGA part names pynq_part_map = dict() diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 07e977a266..0343b9082b 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -92,7 +92,7 @@ MakeMaxPoolNHWC, MoveScalarLinearPastInvariants, ) -from finn.util.basic import get_finn_root, make_build_dir, test_support_board_map +from finn.util.basic import get_finn_root, make_build_dir, test_board_map from finn.util.pytorch import ToTensor from finn.util.test import ( execute_parent, @@ -390,7 +390,7 @@ def pytest_generate_tests(metafunc): wbits_list=[1], abits_list=[1], topology_list=["lfc"], - board_list=[test_support_board_map[0]], + board_list=[test_board_map[0]], ) ) scenarios.extend( @@ -399,7 +399,7 @@ def pytest_generate_tests(metafunc): wbits_list=[1], abits_list=[2], topology_list=["cnv"], - board_list=[test_support_board_map[1]], + board_list=[test_board_map[1]], ) ) scenarios.extend( @@ -408,7 +408,7 @@ def pytest_generate_tests(metafunc): wbits_list=[2], abits_list=[2], topology_list=["tfc"], - board_list=[test_support_board_map[2]], + board_list=[test_board_map[2]], ) ) scenarios.extend( @@ -417,7 +417,7 @@ def pytest_generate_tests(metafunc): wbits_list=[2], abits_list=[2], topology_list=["cnv"], - board_list=[test_support_board_map[3]], + board_list=[test_board_map[3]], ) ) @@ -425,11 +425,7 @@ def pytest_generate_tests(metafunc): # Target the full set of parameters for a single board # Extract the board name from the marker used, as it is in the form of 'bnn_' bnn_board = next( - ( - element - for element in test_support_board_map - if marker.split("_")[1] in element.lower() - ), + (element for element in test_board_map if marker.split("_")[1] in element.lower()), None, ) test_cases = get_full_parameterized_test_list( From e080625d0b41f1ad8972ab2b1bf7b0ae899be174 Mon Sep 17 00:00:00 2001 From: auphelia Date: Wed, 9 Aug 2023 17:16:09 +0100 Subject: [PATCH 46/77] [Tests] Fix bug in reshaping input npy for remote execution --- tests/end2end/test_end2end_bnn_pynq.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index 0343b9082b..d98c06f7d0 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -333,7 +333,8 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1) else: input_shape = 
input_tensor_npy.shape - input_tensor_npy = (input_shape[0], np.prod(input_shape[1:])) + new_input_shape = (input_shape[0], np.prod(input_shape[1:])) + input_tensor_npy = input_tensor_npy.reshape(new_input_shape) np.save(os.path.join(deployment_dir, "input.npy"), input_tensor_npy.copy()) np.save(os.path.join(deployment_dir, "output_reference.npy"), output_tensor_npy) From fe09f06d9cf35994269db2f667472167f05d6165 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 23 Aug 2023 09:51:42 +0100 Subject: [PATCH 47/77] [CI] Fix bug with build parameters and result flags sharing common names --- docker/jenkins/Jenkinsfile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 7ca9aedafc..f4f0533c3f 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -96,7 +96,7 @@ pipeline { stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline - env.FPGADATAFLOW = "SUCCESS" + env.FPGADATAFLOW_RESULT = "SUCCESS" } } } @@ -129,7 +129,7 @@ pipeline { stash name: env.TEST_NAME, includes: "${env.TEST_NAME}.xml,${env.TEST_NAME}.html" // Use an env variable to help collect test results later in pipeline - env.END2END = "SUCCESS" + env.END2END_RESULT = "SUCCESS" } } } @@ -705,13 +705,13 @@ pipeline { dir('reports') { // Only unstash for stages that ran unstashSuccessfulStage(env.SANITY_UT, "sanity_ut") - unstashSuccessfulStage(env.FPGADATAFLOW, "fpgadataflow") + unstashSuccessfulStage(env.FPGADATAFLOW_RESULT, "fpgadataflow") unstashSuccessfulStage(env.BNN_BUILD_SANITY, "bnn_build_sanity") unstashSuccessfulStage(env.SANITY_BNN_TEST_U250, "xml_sanity_bnn_test_U250") unstashSuccessfulStage(env.SANITY_BNN_TEST_PYNQZ1, "xml_sanity_bnn_test_PynqZ1") unstashSuccessfulStage(env.SANITY_BNN_TEST_ZCU104, "xml_sanity_bnn_test_ZCU104") unstashSuccessfulStage(env.SANITY_BNN_TEST_KV260_SOM, "xml_sanity_bnn_test_KV260_SOM") - unstashSuccessfulStage(env.END2END, "end2end") + unstashSuccessfulStage(env.END2END_RESULT, "end2end") unstashSuccessfulStage(env.BNN_BUILD_U250, "bnn_build_full_U250") unstashSuccessfulStage(env.BNN_BUILD_PYNQZ1, "bnn_build_full_PynqZ1") unstashSuccessfulStage(env.BNN_BUILD_ZCU104, "bnn_build_full_ZCU104") @@ -730,7 +730,7 @@ pipeline { archiveArtifacts artifacts: "reports/*.html" archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") - archiveSuccessfulStage(env.FPGADATAFLOW, "coverage_fpgadataflow") + archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") // Plot what XML files were created during the test run junit 'reports/*.xml' From db99ec811957310b68c5818e506c3374402dd16f Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 2 Aug 2023 11:13:31 +0100 Subject: [PATCH 48/77] Add support to pull in a .Xilinx directory to allow beta devices to be enabled inside docker container See https://docs.xilinx.com/r/en-US/ug835-vivado-tcl-commands/Tcl-Initialization-Scripts for information on using tcl init scripts --- docker/finn_entrypoint.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index 4e0266ca6b..b441c9359a 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -114,6 +114,27 @@ else yecho "If you need Vitis HLS, ensure HLS_PATH is set correctly and mounted into the Docker container." 
fi
+
+if [ -d "$FINN_ROOT/.Xilinx" ]; then
+  mkdir "$HOME/.Xilinx"
+  if [ -f "$FINN_ROOT/.Xilinx/HLS_init.tcl" ]; then
+    cp "$FINN_ROOT/.Xilinx/HLS_init.tcl" "$HOME/.Xilinx/"
+  else
+    yecho "Unable to find $FINN_ROOT/.Xilinx/HLS_init.tcl"
+  fi
+
+  if [ -f "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" ]; then
+    mkdir "$HOME/.Xilinx/Vivado/"
+    cp "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" "$HOME/.Xilinx/Vivado/"
+  else
+    yecho "Unable to find $FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl"
+  fi
+else
+  yecho "Unable to find $FINN_ROOT/.Xilinx"
+  yecho "Functionality dependent on beta devices will not be available."
+  yecho "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl "
+  yecho "are set correctly and mounted into the Docker container."
+fi
+
 export PATH=$PATH:$HOME/.local/bin

 # execute the provided command(s) as root
 exec "$@"

From 26e3306796d3d0daac94b87c9d4d01676ecf134e Mon Sep 17 00:00:00 2001
From: auphelia
Date: Thu, 24 Aug 2023 15:40:48 +0100
Subject: [PATCH 49/77] [NBs] Add first draft of advanced builder settings
 notebook

---
 .../4_advanced_builder_settings.ipynb         | 789 ++++++++++++++++++
 1 file changed, 789 insertions(+)
 create mode 100644 notebooks/advanced/4_advanced_builder_settings.ipynb

diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb
new file mode 100644
index 0000000000..ce02ab618e
--- /dev/null
+++ b/notebooks/advanced/4_advanced_builder_settings.ipynb
@@ -0,0 +1,789 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "8fcff912",
+   "metadata": {},
+   "source": [
+    "# Advanced Builder settings\n",
+    "\n",
+    "**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".\n",
+    "\n",
+    "\"drawing\"\n",
+    "\n",
+    "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from a small convolutional network trained on CIFAR-10. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated in the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n",
+    "\n",
+    "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "a830e730",
+   "metadata": {},
+   "source": [
+    "In this tutorial, we will have a more detailed look into the FINN builder tool and explore different options to customize your FINN design. We assume that you have already completed the [Cybersecurity notebooks](../end2end_example/cybersecurity) and that you have a basic understanding of how the FINN compiler works and how to use the FINN builder tool."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5ec9a0db",
+   "metadata": {},
+   "source": [
+    "## Outline\n",
+    "---------------\n",
+    "\n",
+    "1. [Introduction to the CNV-w2a2 network](#intro_cnv)\n",
+    "2. 
[Recap default builder flow](#recap_builder)\n",
+    "3. [How to make a custom build step](#custom_step)\n",
+    "4. [Folding configuration json](#folding_config)\n",
+    "5. [Additional builder arguments](#builder_arg)\n",
+    "    1. [Verification steps](#verify)\n",
+    "    2. [Examples for additional builder arguments](#example_args)\n",
+    "    3. [Other builder arguments](#other_args)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5dbed63f",
+   "metadata": {},
+   "source": [
+    "## Introduction to the CNV-w2a2 network \n",
+    "\n",
+    "The particular quantized neural network (QNN) we will be targeting in this notebook is referred to as CNV-w2a2 and it classifies 32x32 RGB images into one of ten CIFAR-10 classes. All weights and activations in this network are quantized to two bits, with the exception of the input (which is RGB with 8 bits per channel) and the final output (which is 32-bit numbers). It is similar to the convolutional neural network used in the [cnv_end2end_example](../end2end_example/bnn-pynq/cnv_end2end_example.ipynb) Jupyter notebook.\n",
+    "\n",
+    "\n",
+    "You'll have a chance to interactively examine the layers that make up the network in Netron in a moment, so that's enough about the network for now. \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ce459f3c",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from finn.util.basic import make_build_dir\n",
+    "from finn.util.visualization import showInNetron, showSrc\n",
+    "import os\n",
+    " \n",
+    "build_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "fe262964",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import torch\n",
+    "from finn.util.test import get_test_model_trained\n",
+    "from brevitas.export import export_qonnx\n",
+    "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n",
+    "from qonnx.core.modelwrapper import ModelWrapper\n",
+    "from qonnx.core.datatype import DataType\n",
+    "\n",
+    "cnv = get_test_model_trained(\"CNV\", 2, 2)\n",
+    "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
+    "export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path)\n",
+    "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)\n",
+    "#model = ModelWrapper(export_onnx_path)\n",
+    "#model.set_tensor_datatype(model.graph.input[0].name, DataType[\"UINT8\"])\n",
+    "#model.save(build_dir + \"/end2end_cnv_w2a2_tidy.onnx\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "87f59da6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "showInNetron(build_dir+\"/end2end_cnv_w2a2_export.onnx\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "c764ed76",
+   "metadata": {},
+   "source": [
+    "## Quick recap, how to set up the default builder flow for resource estimations "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9007705a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import finn.builder.build_dataflow as build\n",
+    "import finn.builder.build_dataflow_config as build_cfg\n",
+    "import os\n",
+    "import shutil\n",
+    "\n",
+    "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n",
+    "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n",
+    "\n",
+    "estimates_output_dir = \"output_estimates_only\"\n",
+    "\n",
+    "#Delete previous run results if exist\n",
+    "if os.path.exists(estimates_output_dir):\n",
+    "    shutil.rmtree(estimates_output_dir)\n",
+    "    print(\"Previous run results deleted!\")\n",
+    "\n",
+    "\n",
+    "cfg_estimates = 
build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_cfg.estimate_only_dataflow_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02e4c0f0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "72de8d4c", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f3fe1186", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\n\".join(build_cfg.estimate_only_dataflow_steps))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "029da0da", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "showSrc(build_dataflow_steps.step_tidy_up)" + ] + }, + { + "cell_type": "markdown", + "id": "e9c2c97f", + "metadata": {}, + "source": [ + "## How to make a custom build step " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b9d43cc8", + "metadata": {}, + "outputs": [], + "source": [ + "from finn.util.pytorch import ToTensor\n", + "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", + "\n", + "def custom_step_add_pre_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):\n", + " ishape = model.get_tensor_shape(model.graph.input[0].name)\n", + " # preprocessing: torchvision's ToTensor divides uint8 inputs by 255\n", + " preproc = ToTensor()\n", + " export_qonnx(preproc, torch.randn(ishape), \"preproc.onnx\", opset_version=11)\n", + " preproc_model = ModelWrapper(\"preproc.onnx\")\n", + " # set input finn datatype to UINT8\n", + " preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType[\"UINT8\"])\n", + " model = model.transform(MergeONNXModels(preproc_model))\n", + " return model\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f00b465", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_pre_proc\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", 
+ ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3a2bcea", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "87e5651e", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_proc/intermediate_models/custom_step_add_pre_proc.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8c6f1bd0", + "metadata": {}, + "outputs": [], + "source": [ + "from qonnx.transformation.insert_topk import InsertTopK\n", + "\n", + "def custom_step_add_post_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):\n", + " model = model.transform(InsertTopK(k=1))\n", + " return model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "57adbb44", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_pre_and_post_proc\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b0598b81", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44127417", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/step_convert_to_hls.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "5ffbadd1", + "metadata": {}, + "source": [ + "## Folding configuration json " + ] + }, + { + "cell_type": "markdown", + "id": "c164040f", + "metadata": {}, + "source": [ + "To learn about the influence of folding factors/parallelism in FINN, please have a look at this notebook: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f75f5634", + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object, indent=1))" + ] + }, + { + "cell_type": "markdown", + "id": "ba856c28", + "metadata": {}, + "source": [ + "Hardware configuration for each layer\n", + "\n", + "FIFO depths\n", + "\n", + "Type of memory/compute resources to be used\n", + "\n", + "Parallelism along different dimensions (“PE”, ”SIMD”)\n", + "\n", 
+ "Baked-in, decoupled or external parameters\n", + "\n", + "Influences almost all flows\n", + "\n", + "step_apply_folding_config\n", + "\n", + "Values tuned for performance & footprint\n", + "\n", + "Many additional constraints not visible from .json" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f7f42774", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_pre_and_post_proc/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cdd9f706", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_all_lutram\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " folding_config_file = \"folding_config_all_lutram.json\",\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99b647c0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cc680178", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_all_lutram/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "695ecfb1", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_all_lutram/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59e8aaaa", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_all_bram\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " 
\"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " folding_config_file = \"folding_config_all_bram.json\",\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2cdc1aa0", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd0388fd", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_all_bram/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e60a3efb", + "metadata": {}, + "outputs": [], + "source": [ + "with open(build_dir+\"/output_all_bram/report/estimate_layer_resources.json\", 'r') as json_file:\n", + " json_object = json.load(json_file)\n", + "\n", + "print(json.dumps(json_object[\"total\"], indent=1))" + ] + }, + { + "cell_type": "markdown", + "id": "4a675834", + "metadata": {}, + "source": [ + "## Additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "e0c167f4", + "metadata": {}, + "source": [ + "### Verification steps " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4fe7318e", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "showSrc(build_dataflow_steps.step_tidy_up)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ce1aa025", + "metadata": {}, + "outputs": [], + "source": [ + "showSrc(build_cfg.VerificationStepType)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e157d03c", + "metadata": {}, + "outputs": [], + "source": [ + "# Get golden io pair from Brevitas and save as .npy files\n", + "from finn.util.test import get_trained_network_and_ishape, get_example_input, get_topk\n", + "import numpy as np\n", + "\n", + "\n", + "(brevitas_model, ishape) = get_trained_network_and_ishape(\"cnv\", 2, 2)\n", + "input_tensor_npy = get_example_input(\"cnv\")\n", + "input_tensor_torch = torch.from_numpy(input_tensor_npy).float()\n", + "input_tensor_torch = ToTensor().forward(input_tensor_torch).detach()\n", + "output_tensor_npy = brevitas_model.forward(input_tensor_torch).detach().numpy()\n", + "output_tensor_npy = get_topk(output_tensor_npy, k=1)\n", + "\n", + "np.save(\"input.npy\", input_tensor_npy)\n", + "np.save(\"expected_output.npy\", output_tensor_npy)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cd3032b", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_with_verification\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " 
\"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + " verify_steps=[\n", + " build_cfg.VerificationStepType.QONNX_TO_FINN_PYTHON,\n", + " build_cfg.VerificationStepType.TIDY_UP_PYTHON,\n", + " build_cfg.VerificationStepType.STREAMLINED_PYTHON,\n", + " ]\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a3a46e76", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates)" + ] + }, + { + "cell_type": "markdown", + "id": "f0b30546", + "metadata": {}, + "source": [ + "### Examples for additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "ddfb40e4", + "metadata": {}, + "source": [ + "#### Standalone Thresholds" + ] + }, + { + "cell_type": "markdown", + "id": "b710fd28", + "metadata": {}, + "source": [ + "#### RTL Convolutional Input Generator" + ] + }, + { + "cell_type": "markdown", + "id": "4609f94d", + "metadata": {}, + "source": [ + "### Other builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "37b6853d", + "metadata": {}, + "source": [ + "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9f6aa29", + "metadata": {}, + "outputs": [], + "source": [ + "# Filter out methods\n", + "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", + "print(\"\\n\".join(builder_args))" + ] + }, + { + "cell_type": "markdown", + "id": "b12ab370", + "metadata": {}, + "source": [ + "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. 
target_fps, fpga_part, folding_config_file, ...\n", + "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 316d23a03dc70b260093a3811e7156e1ca1a7c06 Mon Sep 17 00:00:00 2001 From: auphelia Date: Thu, 24 Aug 2023 16:59:11 +0100 Subject: [PATCH 50/77] [NBs] Checking in advanced nb --- .../4_advanced_builder_settings.ipynb | 269 +++++++++++++++++- 1 file changed, 256 insertions(+), 13 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index ce02ab618e..5936118089 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -34,7 +34,8 @@ "\n", "1. [Introduction to the CNV-w2a2 network](#intro_cnv)\n", "2. [Recap default builder flow](#recap_builder)\n", - "3. [How to make a custom build step](#custom_step)\n", + "3. [Build steps](#build_step)\n", + " 1. [How to make a custom build step](#custom_step)\n", "4. [Folding configuration json](#folding_config)\n", "5. [Additional builder arguments](#builder_arg)\n", " 1. [Verification steps](#verify)\n", @@ -86,10 +87,7 @@ "cnv = get_test_model_trained(\"CNV\", 2, 2)\n", "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path)\n", - "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)\n", - "#model = ModelWrapper(export_onnx_path)\n", - "#model.set_tensor_datatype(model.graph.input[0].name, DataType[\"UINT8\"])\n", - "#model.save(build_dir + \"/end2end_cnv_w2a2_tidy.onnx\")" + "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)" ] }, { @@ -154,7 +152,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -167,6 +165,14 @@ "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "7e561a91", + "metadata": {}, + "source": [ + "## Build steps " + ] + }, { "cell_type": "code", "execution_count": null, @@ -177,6 +183,25 @@ "print(\"\\n\".join(build_cfg.estimate_only_dataflow_steps))" ] }, + { + "cell_type": "markdown", + "id": "dd3ef987", + "metadata": {}, + "source": [ + "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." 
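+    "\n",
+    "As an illustrative sketch of that (added here for clarity, not from the original notebook; it assumes each built-in step name maps to a module-level function of the same name in `finn.builder.build_dataflow_steps`, which holds for the steps printed above):\n",
+    "\n",
+    "```python\n",
+    "import finn.builder.build_dataflow_steps as build_dataflow_steps\n",
+    "import finn.builder.build_dataflow_config as build_cfg\n",
+    "\n",
+    "# Print the first docstring line of every step in the estimate-only flow\n",
+    "for step_name in build_cfg.estimate_only_dataflow_steps:\n",
+    "    step_fn = getattr(build_dataflow_steps, step_name)\n",
+    "    print(step_name, '->', step_fn.__doc__.strip().splitlines()[0])\n",
+    "```"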
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "313fac18", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "print(build_dataflow_steps.step_tidy_up.__doc__)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -193,7 +218,7 @@ "id": "e9c2c97f", "metadata": {}, "source": [ - "## How to make a custom build step " + "### How to make a custom build step " ] }, { @@ -349,7 +374,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -388,9 +413,9 @@ "import json\n", "\n", "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", - " json_object = json.load(json_file)\n", + " folding_config = json.load(json_file)\n", "\n", - "print(json.dumps(json_object, indent=1))" + "print(json.dumps(folding_config, indent=1))" ] }, { @@ -430,6 +455,38 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "d4d177dc", + "metadata": {}, + "source": [ + "You can manually change, here we generate two new folding configurations with either all lutram or all bram" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "112af6fd", + "metadata": {}, + "outputs": [], + "source": [ + "# Set all ram_style to LUT RAM\n", + "for key in folding_config:\n", + " if \"ram_style\" in folding_config[key]:\n", + " folding_config[key][\"ram_style\"] = \"distributed\" \n", + "# Save as .json \n", + "with open(\"folding_config_all_lutram.json\", \"w\") as jsonFile:\n", + " json.dump(folding_config, jsonFile)\n", + " \n", + "# Set all ram_style to BRAM\n", + "for key in folding_config:\n", + " if \"ram_style\" in folding_config[key]:\n", + " folding_config[key][\"ram_style\"] = \"block\" \n", + "# Save as .json \n", + "with open(\"folding_config_all_bram.json\", \"w\") as jsonFile:\n", + " json.dump(folding_config, jsonFile)" + ] + }, { "cell_type": "code", "execution_count": null, @@ -481,7 +538,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -558,7 +615,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -669,6 +726,7 @@ " \"step_streamline\",\n", " \"step_convert_to_hls\",\n", " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", " \"step_apply_folding_config\",\n", " \"step_minimize_bit_width\",\n", " \"step_generate_estimate_reports\",\n", @@ -700,7 +758,7 @@ "outputs": [], "source": [ "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates)" + "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -719,6 +777,80 @@ "#### Standalone Thresholds" ] }, + { + "cell_type": "markdown", + "id": "bddbd686", + "metadata": {}, + "source": [ + " picture of im2col + matmul + multithreshold" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "de55871e", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_standalone_thresholds\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " 
shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " standalone_thresholds = True,\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c143f97a", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ba36f07b", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, { "cell_type": "markdown", "id": "b710fd28", @@ -727,6 +859,72 @@ "#### RTL Convolutional Input Generator" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "8249280d", + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "estimates_output_dir = \"output_rtl_swg\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(estimates_output_dir):\n", + " shutil.rmtree(estimates_output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + "]\n", + "\n", + "cfg_estimates = build.DataflowBuildConfig(\n", + " output_dir = estimates_output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " force_rtl_conv_inp_gen = True,\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64e83b16", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_estimates);" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "09c45dcd", + "metadata": {}, + "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, { "cell_type": "markdown", "id": "4609f94d", @@ -763,6 +961,51 @@ "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. 
Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n",
+    "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9aba0493",
+   "metadata": {},
+   "source": [
+    "So far in this notebook, we have only looked at configurations up to the generation of estimate reports; a lot of these builder arguments actually become relevant at a later stage in the FINN flow."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "ec39b9f2",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "76df000f",
+   "metadata": {},
+   "source": [
+    "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "caf49f03",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import finn.builder.build_dataflow_steps as build_dataflow_steps\n",
+    "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "1ec10985",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
  ],
  "metadata": {

From 033fdc30267ed34c6aee2ccb88c4828acc995aa7 Mon Sep 17 00:00:00 2001
From: auphelia
Date: Mon, 28 Aug 2023 22:04:43 +0100
Subject: [PATCH 51/77] [NB] First two sections of advanced nb

---
 .../4_advanced_builder_settings.ipynb         | 268 +++++++++++++++++-
 1 file changed, 256 insertions(+), 12 deletions(-)

diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb
index 5936118089..63f69a6385 100644
--- a/notebooks/advanced/4_advanced_builder_settings.ipynb
+++ b/notebooks/advanced/4_advanced_builder_settings.ipynb
@@ -11,8 +11,7 @@
    "\n",
    "\"drawing\"\n",
    "\n",
-    "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in such architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n",
-    "\n",
+    "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. 
This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput." ] }, @@ -53,7 +52,7 @@ "The particular quantized neural network (QNN) we will be targeting in this notebook is referred to as CNV-w2a2 and it classifies 32x32 RGB images into one of ten CIFAR-10 classes. All weights and activations in this network are quantized to two bit, with the exception of the input (which is RGB with 8 bits per channel) and the final output (which is 32-bit numbers). It is similar to the convolutional neural network used in the [cnv_end2end_example](../end2end_example/bnn-pynq/cnv_end2end_example.ipynb) Jupyter notebook.\n", "\n", "\n", - "You'll have a chance to interactively examine the layers that make up the network in Netron in a moment, so that's enough about the network for now. \n" + "You'll have a chance to interactively examine the layers that make up the network in Netron. We start by setting the build directory to the directory this notebook is in and importing helper functions to use in the notebook to examine ONNX graphs and source code." ] }, { @@ -63,13 +62,21 @@ "metadata": {}, "outputs": [], "source": [ - "from finn.util.basic import make_build_dir\n", + "#from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showInNetron, showSrc\n", "import os\n", " \n", "build_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"" ] }, + { + "cell_type": "markdown", + "id": "7fc6444c", + "metadata": {}, + "source": [ + "In the next step, we will export the trained network directly from Brevitas to the QONNX format. QONNX is the intermediate representation (IR) that is used as the frontend to the FINN compiler. Please note that the internal representation of the network is still the FINN-ONNX format. [QONNX and FINN-ONNX](https://finn.readthedocs.io/en/latest/internals.html#intermediate-representation-qonnx-and-finn-onnx) are extensions to the ONNX format to represent quantization, especially below 8 bit, in ONNX graphs. The main difference is that quantization in QONNX graphs is represented using dedicated quantization nodes ([more about QONNX](https://github.com/fastmachinelearning/qonnx)) while the quantization in FINN-ONNX is an annotation attached to the tensors." + ] + }, { "cell_type": "code", "execution_count": null, @@ -81,8 +88,6 @@ "from finn.util.test import get_test_model_trained\n", "from brevitas.export import export_qonnx\n", "from qonnx.util.cleanup import cleanup as qonnx_cleanup\n", - "from qonnx.core.modelwrapper import ModelWrapper\n", - "from qonnx.core.datatype import DataType\n", "\n", "cnv = get_test_model_trained(\"CNV\", 2, 2)\n", "export_onnx_path = build_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", @@ -90,6 +95,14 @@ "qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)" ] }, + { + "cell_type": "markdown", + "id": "d24b632f", + "metadata": {}, + "source": [ + "After the export, we call a clean up function on the model. This makes sure, that for example all shapes in the network are inferred, constant folding was applied and all tensors and nodes have unique names. 
In the next step, we can visualize the graph using Netron. When scrolling through the graph, you can see the Quant nodes that indicate the quantization in the network. In the [first step](https://github.com/Xilinx/finn/blob/main/src/finn/builder/build_dataflow_steps.py#L260) of the FINN builder flow, the network gets converted from the QONNX format to the FINN-ONNX format. That means these Quant nodes will not be present in the graph anymore and instead the quantization will be attached as an annotation to the tensors." + ] + }, { "cell_type": "code", "execution_count": null, @@ -108,6 +121,14 @@ "## Quick recap, how to setup up default builder flow for resource estimations " ] }, + { + "cell_type": "markdown", + "id": "a26e5418", + "metadata": {}, + "source": [ + "As a quick recap, let's set up the builder like we have done in the cybersecurity example to get the resource estimates for our example network." + ] + }, { "cell_type": "code", "execution_count": null, @@ -155,16 +176,130 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "4fa0b9f5", + "metadata": {}, + "source": [ + "The output directory was created and we can extract information about our model and also how it was processed in the FINN compiler from the generated files. Let's focus on the intermediate models for now. You can find them in the output directory in the folder \"intermediate_models\"." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "05a941ef", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_estimates_only/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "d746eff3", + "metadata": {}, + "source": [ + "After each FINN builder step, the graph is saved as .onnx file. In the cell above we sort the intermediate models by time in descending order (`ls -t -r`) to visualize the builder flow. As you can see after the conversion to the FINN-ONNX format (`step_qonnx_to_finn`), the graph is prepared by tidy up and streamlining (`step_tidy_up` and `step_streamline`) and then the high level nodes are converted to HLS layers (`step_convert_to_hls`). Then there is a partition created from all layers that were converted to HLS layers (`step_create_dataflow_partition`), then optimizations are applied (`step_target_fps_parallelization`, `step_apply_folding_config` and `step_minimize_bit_width`). In the final step of this example we generate resource and performance reports for the network (`step_generate_estimate_reports`). Use the code below to investigate the network after each step." + ] + }, { "cell_type": "code", "execution_count": null, "id": "72de8d4c", "metadata": {}, "outputs": [], + "source": [ + "model_to_investigate = \"step_qonnx_to_finn.onnx\"\n", + "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/\"+model_to_investigate)" + ] + }, + { + "cell_type": "markdown", + "id": "bccebd0d", + "metadata": {}, + "source": [ + "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there is layers that were not converted. We can see this by clicking on the different nodes. HLS layers have the module `finn.custom_op.fpgadataflow`." 
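+    "\n",
+    "Rather than clicking through Netron, the same check can be scripted (a small sketch added for illustration; it relies only on `ModelWrapper` from qonnx and the standard ONNX node `domain` field):\n",
+    "\n",
+    "```python\n",
+    "from qonnx.core.modelwrapper import ModelWrapper\n",
+    "\n",
+    "model = ModelWrapper(build_dir + '/output_estimates_only/intermediate_models/step_convert_to_hls.onnx')\n",
+    "for node in model.graph.node:\n",
+    "    # HLS layers carry the finn.custom_op.fpgadataflow domain\n",
+    "    print(node.op_type, '->', node.domain or 'standard ONNX')\n",
+    "```"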
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d86463a", + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/output_estimates_only/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "2719cc09", + "metadata": {}, + "source": [ + "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer only operations into HLS layers, this means only when the input, output & weights are quantized the node will be converted." + ] + }, + { + "cell_type": "markdown", + "id": "ff7fa549", + "metadata": {}, + "source": [ + "
\n", + "Important notice: We are working on supporting additional data types and this limitation might disappear in the near future.\n", + "
" + ] + }, + { + "cell_type": "markdown", + "id": "6e6d942e", + "metadata": {}, + "source": [ + "When we click on the `global_in` in the graph, we can see that the quantization annotation does not contain a data type. If no data type is set and it can not be derived from the preceeding node, the FINN compiler automatically assumes that the data type is floating point. This is why the first node does not get converted into an HLS layer, the input is assumed to be floating point." + ] + }, + { + "cell_type": "markdown", + "id": "8b8994e6", + "metadata": {}, + "source": [ + "The solution to the problem depends on the actual data input.\n", + "1. The data set is quantized and `global_in` is an integer: We set the data type of the tensor `global_in` before passing the model to the FINN compiler using [helper functions of ModelWrapper](https://finn.readthedocs.io/en/latest/internals.html#helper-functions-for-tensors).\n", + "2. The data set is not quantized: we can either execute the first layer in software (e.g. as part of the Python driver) or we can add a preprocessing step into the graph." + ] + }, + { + "cell_type": "markdown", + "id": "7504dce7", + "metadata": {}, + "source": [ + "Even though in the example of the CNVw2a2, the inputs are 32x32 RGB images, so the input values are 8 bit (UINT8) \"quantized\", the input to the exported model is floating point. For training in Brevitas, these values were normalized between 0 and 1.0 and so the exported model expects floating point values as input. \n", + "This means we are in scenario 2. In the next section we will develop a custom step for the FINN builder flow to add preprocessing to our network.\n", + "\n", + "But before we move to the next section, let's take a look at the last two nodes in the graph that were not converted to HLS layers." + ] + }, + { + "cell_type": "markdown", + "id": "f9c2696b", + "metadata": {}, + "source": [ + "We have two nodes at the end of the graph that we were not able to convert: a floating poing scalar multiplication and addition. These operations are \"left-over\" from streamlining and cannot be merged into a succeeding thresholding operation. \n", + "\n", + "Our example is a network for image classification, so that we know that the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n", + "\n", + "Since the last two layers are scalar operations, they have the same influence on all probability values in the output vector and we can safely merge them into the TopK node. " + ] + }, + { + "cell_type": "markdown", + "id": "4fc8fbf5", + "metadata": {}, + "source": [ + "These pre-processing and post-processing steps are network dependent and we will need to write **custom steps** that can then be executed using the FINN builder tool.\n", + "\n", + "In the next section we will first look into how a standard build step inside FINN looks like and then we will write our own custom steps for pre- and post-processing and add them to the builder configuration." + ] + }, { "cell_type": "markdown", "id": "7e561a91", @@ -173,6 +308,14 @@ "## Build steps " ] }, + { + "cell_type": "markdown", + "id": "fb18b21d", + "metadata": {}, + "source": [ + "The following steps are executed when using the `estimates_only`-flow." 
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -213,6 +356,14 @@ "showSrc(build_dataflow_steps.step_tidy_up)" ] }, + { + "cell_type": "markdown", + "id": "2809f6a7", + "metadata": {}, + "source": [ + "Each steps gets the model and the build configuration as input arguments. Then a certain sequence of transformations is applied to the model. In some of the steps, verification can be run to ensure that the applied transformations have not changed the behaviour of the network. In the end the modified model is returned." + ] + }, { "cell_type": "markdown", "id": "e9c2c97f", @@ -221,6 +372,14 @@ "### How to make a custom build step " ] }, + { + "cell_type": "markdown", + "id": "537a44e7", + "metadata": {}, + "source": [ + "When writing our own custom steps, we use the same pattern. See below the code for the pre-processing for the example network." + ] + }, { "cell_type": "code", "execution_count": null, @@ -230,6 +389,8 @@ "source": [ "from finn.util.pytorch import ToTensor\n", "from qonnx.transformation.merge_onnx_models import MergeONNXModels\n", + "from qonnx.core.modelwrapper import ModelWrapper\n", + "from qonnx.core.datatype import DataType\n", "\n", "def custom_step_add_pre_proc(model: ModelWrapper, cfg: build.DataflowBuildConfig):\n", " ishape = model.get_tensor_shape(model.graph.input[0].name)\n", @@ -239,11 +400,22 @@ " preproc_model = ModelWrapper(\"preproc.onnx\")\n", " # set input finn datatype to UINT8\n", " preproc_model.set_tensor_datatype(preproc_model.graph.input[0].name, DataType[\"UINT8\"])\n", + " # merge pre-processing onnx model with cnv model (passed as input argument)\n", " model = model.transform(MergeONNXModels(preproc_model))\n", " return model\n", " " ] }, + { + "cell_type": "markdown", + "id": "7a6798aa", + "metadata": {}, + "source": [ + "In the next step we can modify the builder configuration to execute a custom sequence of builder steps, including the newly implemented pre-processing custom step.\n", + "\n", + "For that we create a list `build_steps` which contains next to the standard steps from the `estimate_only` flow, also the new custom step to add the pre-processing. This list then gets passed in the build configuration." + ] + }, { "cell_type": "code", "execution_count": null, @@ -254,11 +426,11 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_pre_proc\"\n", + "output_dir = \"output_pre_proc\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -275,7 +447,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -298,6 +470,24 @@ "build.build_dataflow_cfg(model_file, cfg_estimates)" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "51b7dbd5", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_pre_proc/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "4690049f", + "metadata": {}, + "source": [ + "An intermediate .onnx file after the execution of the custom step was automatically created, let's have a look at the graph." 
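+    "\n",
+    "Besides inspecting the graph, we can also check directly that the custom step had the intended effect (an added sketch; it only uses the `ModelWrapper` helpers already introduced above):\n",
+    "\n",
+    "```python\n",
+    "from qonnx.core.modelwrapper import ModelWrapper\n",
+    "\n",
+    "model = ModelWrapper(build_dir + '/output_pre_proc/intermediate_models/custom_step_add_pre_proc.onnx')\n",
+    "# The merged pre-processing should have turned the global input into UINT8\n",
+    "print(model.get_tensor_datatype(model.graph.input[0].name))\n",
+    "```"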
+ ] + }, { "cell_type": "code", "execution_count": null, @@ -308,6 +498,16 @@ "showInNetron(build_dir+\"/output_pre_proc/intermediate_models/custom_step_add_pre_proc.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "90c6bef9", + "metadata": {}, + "source": [ + "The graph is in QONNX format and a division by 255 is inserted in the beginning. We can now use the CIFAR-10 images directly as input to the graph and the new `global_in` tensor is UINT8.\n", + "\n", + "You can already have a look on how the intermediate models have changed by modifying the code in the cell above. Before we go into more detail, we will add another custom step to insert the post-processing. In this case this means the insertion of a TopK node." + ] + }, { "cell_type": "code", "execution_count": null, @@ -332,7 +532,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_pre_and_post_proc\"\n", + "output_dir = \"output_pre_and_post_proc\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -354,7 +554,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -377,16 +577,60 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "95230896", + "metadata": {}, + "outputs": [], + "source": [ + "!ls -t -r output_pre_and_post_proc/intermediate_models" + ] + }, + { + "cell_type": "markdown", + "id": "3a0263b1", + "metadata": {}, + "source": [ + "You can use the code in the cell below to investigate the generated intermediate models. " + ] + }, { "cell_type": "code", "execution_count": null, "id": "44127417", "metadata": {}, "outputs": [], + "source": [ + "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/custom_step_add_post_proc.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "5cc97505", + "metadata": {}, + "source": [ + "Let's have a look at the model after the conversion to hls, to verify that now all layers are correctly converted." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "63131e3e", + "metadata": {}, + "outputs": [], "source": [ "showInNetron(build_dir+\"/output_pre_and_post_proc/intermediate_models/step_convert_to_hls.onnx\")" ] }, + { + "cell_type": "markdown", + "id": "8fd0af6b", + "metadata": {}, + "source": [ + "The model contains now a `Thresholding` layer in the beginning and a `LabelSelect_Batch` layer at the end. Please note, that there is still a `Transpose` node as the first layer of the graph, but we can solve this by converting the input data to the NHWC format before streaming it into the FINN accelerator." 
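+    "\n",
+    "On the host side, that layout conversion is a single transpose (a hypothetical sketch with a random stand-in image; a real driver would load actual CIFAR-10 data instead):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "img_nchw = np.random.randint(0, 256, size=(1, 3, 32, 32)).astype(np.uint8)\n",
+    "# NCHW (1, 3, 32, 32) -> NHWC (1, 32, 32, 3) before streaming into the accelerator\n",
+    "img_nhwc = img_nchw.transpose(0, 2, 3, 1)\n",
+    "```"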
+ ] + }, { "cell_type": "markdown", "id": "5ffbadd1", From 68726ac8a329473bc183af993eacb66c17a0c88a Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 29 Aug 2023 18:15:58 +0100 Subject: [PATCH 52/77] [NB] Add section about folding configurations to advanced nb --- .../4_advanced_builder_settings.ipynb | 135 ++++++++++++++---- 1 file changed, 104 insertions(+), 31 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 63f69a6385..1e17f640ef 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -11,7 +11,7 @@ "\n", "\"drawing\"\n", "\n", - "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", + "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from a small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", "These implementations offer a good balance of performance and flexibility, but building them by hand is difficult and time-consuming. This is where the FINN compiler comes in: it can build streaming dataflow accelerators from an ONNX description to match the desired throughput." ] }, @@ -62,7 +62,6 @@ "metadata": {}, "outputs": [], "source": [ - "#from finn.util.basic import make_build_dir\n", "from finn.util.visualization import showInNetron, showSrc\n", "import os\n", " \n", @@ -218,7 +217,7 @@ "id": "bccebd0d", "metadata": {}, "source": [ - "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there is layers that were not converted. We can see this by clicking on the different nodes. HLS layers have the module `finn.custom_op.fpgadataflow`." + "The analysis of these .onnx files can help us identifying points in the flow in which we might need to intervene and provide the compiler with additional information. When investigating the network after the conversion to HLS layers, we can see that there are layers that were not converted. We can see this by clicking on the different nodes. 
HLS layers have the module `finn.custom_op.fpgadataflow`."
    ]
   },
@@ -236,7 +235,7 @@
    "id": "2719cc09",
    "metadata": {},
    "source": [
-    "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer only operations into HLS layers, this means only when the input, output & weights are quantized the node will be converted."
+    "As you can see in the graph, the first two nodes (a MultiThreshold and Transpose node) and the last two nodes (a Mul and Add node) are not converted into HLS layers. FINN currently only converts integer-only operations into HLS layers; this means a node will only be converted when its input, output & weights are quantized to integer."
    ]
   },
@@ -644,7 +644,17 @@
    "id": "c164040f",
    "metadata": {},
    "source": [
-    "To learn about the influence of folding factors/parallelism in FINN, please have a look at this notebook: "
+    "The FINN compiler allows the user to implement a network in a streaming dataflow architecture; this means every layer is implemented individually and the data is streamed through the accelerator. 
We can customize each layer for specific performance and resource requirements by adjusting the parallelism and resource type of each layer. In the FINN context we refer to this customization of parallelism in each layer as folding. To learn more details about the influence of folding factors/parallelism in FINN, please have a look at our [folding tutorial](3_folding.ipynb).\n", + "\n", + "In this section, we will look into the interface over which we can influence the customization of each layer using the FINN builder tool: A json file containing the folding configuration." + ] + }, + { + "cell_type": "markdown", + "id": "1299b86d", + "metadata": {}, + "source": [ + "Depending on the invoked step, the FINN compiler can produce or consume a .json file containing the folding configuration for each layer. In the cell below, we will have a look at the automatically generated .json file, which is produced by `step_target_fps_parallelization`. We use this then as starting point to manipulate the folding configuration and feed it back into the builder tool." ] }, { @@ -664,26 +674,28 @@ }, { "cell_type": "markdown", - "id": "ba856c28", + "id": "8de787a7", "metadata": {}, "source": [ - "Hardware configuration for each layer\n", - "\n", - "FIFO depths\n", - "\n", - "Type of memory/compute resources to be used\n", - "\n", - "Parallelism along different dimensions (“PE”, ”SIMD”)\n", - "\n", - "Baked-in, decoupled or external parameters\n", - "\n", - "Influences almost all flows\n", - "\n", - "step_apply_folding_config\n", - "\n", - "Values tuned for performance & footprint\n", - "\n", - "Many additional constraints not visible from .json" + "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. For each of the layers, some node attributes are listed:\n", + "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", + "* `ram_style` determines which memory resource will be used for the layer.\n", + " * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n", + " * `distributed`: LUTRAM will be used\n", + " * `block`: BRAM will be used\n", + " * `ultra`: URAM will be used, if available on the selected board\n", + "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "* `resType`: This is a node attribute for the MVAU layer and can be set to `lut` or `dsp`. Please note that selecting `dsp` will not enable the optimized RTL variant of the MVAU but rather generate HLS code utilizing DSPs, this is not optimal yet but can give an additional parameter for design space exploration.\n", + "* `runtime_writeable_weights`: FINN offers the option to implement the weights as \"runtime writable\", this means you can write the weight values from the driver via an axilite interface." 
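+    "\n",
+    "A single entry of the loaded configuration can be tweaked in the same spirit (an illustrative sketch; the layer name below is an assumption, check the keys of your own `folding_config`, and any new `PE`/`SIMD` value must still respect the folding constraints linked above):\n",
+    "\n",
+    "```python\n",
+    "layer = 'MatrixVectorActivation_0'  # hypothetical key, inspect folding_config.keys()\n",
+    "if layer in folding_config and 'PE' in folding_config[layer]:\n",
+    "    # halve the parallelism of this one layer\n",
+    "    folding_config[layer]['PE'] = max(1, folding_config[layer]['PE'] // 2)\n",
+    "```"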
+ ] + }, + { + "cell_type": "markdown", + "id": "fd1519fe", + "metadata": {}, + "source": [ + "In the following part of the tutorial, we will use the auto generated json file as starting point to create two new json files which explore the `ram_style` attribute. We will use one of the generated reports from the FINN builder to see the impact of these changes.\n", + "For that, we will extract the total resources from the *estimate_layer_resources.json* report in the following cell." ] }, { @@ -699,12 +711,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "0be3b0e1", + "metadata": {}, + "source": [ + "The FINN compiler estimates the network to use ~500 BRAM blocks and ~100k LUTs." + ] + }, { "cell_type": "markdown", "id": "d4d177dc", "metadata": {}, "source": [ - "You can manually change, here we generate two new folding configurations with either all lutram or all bram" + "We will use the `auto_folding_config.json` and create two folding configuration from that file:\n", + "* All `ram_style` attributes set to `distributed`\n", + "* All `ram_style` attributes set to `block`" ] }, { @@ -714,6 +736,9 @@ "metadata": {}, "outputs": [], "source": [ + "with open(build_dir+\"/output_pre_and_post_proc/auto_folding_config.json\", 'r') as json_file:\n", + " folding_config = json.load(json_file)\n", + "\n", "# Set all ram_style to LUT RAM\n", "for key in folding_config:\n", " if \"ram_style\" in folding_config[key]:\n", @@ -731,6 +756,14 @@ " json.dump(folding_config, jsonFile)" ] }, + { + "cell_type": "markdown", + "id": "0e64a499", + "metadata": {}, + "source": [ + "After generating these files, we will invoke the builder flow. To enable the FINN builder to take the generated folding configuration as input, we will need to set the additional builder argument `folding_config_file` and we will change the `build_steps` to not run `step_target_fps_parallelization`. The build step does not necessarily need to be excluded, but since we pass a separate folding configuration, the output from that step would be overwritten anyways, so we skip it for a faster execution." + ] + }, { "cell_type": "code", "execution_count": null, @@ -741,7 +774,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_all_lutram\"\n", + "output_dir = \"output_all_lutram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -762,7 +795,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", @@ -785,6 +818,14 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "e705767d", + "metadata": {}, + "source": [ + "We can now have a look at the produced model, when clicking on the individual nodes, you can see that all layers have the node attribute `ram_style` set to `distributed`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -808,6 +849,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "55208c70", + "metadata": {}, + "source": [ + "The estimation report shows that BRAM utilization is down to zero and the LUT count went up to around 150k." 
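+    "\n",
+    "To compare several runs side by side, the report totals can also be collected programmatically (a sketch added for illustration; the key names `BRAM_18K` and `LUT` are assumptions based on the totals printed earlier):\n",
+    "\n",
+    "```python\n",
+    "import json\n",
+    "\n",
+    "for run in ['output_pre_and_post_proc', 'output_all_lutram']:\n",
+    "    with open(run + '/report/estimate_layer_resources.json') as f:\n",
+    "        total = json.load(f)['total']\n",
+    "    print(run, '->', {k: v for k, v in total.items() if k in ('BRAM_18K', 'LUT')})\n",
+    "```"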
+ ] + }, + { + "cell_type": "markdown", + "id": "11b8430a", + "metadata": {}, + "source": [ + "Let's do the same with the folding configuration which sets all memory resources to use BRAM." + ] + }, { "cell_type": "code", "execution_count": null, @@ -818,7 +875,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_all_bram\"\n", + "output_dir = \"output_all_bram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -839,7 +896,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", @@ -885,6 +942,22 @@ "print(json.dumps(json_object[\"total\"], indent=1))" ] }, + { + "cell_type": "markdown", + "id": "97f87780", + "metadata": {}, + "source": [ + "The initial implementation already had a high utilization of BRAM, but the estimations went now up to 522 BRAMs while the LUT count went down to ~99k." + ] + }, + { + "cell_type": "markdown", + "id": "e65a8ded", + "metadata": {}, + "source": [ + "You can use this example as a starting point to manipulate the folding configuration yourself. Instead of using the above code, you can also manually open one of the example .json files and set the values differently. Please be aware that the node attributes can not be set to arbitrary values. Especially the folding factors need to fulfil [certain constraints](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer). The other settings for node attributes, can be best looked up in the individual custom operator classes: [e.g. for MVAU](https://github.com/Xilinx/finn/blob/dev/src/finn/custom_op/fpgadataflow/matrixvectoractivation.py#L64)" + ] + }, { "cell_type": "markdown", "id": "4a675834", From 45e8c37faa7d542dddb0a6439f3085aaf83e4c96 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 29 Aug 2023 21:25:17 +0100 Subject: [PATCH 53/77] [nb] Add details about verification section in advanced nb --- .../4_advanced_builder_settings.ipynb | 180 +++++++++++++++--- 1 file changed, 151 insertions(+), 29 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 1e17f640ef..16c4e1a8fa 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -7,8 +7,6 @@ "source": [ "# Advanced Builder settings\n", "\n", - "**Live FINN tutorial:** We recommend clicking **Cell -> Run All** when you start reading this notebook for \"latency hiding\".\n", - "\n", "\"drawing\"\n", "\n", "In this notebook, we'll use the FINN compiler to generate an FPGA accelerator with a streaming dataflow architecture from a small convolutional network trained on CIFAR-10. The key idea in streaming dataflow architectures is to parallelize across layers as well as within layers by dedicating a proportionate amount of compute resources to each layer, illustrated on the figure to the left. You can read more about the general concept in the [FINN](https://arxiv.org/pdf/1612.07119) and [FINN-R](https://dl.acm.org/doi/pdf/10.1145/3242897) papers. 
This is done by mapping each layer to a Vitis HLS description, parallelizing each layer's implementation to the appropriate degree and using on-chip FIFOs to link up the layers to create the full accelerator.\n", @@ -135,6 +133,8 @@ "metadata": {}, "outputs": [], "source": [ + "## Quick recap on how to setup the default builder flow for resource estimations\n", + "\n", "import finn.builder.build_dataflow as build\n", "import finn.builder.build_dataflow_config as build_cfg\n", "import os\n", @@ -422,6 +422,8 @@ "metadata": {}, "outputs": [], "source": [ + "## Builder flow with custom step for pre-processing\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", @@ -528,14 +530,16 @@ "metadata": {}, "outputs": [], "source": [ + "## Builder flow with custom step for pre-processing and post-processing\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_pre_and_post_proc\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -771,14 +775,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with custom folding configuration\n", + "## folding_config_file = \"folding_config_all_lutram.json\"\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_all_lutram\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -872,14 +879,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with custom folding configuration\n", + "## folding_config_file = \"folding_config_all_bram.json\"\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", "output_dir = \"output_all_bram\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -966,6 +976,22 @@ "## Additional builder arguments " ] }, + { + "cell_type": "markdown", + "id": "f7012b9a", + "metadata": {}, + "source": [ + "In this section, we will have a peak into additional builder arguments the FINN compiler exposes. We will not be able to cover all but you will be able to have a look at a list and we encourage you to take your time to look into the different options there are to customize the FINN builder configuration." + ] + }, + { + "cell_type": "markdown", + "id": "467d8829", + "metadata": {}, + "source": [ + "We start by enabling the verification flow in the builder. The FINN compiler applies multiple transformations to the model before it gets turned into hardware, so we need to make sure that the functional behavior of the network does not change." 
+ ] + }, { "cell_type": "markdown", "id": "e0c167f4", @@ -974,6 +1000,14 @@ "### Verification steps " ] }, + { + "cell_type": "markdown", + "id": "308d52ba", + "metadata": {}, + "source": [ + "Earlier in the tutorial, we had a look at how build steps are written. When investigating the `step_tidy_up`, we can see that before the changed model is returned a verification step can be run. In the case of `step_tidy_up` it is the step `\"initial python\"` that can be initiated by setting `VerificationStepType.TIDY_UP_PYTHON`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -985,6 +1019,14 @@ "showSrc(build_dataflow_steps.step_tidy_up)" ] }, + { + "cell_type": "markdown", + "id": "2bbb84fb", + "metadata": {}, + "source": [ + "Some of the default build steps have automatic verification enabled, when the corresponding verification step is set." + ] + }, { "cell_type": "code", "execution_count": null, @@ -995,6 +1037,14 @@ "showSrc(build_cfg.VerificationStepType)" ] }, + { + "cell_type": "markdown", + "id": "da1a2b88", + "metadata": {}, + "source": [ + "In the cells below, we will use an example input from the CIFAR-10 data set and use the forward pass in Brevitas to generate a reference output. We save the input as `input.npy` and the reference output as `expected_output.npy`." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1018,6 +1068,14 @@ "np.save(\"expected_output.npy\", output_tensor_npy)" ] }, + { + "cell_type": "markdown", + "id": "d03450e7", + "metadata": {}, + "source": [ + "In the next step we set up the builder flow again, this time we will set the build argument `verify_steps` and pass a list of verification steps." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1025,14 +1083,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## verification steps\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_with_verification\"\n", + "output_dir = \"output_with_verification\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1050,7 +1111,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1067,6 +1128,14 @@ ")" ] }, + { + "cell_type": "markdown", + "id": "1d05b985", + "metadata": {}, + "source": [ + "When execution the code below, the verification will be invoked in the background. After the execution we can check if the verification was successful by investigating the output directory." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1078,6 +1147,61 @@ "build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, + { + "cell_type": "markdown", + "id": "ca1d571d", + "metadata": {}, + "source": [ + "The output directory has now an additional directory called `verification_output`." 
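+    "\n",
+    "Once the files discussed below exist, the saved array can also be compared against the golden output in code (a hedged sketch; the exact file name depends on your run and on the `_SUCCESS`/`_FAIL` suffix):\n",
+    "\n",
+    "```python\n",
+    "import numpy as np\n",
+    "\n",
+    "out = np.load('output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy')\n",
+    "exp = np.load('expected_output.npy')\n",
+    "print(np.allclose(out, exp))\n",
+    "```"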
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca74d537", + "metadata": {}, + "outputs": [], + "source": [ + "!ls output_with_verification" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "908ecda4", + "metadata": {}, + "outputs": [], + "source": [ + "!ls output_with_verification/verification_output" + ] + }, + { + "cell_type": "markdown", + "id": "bcbc6f49", + "metadata": {}, + "source": [ + "The directory contains three .npy files. These files are the saved output files from the different verification steps. The suffix indicates if the array matches with the expected output. In our case, the suffix is for all verification steps `_SUCCESS`. Since the outputs are saved as .npy, we can open and investigate the files simply in Python." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7a1b6ca9", + "metadata": {}, + "outputs": [], + "source": [ + "verify_initial_python = np.load(\"output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n", + "print(\"The output of the verification step after the step_tidy_up is: \" + str(verify_initial_python))" + ] + }, + { + "cell_type": "markdown", + "id": "6558e19e", + "metadata": {}, + "source": [ + "If the generated output does not match the expected output, these files can be used for debugging." + ] + }, { "cell_type": "markdown", "id": "f0b30546", @@ -1109,14 +1233,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## standalone_thresholds = True\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_standalone_thresholds\"\n", + "output_dir = \"output_standalone_thresholds\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1134,7 +1261,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1183,14 +1310,17 @@ "metadata": {}, "outputs": [], "source": [ + "## Build flow with additional builder arguments enabled\n", + "## force_rtl_conv_inp_gen = True\n", + "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_rtl_swg\"\n", + "output_dir = \"output_rtl_swg\"\n", "\n", "#Delete previous run results if exist\n", - "if os.path.exists(estimates_output_dir):\n", - " shutil.rmtree(estimates_output_dir)\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", " print(\"Previous run results deleted!\")\n", "\n", "build_steps = [\n", @@ -1208,7 +1338,7 @@ "]\n", "\n", "cfg_estimates = build.DataflowBuildConfig(\n", - " output_dir = estimates_output_dir,\n", + " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", " target_fps = 1000000,\n", " synth_clk_period_ns = 10.0,\n", @@ -1275,7 +1405,7 @@ "id": "b12ab370", "metadata": {}, "source": [ - "There are attributes that come from the dataclasses-json class: to_dict, to_json, schema, from_json, from_dict. These are not FINN builder specific. 
Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n",
    "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155"
   ]
  },
@@ -1315,14 +1445,6 @@
    "import finn.builder.build_dataflow_steps as build_dataflow_steps\n",
    "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)"
   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "1ec10985",
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {

From e72c9dd0f3274833536c319ce791076811d4989b Mon Sep 17 00:00:00 2001
From: auphelia <jakobapk@web.de>
Date: Thu, 31 Aug 2023 15:50:19 +0100
Subject: [PATCH 54/77] [nb] Clean up advanced nb

---
 .../4_advanced_builder_settings.ipynb         | 179 +++++++++---------
 1 file changed, 86 insertions(+), 93 deletions(-)

diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb
index 16c4e1a8fa..1136dba9f4 100644
--- a/notebooks/advanced/4_advanced_builder_settings.ipynb
+++ b/notebooks/advanced/4_advanced_builder_settings.ipynb
@@ -36,8 +36,8 @@
    "4. [Folding configuration json](#folding_config)\n",
    "5. [Additional builder arguments](#builder_arg)\n",
    "    1. [Verification steps](#verify)\n",
-    "    2. [Examples for additional builder arguments](#example_args)\n",
-    "    3. [Other builder arguments](#other_args)"
+    "    2. [Other builder arguments](#other_args)\n",
+    "    3. [Examples for additional builder arguments](#example_args)"
   ]
  },
@@ -284,9 +284,9 @@
   "source": [
    "We have two nodes at the end of the graph that we were not able to convert: a floating point scalar multiplication and addition. \n",
    "\n",
-    "Our example is a network for image classification, so the output is a vector of 10 values that give a probability for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n",
+    "Our example is a network for image classification, so the output is a vector of 10 values that give a prediction score for each of the classes in the CIFAR-10 data set. If we are only interested in the Top-1 result of the classification, we can add a post-processing step which inserts a TopK node in the graph. \n",
    "\n",
-    "Since the last two layers are scalar operations, they have the same influence on all probability values in the output vector and we can safely merge them into the TopK node. "
+    "Since the last two layers are scalar operations, they have the same influence on all prediction scores in the output vector and we can safely merge them into the TopK node. "
   ]
  },
@@ -683,12 +683,13 @@
  "source": [
    "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. 
For each of the layers, some node attributes are listed:\n", "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", - "* `ram_style` determines which memory resource will be used for the layer.\n", + "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to determine which memory resource will be used for the layer. The argument `ram_style` is set to the selected memory type:\n", " * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n", " * `distributed`: LUTRAM will be used\n", " * `block`: BRAM will be used\n", " * `ultra`: URAM will be used, if available on the selected board\n", - "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", + "\n", "* `resType`: This is a node attribute for the MVAU layer and can be set to `lut` or `dsp`. Please note that selecting `dsp` will not enable the optimized RTL variant of the MVAU but rather generate HLS code utilizing DSPs, this is not optimal yet but can give an additional parameter for design space exploration.\n", "* `runtime_writeable_weights`: FINN offers the option to implement the weights as \"runtime writable\", this means you can write the weight values from the driver via an axilite interface." ] @@ -1204,32 +1205,98 @@ }, { "cell_type": "markdown", - "id": "f0b30546", + "id": "4609f94d", "metadata": {}, "source": [ - "### Examples for additional builder arguments " + "### Other builder arguments " ] }, { "cell_type": "markdown", - "id": "ddfb40e4", + "id": "37b6853d", "metadata": {}, "source": [ - "#### Standalone Thresholds" + "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9f6aa29", + "metadata": {}, + "outputs": [], + "source": [ + "# Filter out methods\n", + "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", + "print(\"\\n\".join(builder_args))" + ] + }, + { + "cell_type": "markdown", + "id": "b12ab370", + "metadata": {}, + "source": [ + "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. 
target_fps, fpga_part, folding_config_file, ...\n", + "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" + ] + }, + { + "cell_type": "markdown", + "id": "9aba0493", + "metadata": {}, + "source": [ + "So far, in this notebook, we only looked at configurations up to the generation of estimate reports, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ec39b9f2", + "metadata": {}, + "outputs": [], + "source": [ + "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" ] }, { "cell_type": "markdown", - "id": "bddbd686", + "id": "76df000f", "metadata": {}, "source": [ - " picture of im2col + matmul + multithreshold" + "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "caf49f03", + "metadata": {}, + "outputs": [], + "source": [ + "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", + "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" + ] + }, + { + "cell_type": "markdown", + "id": "3b98eb65", + "metadata": {}, + "source": [ + "### Examples for additional builder arguments " + ] + }, + { + "cell_type": "markdown", + "id": "0dbdab42", + "metadata": {}, + "source": [ + "#### Standalone Thresholds" ] }, { "cell_type": "code", "execution_count": null, - "id": "de55871e", + "id": "2619ebde", "metadata": {}, "outputs": [], "source": [ @@ -1277,7 +1344,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c143f97a", + "id": "b2e9bc42", "metadata": {}, "outputs": [], "source": [ @@ -1288,7 +1355,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ba36f07b", + "id": "32ae296e", "metadata": {}, "outputs": [], "source": [ @@ -1297,7 +1364,7 @@ }, { "cell_type": "markdown", - "id": "b710fd28", + "id": "074d8253", "metadata": {}, "source": [ "#### RTL Convolutional Input Generator" @@ -1306,7 +1373,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8249280d", + "id": "ab0c4974", "metadata": {}, "outputs": [], "source": [ @@ -1354,7 +1421,7 @@ { "cell_type": "code", "execution_count": null, - "id": "64e83b16", + "id": "19fe4d85", "metadata": {}, "outputs": [], "source": [ @@ -1365,86 +1432,12 @@ { "cell_type": "code", "execution_count": null, - "id": "09c45dcd", + "id": "4c1f1ce9", "metadata": {}, "outputs": [], "source": [ "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" ] - }, - { - "cell_type": "markdown", - "id": "4609f94d", - "metadata": {}, - "source": [ - "### Other builder arguments " - ] - }, - { - "cell_type": "markdown", - "id": "37b6853d", - "metadata": {}, - "source": [ - "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e9f6aa29", - "metadata": {}, - "outputs": [], - "source": [ - "# Filter out methods\n", - "builder_args = [m for m in dir(build_cfg.DataflowBuildConfig) if not m.startswith('_')]\n", - "print(\"\\n\".join(builder_args))" - ] - }, - { - "cell_type": "markdown", - "id": "b12ab370", - "metadata": {}, - "source": [ - "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. 
Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", - "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" - ] - }, - { - "cell_type": "markdown", - "id": "9aba0493", - "metadata": {}, - "source": [ - "So far, in this notebook, we only looked at configurations up to the generation of estimate reports so far, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec39b9f2", - "metadata": {}, - "outputs": [], - "source": [ - "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" - ] - }, - { - "cell_type": "markdown", - "id": "76df000f", - "metadata": {}, - "source": [ - "You can have a closer look at each step by either using the `showSrc()` function or by accessing the doc string." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "caf49f03", - "metadata": {}, - "outputs": [], - "source": [ - "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", - "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" - ] } ], "metadata": { From 79212877f4818eb322b76066741a7ac31a62a7fb Mon Sep 17 00:00:00 2001 From: auphelia Date: Sat, 2 Sep 2023 17:43:35 +0100 Subject: [PATCH 55/77] [NB] Rework end part of advanced builder tutorial --- .../4_advanced_builder_settings.ipynb | 182 ++++++++++++++++-- 1 file changed, 168 insertions(+), 14 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 1136dba9f4..aa244e4983 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -37,7 +37,7 @@ "5. [Additional builder arguments](#builder_arg)\n", " 1. [Verification steps](#verify)\n", " 2. [Other builder arguments](#other_args)\n", - " 3. [Examples for additional builder arguments](#example_args)" + " 3. [Examples for additional builder arguments & bitfile generation](#example_args)" ] }, { @@ -684,7 +684,7 @@ "As you can see from the printed cell above, the keys in the .json file are the node names of the layers in our network. For each of the layers, some node attributes are listed:\n", "* `PE` and `SIMD` are the folding parameters that determine the parallelism of each layer, depending on the layer they can be set to different values, for details refer to [this table](https://finn-dev.readthedocs.io/en/latest/internals.html#constraints-to-folding-factors-per-layer).\n", "* `mem_mode`: determines if the parameter memory will be implemented as part of the HLS code (`const`) or instantiated separately and connected with the layer over a memory streamer unit (`decoupled`). You can find more details in this part of the documentation: https://finn-dev.readthedocs.io/en/latest/internals.html#matrixvectoractivation-mem-mode . It is also possible to set the mem_mode to external which allows for the implementation for external weights.\n", - "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to determine which memory resource will be used for the layer. The argument `ram_style` is set to the selected memory type:\n", + "* `ram_style`: when selecting `decoupled` mode, the FINN compiler allows us to choose which memory resource will be used for the layer. 
The argument `ram_style` is set to the selected memory type:\n", " * `auto`: Vivado will make the decision if the implementation is using LUTRAM or BRAM\n", " * `distributed`: LUTRAM will be used\n", " * `block`: BRAM will be used\n", @@ -1216,7 +1216,8 @@ "id": "37b6853d", "metadata": {}, "source": [ - "Let's have a look at the additional builder arguments. We want to only filter out the FINN specific arguments." + "Next to the enablement of the verification flows, the FINN builder has numerous additional builder arguments to further customize your network. \n", + "Let's have a look at the options for the arguments. We want to only filter out the FINN specific arguments." ] }, { @@ -1236,8 +1237,9 @@ "id": "b12ab370", "metadata": {}, "source": [ - "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. These are not FINN builder specific. Some of the arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part, folding_config_file, ...\n", - "Please have a look here and scroll through the available builder arguments: https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155" + "There are attributes that come from the dataclasses-json class: `to_dict`, `to_json`, `schema`, `from_json`, `from_dict`. This class is used for the implementation of the FINN builder. In this tutorial, we are mainly interested in the FINN specific arguments. \n", + "\n", + "Some of these arguments we have seen already in the Cybersecurity notebook and in this notebook, e.g. target_fps, fpga_part and folding_config_file. In the code of the FINN builder, the function of each builder argument is documents, you can have a look [here](https://github.com/Xilinx/finn/blob/dev/src/finn/builder/build_dataflow_config.py#L155) and scroll through the available builder arguments." ] }, { @@ -1245,7 +1247,9 @@ "id": "9aba0493", "metadata": {}, "source": [ - "So far, in this notebook, we only looked at configurations up to the generation of estimate reports, a lot of these builder arguments actually become relevant at a later stage in the FINN flow." + "So far, in this notebook, we only looked at configurations up to the generation of estimate reports, a lot of these builder arguments actually become relevant at a later stage in the FINN flow.\n", + "\n", + "Let's have a look at the default build dataflow steps for the complete FINN flow." ] }, { @@ -1258,6 +1262,15 @@ "print(\"\\n\".join(build_cfg.default_build_dataflow_steps))" ] }, + { + "cell_type": "markdown", + "id": "b9bc5715", + "metadata": {}, + "source": [ + "You can see that after the generation of the estimate reports, the code generation and the ip generation is invoked (`step_hls_codegen` and `step_hls_ipgen`). The FIFO depths are determined and the FIFOs are inserted in the network (`step_set_fifo_depths`), we can then create an IP design of our whole network by stitching the IPs from each layer together (`step_create_stitched_ip`). At this point we have an implementation of the neural network that we can integrate within a bigger FPGA design, we can run performance measurements using simulation (`step_measure_rtlsim_performance`) and out-of-context synthesis (`step_out_of_context_synthesis`) for it.\n", + "The FINN builder also provides automatic system integration for Zynq and Alveo devices, this can be invoked by running `step_synthesize_bitfile`, `step_make_pynq_driver` and `step_deployment_package`." 
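Since a full run of these steps can take hours, it is worth knowing that a flow can also be picked up part-way through. A sketch, reusing the `build`/`build_cfg` imports from above and assuming the `start_step`/`stop_step` builder arguments, which name the first and last step to execute (the `output_dir` value here is a placeholder for an earlier run's output directory):

    # rerun only the stitching and rtlsim-performance stages of a previous build
    cfg_resume = build.DataflowBuildConfig(
        output_dir = "output_bitfile",
        synth_clk_period_ns = 10.0,
        fpga_part = "xc7z020clg400-1",
        start_step = "step_create_stitched_ip",
        stop_step = "step_measure_rtlsim_performance",
        generate_outputs = [build_cfg.DataflowOutputType.STITCHED_IP],
    )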
+ ] + }, { "cell_type": "markdown", "id": "76df000f", @@ -1274,7 +1287,25 @@ "outputs": [], "source": [ "import finn.builder.build_dataflow_steps as build_dataflow_steps\n", - "print(build_dataflow_steps.step_create_dataflow_partition.__doc__)" + "print(build_dataflow_steps.step_hls_codegen.__doc__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c84a9fbc", + "metadata": {}, + "outputs": [], + "source": [ + "showSrc(build_dataflow_steps.step_hls_codegen)" + ] + }, + { + "cell_type": "markdown", + "id": "c249f141", + "metadata": {}, + "source": [ + "This concludes the advanced builder settings tutorial. Below you can find code that can help you investigating more of the builder arguments and invoking the whole flow to generate a bitfile." ] }, { @@ -1282,7 +1313,7 @@ "id": "3b98eb65", "metadata": {}, "source": [ - "### Examples for additional builder arguments " + "### Examples for additional builder arguments & bitfile generation " ] }, { @@ -1293,6 +1324,21 @@ "#### Standalone Thresholds" ] }, + { + "cell_type": "markdown", + "id": "e21ff36f", + "metadata": {}, + "source": [ + "In FINN, convolutions are expressed with three components:\n", + "* An Im2Col operation\n", + "* A matrix multiplication\n", + "* A MultiThreshold operation\n", + "\n", + "When converting these nodes into HLS layers, by default the MatMul and the MultiThreshold gets converted into **one** component called Matrix-Vector-Activation Unit (MVAU). But the FINN compiler allows us to implement the activation separately. This gives an additional possibility for customization because we can adjust the folding parameters of the standalone threshold unit independently. \n", + "\n", + "If you would like to enable this feature, you can set the build argument `standalone_thresholds` to `True`. In the code below this feature is enabled and you can have a look at the generated .onnx file. Please note that you need to uncomment the code first." + ] + }, { "cell_type": "code", "execution_count": null, @@ -1348,8 +1394,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -1359,7 +1405,7 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" + "#showInNetron(build_dir+\"/output_standalone_thresholds/intermediate_models/step_generate_estimate_reports.onnx\")" ] }, { @@ -1370,6 +1416,26 @@ "#### RTL Convolutional Input Generator" ] }, + { + "cell_type": "markdown", + "id": "b85e5ac7", + "metadata": {}, + "source": [ + "Recently, we have worked on the *Operator Hardening* in the FINN compiler. This means that we implement core building blocks in RTL instead of using HLS.\n", + "One of these components is already available in the FINN compiler, you can enable the RTL implementation of the ConvolutionInputGenerator (aka Sliding Window Generator) by setting the build argument `force_rtl_conv_inp_gen` to `True`.\n", + "In the code below this feature is enabled and you can have a look at the generated .onnx file. Please note that you need to uncomment the code first." + ] + }, + { + "cell_type": "markdown", + "id": "2a90b63f", + "metadata": {}, + "source": [ + "
<div class=\"alert alert-block alert-info\">\n",
+    "Important notice: We are actively working on the integration of RTL components in the FINN flow, so the enablement shown below might change in the future.\n",
+    "</div>
" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1425,8 +1491,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_estimates);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_estimates);" ] }, { @@ -1436,7 +1502,95 @@ "metadata": {}, "outputs": [], "source": [ - "showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + "#showInNetron(build_dir+\"/output_rtl_swg/intermediate_models/step_generate_estimate_reports.onnx\")" + ] + }, + { + "cell_type": "markdown", + "id": "601eb5f8", + "metadata": {}, + "source": [ + "#### Run the whole flow" + ] + }, + { + "cell_type": "markdown", + "id": "42aa929b", + "metadata": {}, + "source": [ + "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take up to an hour. Please note that you need to uncomment the code first." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4efd46f4", + "metadata": {}, + "outputs": [], + "source": [ + "## Build flow with hardware build\n", + "\n", + "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", + "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", + "\n", + "output_dir = \"output_bitfile\"\n", + "\n", + "#Delete previous run results if exist\n", + "if os.path.exists(output_dir):\n", + " shutil.rmtree(output_dir)\n", + " print(\"Previous run results deleted!\")\n", + "\n", + "build_steps = [\n", + " custom_step_add_pre_proc,\n", + " custom_step_add_post_proc,\n", + " \"step_qonnx_to_finn\",\n", + " \"step_tidy_up\",\n", + " \"step_streamline\",\n", + " \"step_convert_to_hls\",\n", + " \"step_create_dataflow_partition\",\n", + " \"step_target_fps_parallelization\",\n", + " \"step_apply_folding_config\",\n", + " \"step_minimize_bit_width\",\n", + " \"step_generate_estimate_reports\",\n", + " \"step_hls_codegen\",\n", + " \"step_hls_ipgen\",\n", + " \"step_set_fifo_depths\",\n", + " \"step_create_stitched_ip\",\n", + " \"step_measure_rtlsim_performance\",\n", + " \"step_out_of_context_synthesis\",\n", + " \"step_synthesize_bitfile\",\n", + " \"step_make_pynq_driver\",\n", + " \"step_deployment_package\",\n", + "]\n", + "\n", + "cfg_build = build.DataflowBuildConfig(\n", + " output_dir = output_dir,\n", + " mvau_wwidth_max = 80,\n", + " target_fps = 1000000,\n", + " synth_clk_period_ns = 10.0,\n", + " fpga_part = \"xc7z020clg400-1\",\n", + " steps = build_steps,\n", + " generate_outputs=[\n", + " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", + " build_cfg.DataflowOutputType.STITCHED_IP,\n", + " build_cfg.DataflowOutputType.RTLSIM_PERFORMANCE,\n", + " build_cfg.DataflowOutputType.OOC_SYNTH,\n", + " build_cfg.DataflowOutputType.BITFILE,\n", + " build_cfg.DataflowOutputType.PYNQ_DRIVER,\n", + " build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c7ff6c19", + "metadata": {}, + "outputs": [], + "source": [ + "%%time\n", + "build.build_dataflow_cfg(model_file, cfg_build);" ] } ], From 3295c9bdd60fa1e8a99ae32de456e84ff7decda6 Mon Sep 17 00:00:00 2001 From: auphelia Date: Sat, 2 Sep 2023 22:30:47 +0100 Subject: [PATCH 56/77] [nb] Comment last build flow run --- notebooks/advanced/4_advanced_builder_settings.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb 
b/notebooks/advanced/4_advanced_builder_settings.ipynb index aa244e4983..8e0e3ef8cf 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -1518,7 +1518,7 @@ "id": "42aa929b", "metadata": {}, "source": [ - "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take up to an hour. Please note that you need to uncomment the code first." + "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take over an hour. Please note that you need to uncomment the code first." ] }, { @@ -1566,7 +1566,7 @@ "cfg_build = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 100,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1589,8 +1589,8 @@ "metadata": {}, "outputs": [], "source": [ - "%%time\n", - "build.build_dataflow_cfg(model_file, cfg_build);" + "#%%time\n", + "#build.build_dataflow_cfg(model_file, cfg_build);" ] } ], From c7cbe5e5f478fe73caf7aa3c1ffac53a519dc33e Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 10:27:51 +0100 Subject: [PATCH 57/77] [nb] Update final build flow --- .../4_advanced_builder_settings.ipynb | 40 +++++++++++++++---- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 8e0e3ef8cf..38bc19a6ca 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -154,7 +154,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = estimates_output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_cfg.estimate_only_dataflow_steps,\n", @@ -450,7 +450,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -559,7 +559,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1114,7 +1114,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " steps = build_steps,\n", @@ -1376,7 +1376,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = \"xc7z020clg400-1\",\n", " standalone_thresholds = True,\n", @@ -1473,7 +1473,7 @@ "cfg_estimates = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 1000000,\n", + " target_fps = 10000,\n", " synth_clk_period_ns = 10.0,\n", " fpga_part = 
\"xc7z020clg400-1\",\n", " force_rtl_conv_inp_gen = True,\n", @@ -1521,6 +1521,24 @@ "The code below can be used to invoke the full builder flow and obtain more output products, be aware that this runs synthesis and bitfile generation and it might take over an hour. Please note that you need to uncomment the code first." ] }, + { + "cell_type": "markdown", + "id": "ffa2a352", + "metadata": {}, + "source": [ + "For an optimized design, we download the folding configuration for cnv-w2a2 on the Pynq-Z1 board from [finn-examples](https://github.com/Xilinx/finn-examples). And will pass it to the build flow. Please also note below that we now pass the board as argument to the builder (`board = \"Pynq-Z1\"`) instead of just the fpga part. This time we will select all possible outputs to generate. Please be aware that running the full build might take a few hours." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "765e5ee7", + "metadata": {}, + "outputs": [], + "source": [ + "!wget https://raw.githubusercontent.com/Xilinx/finn-examples/main/build/bnn-pynq/folding_config/cnv-w2a2_folding_config.json" + ] + }, { "cell_type": "code", "execution_count": null, @@ -1528,6 +1546,11 @@ "metadata": {}, "outputs": [], "source": [ + "import finn.builder.build_dataflow as build\n", + "import finn.builder.build_dataflow_config as build_cfg\n", + "import os\n", + "import shutil\n", + "\n", "## Build flow with hardware build\n", "\n", "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", @@ -1566,9 +1589,10 @@ "cfg_build = build.DataflowBuildConfig(\n", " output_dir = output_dir,\n", " mvau_wwidth_max = 80,\n", - " target_fps = 100,\n", " synth_clk_period_ns = 10.0,\n", - " fpga_part = \"xc7z020clg400-1\",\n", + " folding_config_file = \"cnv-w2a2_folding_config.json\",\n", + " board = \"Pynq-Z1\",\n", + " shell_flow_type = build_cfg.ShellFlowType.VIVADO_ZYNQ,\n", " steps = build_steps,\n", " generate_outputs=[\n", " build_cfg.DataflowOutputType.ESTIMATE_REPORTS,\n", From ed163af32f0a43382f19145138432a042840bc55 Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 10:37:34 +0100 Subject: [PATCH 58/77] [Tests] Integrate advanced notebook into test suite --- tests/notebooks/test_jupyter_notebooks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/notebooks/test_jupyter_notebooks.py b/tests/notebooks/test_jupyter_notebooks.py index c2542380f1..e1415b9066 100644 --- a/tests/notebooks/test_jupyter_notebooks.py +++ b/tests/notebooks/test_jupyter_notebooks.py @@ -21,6 +21,7 @@ pytest.param(notebook_advanced_dir + "1_custom_transformation_pass.ipynb"), pytest.param(notebook_advanced_dir + "2_custom_op.ipynb"), pytest.param(notebook_advanced_dir + "3_folding.ipynb"), + pytest.param(notebook_advanced_dir + "4_advanced_builder_settings.ipynb"), ] cyber_notebooks = [ From 2d42e9b8650942aad6a52fb7378548238fcc43ff Mon Sep 17 00:00:00 2001 From: auphelia Date: Mon, 18 Sep 2023 11:01:15 +0100 Subject: [PATCH 59/77] [NBs] Make paths in advanced notebook absolute for testing --- notebooks/advanced/4_advanced_builder_settings.ipynb | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb b/notebooks/advanced/4_advanced_builder_settings.ipynb index 38bc19a6ca..4af48ac233 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -190,7 +190,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r 
output_estimates_only/intermediate_models" + "!ls -t -r {build_dir}/output_estimates_only/intermediate_models" ] }, { @@ -478,7 +478,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r output_pre_proc/intermediate_models" + "!ls -t -r {build_dir}/output_pre_proc/intermediate_models" ] }, { @@ -587,7 +587,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls -t -r output_pre_and_post_proc/intermediate_models" + "!ls -t -r {build_dir}/output_pre_and_post_proc/intermediate_models" ] }, { @@ -1163,7 +1163,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls output_with_verification" + "!ls {build_dir}/output_with_verification" ] }, { @@ -1173,7 +1173,7 @@ "metadata": {}, "outputs": [], "source": [ - "!ls output_with_verification/verification_output" + "!ls {build_dir}/output_with_verification/verification_output" ] }, { From bbda540140427aa1d43a7f78c7e79332bc4e7bbe Mon Sep 17 00:00:00 2001 From: johnnoel Date: Fri, 22 Sep 2023 16:03:46 +0100 Subject: [PATCH 60/77] Update .Xilinx messaging --- docker/finn_entrypoint.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/finn_entrypoint.sh b/docker/finn_entrypoint.sh index b441c9359a..6b33a4c9bc 100644 --- a/docker/finn_entrypoint.sh +++ b/docker/finn_entrypoint.sh @@ -118,6 +118,7 @@ if [ -d "$FINN_ROOT/.Xilinx" ]; then mkdir "$HOME/.Xilinx" if [ -f "$FINN_ROOT/.Xilinx/HLS_init.tcl" ]; then cp "$FINN_ROOT/.Xilinx/HLS_init.tcl" "$HOME/.Xilinx/" + gecho "Found HLS_init.tcl and copied to $HOME/.Xilinx/HLS_init.tcl" else yecho "Unable to find $FINN_ROOT/.Xilinx/HLS_init.tcl" fi @@ -125,14 +126,13 @@ if [ -d "$FINN_ROOT/.Xilinx" ]; then if [ -f "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" ]; then mkdir "$HOME/.Xilinx/Vivado/" cp "$FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" "$HOME/.Xilinx/Vivado/" + gecho "Found Vivado_init.tcl and copied to $HOME/.Xilinx/Vivado/Vivado_init.tcl" else yecho "Unable to find $FINN_ROOT/.Xilinx/Vivado/Vivado_init.tcl" fi else - yecho "Unable to find $FINN_ROOT/.Xilinx" - yecho "Functionality dependent on beta devices will not be available." - yecho "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl " - yecho "are set correctly and mounted into the Docker container." 
+ echo "If you need to enable a beta device, ensure .Xilinx/HLS_init.tcl and/or .Xilinx/Vivado/Vivado_init.tcl are set correctly and mounted" + echo "See https://docs.xilinx.com/r/en-US/ug835-vivado-tcl-commands/Tcl-Initialization-Scripts" fi export PATH=$PATH:$HOME/.local/bin From 161544ae8765b6fe29ef37e5184ab8eca6eee7a1 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Fri, 29 Sep 2023 11:16:57 +0100 Subject: [PATCH 61/77] Move successful archive step to parallel stage instead of post --- docker/jenkins/Jenkinsfile | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index f4f0533c3f..1f86ac1ef6 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -69,6 +69,9 @@ pipeline { // Use an env variable to help collect test results later in pipeline env.SANITY_UT = "SUCCESS" + + // Archive coverage report if successful + archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") } } } @@ -97,6 +100,9 @@ pipeline { // Use an env variable to help collect test results later in pipeline env.FPGADATAFLOW_RESULT = "SUCCESS" + + // Archive coverage report if successful + archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") } } } @@ -729,9 +735,6 @@ pipeline { archiveArtifacts artifacts: "reports/*.xml" archiveArtifacts artifacts: "reports/*.html" - archiveSuccessfulStage(env.SANITY_UT, "coverage_sanity_ut") - archiveSuccessfulStage(env.FPGADATAFLOW_RESULT, "coverage_fpgadataflow") - // Plot what XML files were created during the test run junit 'reports/*.xml' } From dd7806eff7b80212440d115886f16c26773de1a6 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Mon, 2 Oct 2023 16:37:06 +0100 Subject: [PATCH 62/77] [CI] Append a space to FINN_DOCKER_EXTRA to avoid malformed docker commands Jenkins unexpectedly trims trailing spaces from env variables. This leads to badly formed inputs for docker. Appending an extra space solves this issues and causes no further problems. 
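A tiny Python stand-in makes the failure mode visible (hypothetical flag values, chosen only for illustration; the real concatenation happens in run-docker.sh):

    docker_extra = "-v /scratch:/scratch"    # value as Jenkins hands it over, trailing space trimmed
    broken = "docker run " + docker_extra + "--rm example"  # flags fuse into "-v /scratch:/scratch--rm"
    docker_extra += " "                      # the appended space keeps later additions separate
    fixed = "docker run " + docker_extra + "--rm example"
    print(broken)
    print(fixed)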
--- run-docker.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/run-docker.sh b/run-docker.sh index c24dcec724..8df03636bb 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -100,6 +100,9 @@ SCRIPTPATH=$(dirname "$SCRIPT") DOCKER_INTERACTIVE="" +# Catch FINN_DOCKER_EXTRA options being passed in without a trailing space +FINN_DOCKER_EXTRA+=" " + if [ "$1" = "test" ]; then gecho "Running test suite (all tests)" DOCKER_CMD="python setup.py test" From 56b155fb60651ac8d9bf1d68603808ce78bb0fee Mon Sep 17 00:00:00 2001 From: johnnoel Date: Thu, 5 Oct 2023 15:31:39 +0100 Subject: [PATCH 63/77] [CI] Address PR comments --- docker/jenkins/Jenkinsfile | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 1f86ac1ef6..2d7ea5e918 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -126,7 +126,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Delete any build files from a previous build - sh "rm -rf ${env.FINN_HOST_BUILD_DIR}/*" + cleanPreviousBuildFiles(env.FINN_HOST_BUILD_DIR) // Pass in the marker to run with pytest and the XML test results filename runDockerPytestWithMarker(env.TEST_NAME, "${env.TEST_NAME}", '') @@ -310,7 +310,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "rm -rf ${env.BOARD}*" + cleanPreviousBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -358,7 +358,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_PynqZ1_zip" @@ -409,7 +409,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -458,7 +458,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "sanity_${env.BOARD}_zip" @@ -510,7 +510,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "rm -rf ${env.BOARD}*" + cleanPreviousBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -558,7 +558,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "PynqZ1_zip" @@ -609,7 +609,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -658,7 +658,7 @@ pipeline { catchError(stageResult: 'FAILURE') { script { // Clean any files from a previous run - sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${env.BOARD}*" + cleanPreviousBoardBuildFiles("${env.BOARD}*") // Get the test files unstash name: "${env.BOARD}_zip" @@ -747,7 +747,17 @@ pipeline { void cleanPreviousBuildFiles(String 
buildDir) { // Delete any build files from a previous build // Previous build folders affect findCopyZip() and can cause the stage to fail - sh "rm -rf ${buildDir}/*" + if (!buildDir.empty) { + sh "rm -rf ${buildDir}" + } +} + +void cleanPreviousBoardBuildFiles(String boardDir) { + // Delete any board build files + // Specifically used on Pynq boards which require sudo to delete + if (!boardDir.empty) { + sh "echo $USER_CREDENTIALS_PSW | sudo -S rm -rf ${boardDir}*" + } } void createMultiMarkerScript(String markers, String testResultsFilename, String additionalOptions) { @@ -765,7 +775,7 @@ void runDockerPytestWithMarker(String marker, String testResultsFilename, String sh """./run-docker.sh python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html ${additionalOptions}""" } -void findBoardBuildFiles(String board, String searchDir, String dirToFind) { +def findBoardBuildFiles(String searchDir, String dirToFind) { def result = sh(script: "find $searchDir -type d -name \"$dirToFind*\"", returnStdout: true).trim() if (result.empty) { error "Directory containing '$dirToFind' not found." @@ -774,7 +784,7 @@ void findBoardBuildFiles(String board, String searchDir, String dirToFind) { } void findCopyZip(String board, String findDir, String copyDir, String stashName) { - def buildDir = findBoardBuildFiles(board, findDir, "hw_deployment_${board}") + def buildDir = findBoardBuildFiles(findDir, "hw_deployment_${board}") sh "cp -r ${buildDir}/${board} ${copyDir}/" dir(copyDir) { sh "zip -r ${board}.zip ${board}/" @@ -802,7 +812,7 @@ python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${tes sh 'chmod 777 run-tests.sh' } -void isNodeOnline(String labelName) { +def isNodeOnline(String labelName) { Label label = Jenkins.instance.getLabel(labelName) def agentOnline = false From 206737f9bbb2ff90a8ead03422cc7aac2e3dc7ac Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 14:21:55 +0100 Subject: [PATCH 64/77] [Fix] Deprecate pkg-resources and update setuptools --- .isort.cfg | 2 +- .../bnn-pynq/cnv_end2end_example.ipynb | 11 ++++++----- requirements.txt | 2 ++ setup.py | 10 ---------- .../transformation/fpgadataflow/create_stitched_ip.py | 4 +--- .../transformation/fpgadataflow/make_pynq_driver.py | 11 +++++------ src/finn/util/pyverilator.py | 6 ++---- src/finn/util/test.py | 8 ++++---- tests/brevitas/test_brevitas_cnv.py | 8 ++++---- tests/end2end/test_end2end_cybsec_mlp.py | 4 +--- tests/end2end/test_ext_weights.py | 7 ++----- tests/fpgadataflow/test_convert_to_hls_layers_cnv.py | 8 ++++---- .../transformation/streamline/test_streamline_cnv.py | 8 ++++---- .../test_batchnorm_to_affine_bnn_pynq.py | 8 ++++---- tests/transformation/test_qonnx_to_finn.py | 8 ++++---- tests/util/test_build_dataflow.py | 4 +--- 16 files changed, 45 insertions(+), 64 deletions(-) diff --git a/.isort.cfg b/.isort.cfg index 6cfe1c8919..5378b88fad 100644 --- a/.isort.cfg +++ b/.isort.cfg @@ -2,7 +2,7 @@ line_length=88 indent=' ' skip=.tox,.venv,build,dist -known_standard_library=setuptools,pkg_resources +known_standard_library=setuptools known_test=pytest known_first_party=finn sections=FUTURE,STDLIB,TEST,THIRDPARTY,FIRSTPARTY,LOCALFOLDER diff --git a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb index a0dbbf4834..9e9d52e476 100644 --- a/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb +++ 
b/notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb @@ -516,12 +516,13 @@ "metadata": {}, "outputs": [], "source": [ - "import pkg_resources as pk\n", + "import importlib_resources\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "\n", - "fn = pk.resource_filename(\"finn.qnn-data\", \"cifar10/cifar10-test-data-class3.npz\")\n", - "x = np.load(fn)[\"arr_0\"]\n", + "ref = importlib_resources.files(\"finn.qnn-data\") / \"cifar10/cifar10-test-data-class3.npz\"\n", + "with importlib_resources.as_file(ref) as fn:\n", + " x = np.load(fn)[\"arr_0\"]\n", "x = x.reshape(3, 32,32).transpose(1, 2, 0)\n", "plt.imshow(x)" ] @@ -640,9 +641,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.10.12" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/requirements.txt b/requirements.txt index 1427d4f1ee..e03eff2c98 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ bitstring==3.1.7 clize==5.0.1 dataclasses-json==0.5.7 gspread==3.6.0 +importlib-resources==6.1.0 ipython==8.12.2 numpy==1.24.1 onnx==1.13.0 @@ -13,6 +14,7 @@ psutil==5.9.4 pyscaffold==4.4 scipy==1.10.1 setupext-janitor>=1.1.2 +setuptools==68.2.2 sigtools==4.0.1 toposort==1.7.0 vcdvcd==1.0.5 diff --git a/setup.py b/setup.py index 8fd781462c..7457bb9b38 100644 --- a/setup.py +++ b/setup.py @@ -35,17 +35,7 @@ PyScaffold helps you to put up the scaffold of your new Python project. Learn more under: https://pyscaffold.org/ """ -from pkg_resources import VersionConflict, require from setuptools import setup -import sys - -try: - require("setuptools>=38.3") -except VersionConflict: - print("Error: version of setuptools is too old (<38.3)!") - sys.exit(1) - - if __name__ == "__main__": setup(use_pyscaffold=True) diff --git a/src/finn/transformation/fpgadataflow/create_stitched_ip.py b/src/finn/transformation/fpgadataflow/create_stitched_ip.py index c9db69400b..9a653fe404 100644 --- a/src/finn/transformation/fpgadataflow/create_stitched_ip.py +++ b/src/finn/transformation/fpgadataflow/create_stitched_ip.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import json import multiprocessing as mp import os @@ -499,7 +497,7 @@ def apply(self, model): "[ipx::get_file_groups xilinx_simulationcheckpoint]" % block_name ) # add a rudimentary driver mdd to get correct ranges in xparameters.h later on - example_data_dir = pk.resource_filename("finn.qnn-data", "mdd-data/") + example_data_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/mdd-data" copytree(example_data_dir, vivado_stitch_proj_dir + "/data") ##### diff --git a/src/finn/transformation/fpgadataflow/make_pynq_driver.py b/src/finn/transformation/fpgadataflow/make_pynq_driver.py index 5a0e47c130..6d1fa290b4 100644 --- a/src/finn/transformation/fpgadataflow/make_pynq_driver.py +++ b/src/finn/transformation/fpgadataflow/make_pynq_driver.py @@ -26,9 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import pkg_resources as pk - import numpy as np import os import qonnx @@ -89,8 +86,8 @@ def apply(self, model): model.set_metadata_prop("pynq_driver_dir", pynq_driver_dir) # create the base FINN driver -- same for all accels - driver_base_template = pk.resource_filename( - "finn.qnn-data", "templates/driver/driver_base.py" + driver_base_template = ( + os.environ["FINN_ROOT"] + "/src/finn/qnn-data/templates/driver/driver_base.py" ) driver_base_py = pynq_driver_dir + "/driver_base.py" shutil.copy(driver_base_template, driver_base_py) @@ -268,7 +265,9 @@ def apply(self, model): # add validate.py to run full top-1 test (only for suitable networks) validate_py = pynq_driver_dir + "/validate.py" - validate_template = pk.resource_filename("finn.qnn-data", "templates/driver/validate.py") + validate_template = ( + os.environ["FINN_ROOT"] + "/src/finn/qnn-data/templates/driver/validate.py" + ) shutil.copy(validate_template, validate_py) # generate weight files for runtime-writable layers diff --git a/src/finn/util/pyverilator.py b/src/finn/util/pyverilator.py index 73c8755bfb..318ba7045e 100644 --- a/src/finn/util/pyverilator.py +++ b/src/finn/util/pyverilator.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import numpy as np import os import shutil @@ -94,7 +92,7 @@ def file_to_basename(x): # use custom version of axis infrastructure vh # to enable Verilator to simulate AMD/Xilinx components (e.g DWC) - custom_vh = pk.resource_filename("finn.qnn-data", "verilog/custom_axis_infrastructure.vh") + custom_vh = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/verilog/custom_axis_infrastructure.vh" shutil.copy(custom_vh, verilog_header_dir + "/axis_infrastructure_v1_1_0.vh") for fn in all_verilog_srcs: if fn.endswith(".vh"): @@ -131,7 +129,7 @@ def verilator_fifosim(model, n_inputs, max_iters=100000000): vivado_stitch_proj_dir = prepare_stitched_ip_for_verilator(model) verilog_header_dir = vivado_stitch_proj_dir + "/pyverilator_vh" build_dir = make_build_dir("verilator_fifosim_") - fifosim_cpp_fname = pk.resource_filename("finn.qnn-data", "cpp/verilator_fifosim.cpp") + fifosim_cpp_fname = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/cpp/verilator_fifosim.cpp" with open(fifosim_cpp_fname, "r") as f: fifosim_cpp_template = f.read() assert len(model.graph.input) == 1, "Only a single input stream is supported" diff --git a/src/finn/util/test.py b/src/finn/util/test.py index 1f36486048..5ff884f62d 100644 --- a/src/finn/util/test.py +++ b/src/finn/util/test.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -137,8 +136,9 @@ def get_example_input(topology): onnx_tensor = onnx.load_tensor_from_string(raw_i) return nph.to_array(onnx_tensor) elif topology == "cnv": - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) return input_tensor else: raise Exception("Unknown topology, can't return example input") diff --git a/tests/brevitas/test_brevitas_cnv.py b/tests/brevitas/test_brevitas_cnv.py index c8adafdce9..3950a5b6a7 100644 --- a/tests/brevitas/test_brevitas_cnv.py +++ b/tests/brevitas/test_brevitas_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import os import torch @@ -65,8 +64,9 @@ def test_brevitas_cnv_export_exec(wbits, abits): model = model.transform(RemoveStaticGraphInputs()) assert len(model.graph.input) == 1 assert len(model.graph.output) == 1 - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # run using FINN-based execution diff --git a/tests/end2end/test_end2end_cybsec_mlp.py b/tests/end2end/test_end2end_cybsec_mlp.py index 7b73700909..12267aed47 100644 --- a/tests/end2end/test_end2end_cybsec_mlp.py +++ b/tests/end2end/test_end2end_cybsec_mlp.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest import json @@ -83,7 +81,7 @@ def forward(self, x): @pytest.mark.end2end def test_end2end_cybsec_mlp_export(): - assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/") + assets_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/cybsec-mlp" # load up trained net in Brevitas input_size = 593 hidden1 = 64 diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index bef2e0ffa7..c91019ba99 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest import os @@ -84,9 +82,8 @@ def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) build_env = get_build_env(build_kind, target_clk_ns) - folding_config_file = pk.resource_filename( - "finn.qnn-data", "test_ext_weights/tfc-w1a1-extw.json" - ) + test_data = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/test_ext_weights" + folding_config_file = test_data + "/tfc-w1a1-extw.json" output_dir = make_build_dir("test_end2end_ext_weights_build") cfg = build.DataflowBuildConfig( output_dir=output_dir, diff --git a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py index c4f3807aa0..c9cb4f0802 100644 --- a/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py +++ b/tests/fpgadataflow/test_convert_to_hls_layers_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import os import torch @@ -86,8 +85,9 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation): model = model.transform(Streamline()) model = model.transform(InferDataLayouts()) # load one of the test vectors - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # generate expected value from streamlined net diff --git a/tests/transformation/streamline/test_streamline_cnv.py b/tests/transformation/streamline/test_streamline_cnv.py index 86e4356ae4..8a91a49278 100644 --- a/tests/transformation/streamline/test_streamline_cnv.py +++ b/tests/transformation/streamline/test_streamline_cnv.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import torch from brevitas.export import export_qonnx @@ -78,8 +77,9 @@ def test_streamline_cnv(size, wbits, abits): model = model.transform(GiveReadableTensorNames()) model = model.transform(RemoveStaticGraphInputs()) # load one of the test vectors - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) # run using FINN-based execution diff --git a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py index b95c26d25f..fd5033674b 100644 --- a/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py +++ b/tests/transformation/test_batchnorm_to_affine_bnn_pynq.py @@ -26,10 +26,9 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -59,8 +58,9 @@ def test_batchnorm_to_affine_cnv_w1a1(): model = model.transform(ConvertQONNXtoFINN()) model = model.transform(InferShapes()) model = model.transform(FoldConstants()) - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 assert input_tensor.shape == (1, 3, 32, 32) input_dict = {"0": input_tensor} diff --git a/tests/transformation/test_qonnx_to_finn.py b/tests/transformation/test_qonnx_to_finn.py index 5bbcb1f9d4..939082b87b 100644 --- a/tests/transformation/test_qonnx_to_finn.py +++ b/tests/transformation/test_qonnx_to_finn.py @@ -27,10 +27,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest +import importlib_resources as importlib import numpy as np import onnx import onnx.numpy_helper as nph @@ -55,8 +54,9 @@ def get_brev_model_and_sample_inputs(model_name, wbits, abits): brev_model = get_test_model_trained(model_name, wbits, abits) elif model_name == "CNV": in_shape = (1, 3, 32, 32) - fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz") - input_tensor = np.load(fn)["arr_0"].astype(np.float32) + ref = importlib.files("finn.qnn-data") / "cifar10/cifar10-test-data-class3.npz" + with importlib.as_file(ref) as fn: + input_tensor = np.load(fn)["arr_0"].astype(np.float32) input_tensor = input_tensor / 255 brev_model = get_test_model_trained(model_name, wbits, abits) elif model_name == "mobilenet": diff --git a/tests/util/test_build_dataflow.py b/tests/util/test_build_dataflow.py index 02136b31a2..3649d6709e 100644 --- a/tests/util/test_build_dataflow.py +++ b/tests/util/test_build_dataflow.py @@ -26,8 +26,6 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import pkg_resources as pk - import pytest import numpy as np @@ -44,7 +42,7 @@ def test_end2end_build_dataflow_directory(): test_dir = make_build_dir("test_build_dataflow_directory_") target_dir = test_dir + "/build_dataflow" - example_data_dir = pk.resource_filename("finn.qnn-data", "build_dataflow/") + example_data_dir = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/build_dataflow" copytree(example_data_dir, target_dir) build_dataflow_directory(target_dir) # check the generated files From aac2704561e21d857e2d9651c284bc324ab6dfbc Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 15:44:39 +0100 Subject: [PATCH 65/77] [Setup] Removing pyscaffold from requirements --- setup.cfg | 2 -- setup.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index fb070a436e..e69ce4f829 100644 --- a/setup.cfg +++ b/setup.cfg @@ -56,8 +56,6 @@ packages = find_namespace: include_package_data = True package_dir = =src -# DON'T CHANGE THE FOLLOWING LINE! IT WILL BE UPDATED BY PYSCAFFOLD! -setup_requires = pyscaffold>=3.2a0,<3.3a0 # The usage of test_requires is discouraged, see `Dependency Management` docs # tests_require = pytest; pytest-cov # Require a specific Python version, e.g. 
Python 2.7 or >= 3.4 diff --git a/setup.py b/setup.py index 7457bb9b38..9a06632af1 100644 --- a/setup.py +++ b/setup.py @@ -38,4 +38,4 @@ from setuptools import setup if __name__ == "__main__": - setup(use_pyscaffold=True) + setup() From d8a4048d731af9dbf424745d94122d96a2a675ed Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 15:51:47 +0100 Subject: [PATCH 66/77] [Setup] Removing direct calls of setup.py --- docker/quicktest.sh | 8 ++++---- run-docker.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index 466fcfb09d..a990246b49 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,16 +6,16 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - python setup.py test --addopts "-m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL" + pytest -m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" - python setup.py test --addopts "-k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL" + pytest -k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "rtlsim" ]; then echo "Running rtlsim test suite with pytest-parallel" - python setup.py test --addopts "-k rtlsim --workers $PYTEST_PARALLEL" + pytest -k rtlsim --workers $PYTEST_PARALLEL elif [ $1 = "end2end" ]; then echo "Running end2end test suite with no parallelism" - python setup.py test --addopts "-k end2end" + pytest -k end2end elif [ $1 = "full" ]; then echo "Running full test suite, each step with appropriate parallelism" $0 main; diff --git a/run-docker.sh b/run-docker.sh index c24dcec724..cb23595365 100755 --- a/run-docker.sh +++ b/run-docker.sh @@ -102,7 +102,7 @@ DOCKER_INTERACTIVE="" if [ "$1" = "test" ]; then gecho "Running test suite (all tests)" - DOCKER_CMD="python setup.py test" + DOCKER_CMD="pytest" elif [ "$1" = "quicktest" ]; then gecho "Running test suite (non-Vivado, non-slow tests)" DOCKER_CMD="quicktest.sh" From 13313c293eb00d2eb88edcaf68386fdf52152aeb Mon Sep 17 00:00:00 2001 From: auphelia Date: Fri, 6 Oct 2023 17:21:45 +0100 Subject: [PATCH 67/77] [CI/docs] hotfix for Jenkins and docs to not use setup.py --- docker/jenkins/Jenkinsfile | 10 +++++----- docs/finn/developers.rst | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2954877c2a..6be8845ab7 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -14,31 +14,31 @@ node { parallel firstBranch: { stage('Brevitas export') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") + sh("bash run-docker.sh pytest -mbrevitas_export") } } }, secondBranch: { stage('Streamlining transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mstreamline") + sh("bash run-docker.sh pytest -mstreamline") } } }, thirdBranch: { stage('Util functions') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mutil") + sh("bash run-docker.sh pytest -mutil") } } }, fourthBranch: { stage('General transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mtransform") + sh("bash run-docker.sh pytest -mtransform") } } }, 
fifthBranch: { stage('Fpgadataflow transformations and simulations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") + sh("bash run-docker.sh pytest -mfpgadataflow") } } } diff --git a/docs/finn/developers.rst b/docs/finn/developers.rst index f9252f764c..1e1c48e2b5 100644 --- a/docs/finn/developers.rst +++ b/docs/finn/developers.rst @@ -159,8 +159,8 @@ from the FINN root directory as follows: If you want to run tests in parallel (e.g. to take advantage of a multi-core CPU) you can use: -* pytest-parallel for any rtlsim tests, e.g. `python setup.py test --addopts "-k rtlsim --workers auto"` -* pytest-xdist for anything else, make sure to add `--dist=loadfile` if you have tests in the same file that have dependencies on each other e.g. `python setup.py test --addopts "-k mytest -n auto --dist=loadfile"` +* pytest-parallel for any rtlsim tests, e.g. `pytest -k rtlsim --workers auto` +* pytest-xdist for anything else, make sure to add `--dist=loadfile` if you have tests in the same file that have dependencies on each other e.g. `pytest -k mytest -n auto --dist=loadfile` Finally, the full test suite with appropriate parallelization can be run inside the container by: From b2e04731c238056f49ff820f4ac26bfc99d4a609 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Tue, 10 Oct 2023 16:20:22 +0100 Subject: [PATCH 68/77] [CI] exclude bnn_pynq from quicktest --- docker/quicktest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/quicktest.sh b/docker/quicktest.sh index a990246b49..3684e3a0d4 100755 --- a/docker/quicktest.sh +++ b/docker/quicktest.sh @@ -6,7 +6,7 @@ cd $FINN_ROOT # check if command line argument is empty or not present if [ -z $1 ]; then echo "Running quicktest: not (vivado or slow or board) with pytest-xdist" - pytest -m 'not (vivado or slow or vitis or board or notebooks)' --dist=loadfile -n $PYTEST_PARALLEL + pytest -m 'not (vivado or slow or vitis or board or notebooks or bnn_pynq)' --dist=loadfile -n $PYTEST_PARALLEL elif [ $1 = "main" ]; then echo "Running main test suite: not (rtlsim or end2end) with pytest-xdist" pytest -k 'not (rtlsim or end2end)' --dist=loadfile -n $PYTEST_PARALLEL From 98e94f72e9c2b53dc63d30aa4d3bea466a178c19 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Tue, 10 Oct 2023 17:49:26 +0100 Subject: [PATCH 69/77] [CI] fixing linting, lingering line left behind after resolving merge conflict --- tests/end2end/test_ext_weights.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/end2end/test_ext_weights.py b/tests/end2end/test_ext_weights.py index 25fb5e91e9..2f5f136d3a 100644 --- a/tests/end2end/test_ext_weights.py +++ b/tests/end2end/test_ext_weights.py @@ -80,7 +80,6 @@ def test_end2end_ext_weights_download(): def test_end2end_ext_weights_build(): model_file = get_checkpoint_name("download") load_test_checkpoint_or_skip(model_file) - build_env = get_build_env(build_kind, target_clk_ns) test_data = os.environ["FINN_ROOT"] + "/src/finn/qnn-data/test_ext_weights" folding_config_file = test_data + "/tfc-w1a1-extw.json" output_dir = make_build_dir("test_end2end_ext_weights_build") From 5eb535a7c86a84d7195b8059765ea33f075c761b Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 11 Oct 2023 14:34:10 +0100 Subject: [PATCH 70/77] [NB] make all output paths absolute in advanced notebook --- .../4_advanced_builder_settings.ipynb | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/notebooks/advanced/4_advanced_builder_settings.ipynb 
b/notebooks/advanced/4_advanced_builder_settings.ipynb index 4af48ac233..e748d85a1c 100644 --- a/notebooks/advanced/4_advanced_builder_settings.ipynb +++ b/notebooks/advanced/4_advanced_builder_settings.ipynb @@ -143,7 +143,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "estimates_output_dir = \"output_estimates_only\"\n", + "estimates_output_dir = build_dir + \"/output_estimates_only\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(estimates_output_dir):\n", @@ -427,7 +427,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_pre_proc\"\n", + "output_dir = build_dir + \"/output_pre_proc\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -535,7 +535,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_pre_and_post_proc\"\n", + "output_dir = build_dir + \"/output_pre_and_post_proc\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -782,7 +782,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_all_lutram\"\n", + "output_dir = build_dir + \"/output_all_lutram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -886,7 +886,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_all_bram\"\n", + "output_dir = build_dir + \"/output_all_bram\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -1090,7 +1090,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_with_verification\"\n", + "output_dir = build_dir + \"/output_with_verification\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -1191,7 +1191,7 @@ "metadata": {}, "outputs": [], "source": [ - "verify_initial_python = np.load(\"output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n", + "verify_initial_python = np.load(build_dir + \"/output_with_verification/verification_output/verify_initial_python_0_SUCCESS.npy\")\n", "print(\"The output of the verification step after the step_tidy_up is: \" + str(verify_initial_python))" ] }, @@ -1352,7 +1352,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_standalone_thresholds\"\n", + "output_dir = build_dir + \"/output_standalone_thresholds\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -1449,7 +1449,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + \"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_rtl_swg\"\n", + "output_dir = build_dir + \"/output_rtl_swg\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", @@ -1556,7 +1556,7 @@ "model_dir = os.environ['FINN_ROOT'] + \"/notebooks/advanced\"\n", "model_file = model_dir + 
\"/end2end_cnv_w2a2_export.onnx\"\n", "\n", - "output_dir = \"output_bitfile\"\n", + "output_dir = build_dir + \"/output_bitfile\"\n", "\n", "#Delete previous run results if exist\n", "if os.path.exists(output_dir):\n", From 99e9b7366a5ab0238319c314ef81b1bb9f2d988a Mon Sep 17 00:00:00 2001 From: johnnoel Date: Thu, 12 Oct 2023 11:19:59 +0100 Subject: [PATCH 71/77] [CI] remove reference to unused hack script --- docker/jenkins/Jenkinsfile | 2 -- 1 file changed, 2 deletions(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 2d7ea5e918..47f855f433 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -797,14 +797,12 @@ void createTestScript(String board, String marker, String testResultsFilename) { sh """echo "#!/bin/bash . /opt/xilinx/xrt/setup.sh . ${CONDA_ENV_ACTIVATE} -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ else sh """echo "#!/bin/bash . /etc/profile.d/pynq_venv.sh . /etc/profile.d/xrt_setup.sh -python hack_driver_script.py python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ From 07e3b39efc2dd9e82c3ffb239d1de934f564b84d Mon Sep 17 00:00:00 2001 From: johnnoel Date: Mon, 16 Oct 2023 16:12:45 +0100 Subject: [PATCH 72/77] [Tests] fix end2end bnn_pynq cnv transpose --- tests/end2end/test_end2end_bnn_pynq.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index d98c06f7d0..b296dad827 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -330,7 +330,7 @@ def deploy_based_on_board(model, model_title, topology, wbits, abits, board): # The FC models contain a Reshape node, which FINN uses, so we therefore have to # reshape the input tensor data to match the reshaping in the model if topology == "cnv": - input_tensor_npy = input_tensor_npy.transpose(0, 3, 2, 1) + input_tensor_npy = input_tensor_npy.transpose(0, 2, 3, 1) else: input_shape = input_tensor_npy.shape new_input_shape = (input_shape[0], np.prod(input_shape[1:])) From 1bf20d50e156ea251f378caee128689346cab2b3 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 18 Oct 2023 14:38:38 +0100 Subject: [PATCH 73/77] [Tests] Disabling end2end_bnn_pynq U250 tests failing due to routing on 2022.2 tools --- tests/end2end/test_end2end_bnn_pynq.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/end2end/test_end2end_bnn_pynq.py b/tests/end2end/test_end2end_bnn_pynq.py index b296dad827..8ac2493d1e 100644 --- a/tests/end2end/test_end2end_bnn_pynq.py +++ b/tests/end2end/test_end2end_bnn_pynq.py @@ -728,6 +728,19 @@ def test_build(self, topology, wbits, abits, board): build_data = get_build_env(board, target_clk_ns) if build_data["kind"] == "alveo" and ("VITIS_PATH" not in os.environ): pytest.skip("VITIS_PATH not set") + if board == "U250" and wbits == 1 and abits == 1: + if topology == "lfc" or topology == "tfc": + pytest.xfail( + "bnn_w" + + str(wbits) + + "_a" + + str(abits) + + "_" + + topology + + "_" + + board + + " test_build currently disabled, see CR-1171874" + ) prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "fifodepth_" + board) model = load_test_checkpoint_or_skip(prev_chkpt_name) model = model.transform(build_data["build_fxn"]) From 4f51ed68a1dcd7dd44007c2cf0c6af05b21cd327 Mon Sep 17 00:00:00 
2001 From: johnnoel Date: Mon, 23 Oct 2023 11:00:49 +0100 Subject: [PATCH 74/77] [CI] Use virtual env instead of Conda for Jenkins testing --- docker/jenkins/Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile b/docker/jenkins/Jenkinsfile index 47f855f433..b19cbbccf1 100644 --- a/docker/jenkins/Jenkinsfile +++ b/docker/jenkins/Jenkinsfile @@ -796,7 +796,7 @@ void createTestScript(String board, String marker, String testResultsFilename) { if(board == "U250") sh """echo "#!/bin/bash . /opt/xilinx/xrt/setup.sh -. ${CONDA_ENV_ACTIVATE} +. ${VENV_ACTIVATE} python -m pytest -m ${marker} --junitxml=${testResultsFilename}.xml --html=${testResultsFilename}.html --self-contained-html" >> run-tests.sh """ else From 1093276f33651324eb8e2ed0779a1e1915b7158f Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 24 Oct 2023 18:12:52 +0100 Subject: [PATCH 75/77] [Jenkinsfile] Update Jenkinsfile_CI with pytest command --- docker/jenkins/Jenkinsfile_CI | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI index 2954877c2a..f04ea0a49d 100644 --- a/docker/jenkins/Jenkinsfile_CI +++ b/docker/jenkins/Jenkinsfile_CI @@ -14,31 +14,31 @@ node { parallel firstBranch: { stage('Brevitas export') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mbrevitas_export") + sh("bash run-docker.sh pytest --addopts -mbrevitas_export") } } }, secondBranch: { stage('Streamlining transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mstreamline") + sh("bash run-docker.sh pytest --addopts -mstreamline") } } }, thirdBranch: { stage('Util functions') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mutil") + sh("bash run-docker.sh pytest --addopts -mutil") } } }, fourthBranch: { stage('General transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mtransform") + sh("bash run-docker.sh pytest --addopts -mtransform") } } }, fifthBranch: { stage('Fpgadataflow transformations and simulations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh python setup.py test --addopts -mfpgadataflow") + sh("bash run-docker.sh pytest --addopts -mfpgadataflow") } } } From 6e86f9c2a1acc465e803d9cd9ecc2ce80c184e70 Mon Sep 17 00:00:00 2001 From: auphelia Date: Tue, 24 Oct 2023 18:15:58 +0100 Subject: [PATCH 76/77] [Jenkins] Delete obsolete option in pytest command --- docker/jenkins/Jenkinsfile_CI | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI index f04ea0a49d..6be8845ab7 100644 --- a/docker/jenkins/Jenkinsfile_CI +++ b/docker/jenkins/Jenkinsfile_CI @@ -14,31 +14,31 @@ node { parallel firstBranch: { stage('Brevitas export') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh pytest --addopts -mbrevitas_export") + sh("bash run-docker.sh pytest -mbrevitas_export") } } }, secondBranch: { stage('Streamlining transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh pytest --addopts -mstreamline") + sh("bash run-docker.sh pytest -mstreamline") } } }, thirdBranch: { stage('Util functions') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh pytest --addopts -mutil") + sh("bash run-docker.sh pytest -mutil") } } }, fourthBranch: { stage('General transformations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh pytest --addopts -mtransform") + sh("bash 
run-docker.sh pytest -mtransform") } } }, fifthBranch: { stage('Fpgadataflow transformations and simulations') { dir("${env.WORKSPACE}") { - sh("bash run-docker.sh pytest --addopts -mfpgadataflow") + sh("bash run-docker.sh pytest -mfpgadataflow") } } } From bd7f3b3b1a8fd29482caf3f027414ce8d1b2a619 Mon Sep 17 00:00:00 2001 From: johnnoel Date: Wed, 25 Oct 2023 16:03:18 +0100 Subject: [PATCH 77/77] [Jenkins] Add node label to Jenkinsfile_CI to target specific machine if available --- docker/jenkins/Jenkinsfile_CI | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/jenkins/Jenkinsfile_CI b/docker/jenkins/Jenkinsfile_CI index 6be8845ab7..5e7d5f1475 100644 --- a/docker/jenkins/Jenkinsfile_CI +++ b/docker/jenkins/Jenkinsfile_CI @@ -1,4 +1,4 @@ -node { +node('finn-build || built-in') { def app stage('Clone repository') { /* Let's make sure we have the repository cloned to our workspace */
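The five parallel branches above select their subsets purely by pytest marker, so the same runs can be reproduced outside Jenkins through the existing run-docker.sh wrapper. A sketch, assuming the markers are registered in the project's pytest configuration as the stage names suggest (-m <marker> is equivalent to the squashed -m<marker> spelling used in the stages):

import subprocess

# Mirror the five parallel Jenkinsfile_CI branches, run sequentially here.
for marker in ["brevitas_export", "streamline", "util", "transform", "fpgadataflow"]:
    subprocess.run(["bash", "run-docker.sh", "pytest", "-m", marker], check=True)

As for the final hunk: 'finn-build || built-in' is a Jenkins label expression, so the job may schedule on any node carrying either label; per the commit message, that means the dedicated finn-build machine when one is available, with the controller's built-in node as the fallback.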