From e410ff32ac660d9c6188f5a7681955ff31d1cda4 Mon Sep 17 00:00:00 2001
From: Ian Colbert <88047104+i-colbert@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:04:38 -0800
Subject: [PATCH] Fix (examples): adding bias_quant to final linear layer in
 resnet18 (#720)

---
 src/brevitas_examples/bnn_pynq/models/resnet.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/brevitas_examples/bnn_pynq/models/resnet.py b/src/brevitas_examples/bnn_pynq/models/resnet.py
index f5e23d479..24995c6b6 100644
--- a/src/brevitas_examples/bnn_pynq/models/resnet.py
+++ b/src/brevitas_examples/bnn_pynq/models/resnet.py
@@ -9,6 +9,7 @@
 import brevitas.nn as qnn
 from brevitas.quant import Int8WeightPerChannelFloat
 from brevitas.quant import Int8WeightPerTensorFloat
+from brevitas.quant import IntBias
 from brevitas.quant import TruncTo8bit
 from brevitas.quant_tensor import QuantTensor
 
@@ -120,6 +121,7 @@ def __init__(
             act_bit_width=8,
             weight_bit_width=8,
             round_average_pool=False,
+            last_layer_bias_quant=IntBias,
             weight_quant=Int8WeightPerChannelFloat,
             first_layer_weight_quant=Int8WeightPerChannelFloat,
             last_layer_weight_quant=Int8WeightPerTensorFloat):
@@ -163,6 +165,7 @@ def __init__(
             num_classes,
             weight_bit_width=8,
             bias=True,
+            bias_quant=last_layer_bias_quant,
             weight_quant=last_layer_weight_quant)
 
         for m in self.modules():
@@ -224,7 +227,8 @@ def quant_resnet18(cfg) -> QuantResNet:
     act_bit_width = cfg.getint('QUANT', 'ACT_BIT_WIDTH')
     num_classes = cfg.getint('MODEL', 'NUM_CLASSES')
     model = QuantResNet(
-        QuantBasicBlock, [2, 2, 2, 2],
+        block_impl=QuantBasicBlock,
+        num_blocks=[2, 2, 2, 2],
         num_classes=num_classes,
         weight_bit_width=weight_bit_width,
         act_bit_width=act_bit_width)
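
For context, the standalone sketch below (not part of the patch) illustrates what the
change does to the final layer: passing bias_quant=IntBias makes QuantLinear quantize
its bias to integer at a scale derived from the input and weight scales, rather than
leaving the bias in floating point. The layer sizes, the input_quant choice, and the
10-class output here are illustrative assumptions, not values taken from the patch.

    # Sketch of the patched behavior; sizes and quantizer choices are
    # assumptions for illustration, not taken from the patch itself.
    import torch
    import brevitas.nn as qnn
    from brevitas.quant import Int8ActPerTensorFloat
    from brevitas.quant import Int8WeightPerTensorFloat
    from brevitas.quant import IntBias

    fc = qnn.QuantLinear(
        512,  # assumed in_features (resnet18's final feature width)
        10,  # assumed number of classes
        bias=True,
        weight_quant=Int8WeightPerTensorFloat,
        bias_quant=IntBias,  # the quantizer this patch wires into the model
        input_quant=Int8ActPerTensorFloat,  # IntBias needs a known input scale
        return_quant_tensor=True)

    out = fc(torch.randn(1, 512))
    print(out.scale)  # bias now shares the layer's integer accumulator scale

Note that IntBias requires an input scale to be available, which is why the sketch sets
input_quant explicitly; in the patched model the preceding quantized layers already feed
the final QuantLinear quantized tensors. Keeping the bias in integer matters for export
to integer-only backends such as FINN, which these bnn_pynq examples target.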