DynMM: use inner dim as MW and MH
Signed-off-by: aziz bahri <[email protected]>
aziz bahri committed Dec 16, 2024
1 parent 4cffe7e commit 251d595
Showing 1 changed file with 6 additions and 4 deletions.
src/finn/transformation/fpgadataflow/convert_to_hw_layers.py (10 changes: 6 additions & 4 deletions)
@@ -1495,13 +1495,13 @@ def apply(self, model):
                 wdt = model.get_tensor_datatype(mm_weight)
                 if idt.is_integer() and wdt.is_integer():
                     mm_output = n.output[0]
-                    # if mm_weight is not constant, skip node
+                    # if mm_weight is not constant, skip node
                     if model.get_initializer(mm_weight) is None:
                         # TODO: AB: Hack for dynamic MM
                         # Assume that the weight tensor is the same as the input tensor B
                         inp_B = model.get_tensor_shape(mm_weight)
-                        mh = int(inp_B[2])
-                        mw = int(inp_B[1])
+                        mh = int(inp_B[-1])
+                        mw = int(inp_B[-2])
                     else:
                         W = model.get_initializer(mm_weight)
                         # extract weight shape, note that ONNX and finn-hlslib
@@ -1594,7 +1594,9 @@ def apply(self, model):
                             MW=mw,
                             MH=mh,
                             SIMD=simd,
-                            N_VECTORS=mm_in_shape[1],  # Height of the input tensor A for dynamic MVAU
+                            N_VECTORS=mm_in_shape[
+                                1
+                            ],  # Height of the input tensor A for dynamic MVAU
                             PE=pe,
                             inputDataType=idt.name,
                             weightDataType=wdt.name,
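
The first hunk reads MW and MH from the innermost dims of the dynamic weight tensor (input B) instead of fixed positions 1 and 2, so the dimensions come out right regardless of how many leading batch dims the shape carries. A minimal sketch of that reasoning, assuming a [..., MW, MH] layout for input B and [batch, height, width] for input A; the helper name and concrete shapes below are illustrative assumptions, not FINN API:

def extract_dyn_mvau_dims(inp_B_shape, inp_A_shape):
    """Derive MW/MH from the innermost dims of input B and N_VECTORS from input A."""
    mh = int(inp_B_shape[-1])        # last dim of B: output width (MH)
    mw = int(inp_B_shape[-2])        # second-to-last dim of B: inner/common dim (MW)
    n_vectors = int(inp_A_shape[1])  # height of input tensor A for the dynamic MVAU
    return mw, mh, n_vectors

# rank-3 input-B shape [N, MW, MH]: fixed indices [1]/[2] and negative [-2]/[-1] agree
assert extract_dyn_mvau_dims([1, 64, 32], [1, 16, 64]) == (64, 32, 16)
# rank-2 shape [MW, MH]: inp_B[2] would raise IndexError and inp_B[1] would pick MH,
# while the negative indices still select the inner dims correctly
assert extract_dyn_mvau_dims([64, 32], [1, 16, 64]) == (64, 32, 16)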
