Commit b136503: Validation checker
Giuseppe5 committed Jul 7, 2023 (parent: b57f994)
Showing 2 changed files with 30 additions and 17 deletions.
@@ -27,7 +27,7 @@ def main():
         'Weight bit width',
         'Act bit width',
         'Bias bit width',
-        'Scaling per output channel',
+        'Weight quant granularity',
         'Act quant type'])
     idx = grouped_df['Top 1% quant accuracy'].transform(max) == df['Top 1% quant accuracy']
     best_config_df = df[idx]
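The per-group selection above hinges on a pandas idiom: transform('max') broadcasts each group's maximum back onto every row, so comparing it against the original column yields a boolean mask that keeps only the best-performing row(s) of each group. A minimal, self-contained sketch with illustrative column names and data (the real script groups on the full set of columns listed in the hunk):

    import pandas as pd

    # Illustrative results table: one row per (model, quantization config) run.
    df = pd.DataFrame({
        'Model': ['resnet18', 'resnet18', 'mobilenet_v2', 'mobilenet_v2'],
        'Act quant type': ['sym', 'asym', 'sym', 'asym'],
        'Top 1% quant accuracy': [68.2, 69.5, 70.1, 71.3]})

    # transform('max') returns a Series aligned with df that holds each row's
    # group maximum; comparing it to the raw column marks the winning rows.
    grouped_df = df.groupby(['Model'])
    idx = grouped_df['Top 1% quant accuracy'].transform('max') == df['Top 1% quant accuracy']
    best_config_df = df[idx]
    print(best_config_df)  # the best configuration row for each model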
@@ -142,23 +142,10 @@ def ptq_torchvision_models(df, args):
     for key, value in zip(OPTIONS.keys(), combination):
         setattr(config_namespace, key, value)
 
-    # Flexml supports only per-tensor, power-of-two scale factors
-    if config_namespace.target_backend == 'flexml' and (
-            config_namespace.weight_quant_granularity == 'per_channel' or
-            config_namespace.scale_factor_type == 'float32'):
-        return
-    # Merge bias can be enabled only when graph equalization is enabled
-    if config_namespace.graph_eq_iterations == 0 and config_namespace.graph_eq_merge_bias:
-        return
-    # For the generic and layerwise backends, we only test the int32 bias bit width
-    if (config_namespace.target_backend == 'generic' or
-            config_namespace.target_backend == 'layerwise') and config_namespace.bias_bit_width == 'int16':
-        return
-    # If GPTQ is disabled, the act_order heuristic is irrelevant
-    if not config_namespace.gptq and config_namespace.gptq_act_order:
-        return
-    if config_namespace.act_param_method == 'mse' and config_namespace.act_quant_percentile != 99.999:
+    if not validate_config(config_namespace):
         return
+    if config_namespace.act_param_method == 'mse' and config_namespace.act_quant_percentile == 99.999:
+        config_namespace.act_quant_percentile = None
 
     fp_accuracy = TORCHVISION_TOP1_MAP[config_namespace.model_name]
     # Get model-specific configurations about input shapes and normalization
@@ -273,5 +260,31 @@ def ptq_torchvision_models(df, args):
     torchvision_df.to_csv(os.path.join(folder, 'RESULTS_TORCHVISION.csv'), index=False)
 
 
+def validate_config(config_namespace):
+    is_valid = True
+    # Flexml supports only per-tensor, power-of-two scale factors
+    if config_namespace.target_backend == 'flexml' and (
+            config_namespace.weight_quant_granularity == 'per_channel' or
+            config_namespace.scale_factor_type == 'float32'):
+        is_valid = False
+    # Merge bias can be enabled only when graph equalization is enabled
+    if config_namespace.graph_eq_iterations == 0 and config_namespace.graph_eq_merge_bias:
+        is_valid = False
+    # For the generic and layerwise backends, we only test the int32 bias bit width
+    if (config_namespace.target_backend == 'generic' or
+            config_namespace.target_backend == 'layerwise') and config_namespace.bias_bit_width == 'int16':
+        is_valid = False
+    # If GPTQ is disabled, the act_order heuristic is irrelevant
+    if not config_namespace.gptq and config_namespace.gptq_act_order:
+        is_valid = False
+    if config_namespace.act_param_method == 'mse' and config_namespace.act_quant_percentile != 99.999:
+        is_valid = False
+    if config_namespace.act_equalization == 'layerwise' and config_namespace.target_backend == 'generic':
+        is_valid = False
+    if config_namespace.act_bit_width < config_namespace.weight_bit_width:
+        is_valid = False
+    return is_valid
 
 
 if __name__ == '__main__':
     main()

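For context, a sketch of how the new validate_config gates the sweep: each candidate configuration is assembled from a grid of options and discarded up front, before any model is quantized. Everything below is illustrative, not the script's actual code: the OPTIONS values (including the 'po2' scale-factor name) and the SimpleNamespace assembly are assumptions based on the setattr loop visible in the hunk above, but the grid covers every attribute the validator reads, so the snippet runs as-is given the function above.

    from itertools import product
    from types import SimpleNamespace

    # Illustrative, trimmed grid; every attribute validate_config inspects
    # must appear here for the check to run.
    OPTIONS = {
        'target_backend': ['generic', 'layerwise', 'flexml'],
        'scale_factor_type': ['float32', 'po2'],
        'weight_bit_width': [4, 8],
        'act_bit_width': [4, 8],
        'bias_bit_width': ['int16', 'int32'],
        'weight_quant_granularity': ['per_tensor', 'per_channel'],
        'act_param_method': ['stats', 'mse'],
        'act_quant_percentile': [99.9, 99.999],
        'graph_eq_iterations': [0, 20],
        'graph_eq_merge_bias': [False, True],
        'gptq': [False, True],
        'gptq_act_order': [False, True],
        'act_equalization': [None, 'layerwise'],
    }

    valid = total = 0
    for combination in product(*OPTIONS.values()):
        config_namespace = SimpleNamespace()
        for key, value in zip(OPTIONS.keys(), combination):
            setattr(config_namespace, key, value)
        total += 1
        # Prune invalid combinations before any expensive PTQ work happens.
        if validate_config(config_namespace):
            valid += 1
    print(f'{valid} of {total} candidate configurations are valid')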