
Commit

fix line too long issue
wenhuach21 committed Oct 22, 2024
1 parent 96fdba6 commit 89aa77d
Showing 1 changed file with 4 additions and 2 deletions.
6 changes: 4 additions & 2 deletions auto_round/quantizer.py
@@ -243,7 +243,8 @@ def __init__(self, orig_layer, enable_minmax_tuning=True, enable_norm_bias_tunin
         self.params = {}
 
         if self.act_quant:
-            self.act_quant_func, self.act_data_type = get_quant_func(self.orig_layer.data_type, self.act_bits, self.act_sym)
+            self.act_quant_func, self.act_data_type = get_quant_func(self.orig_layer.data_type, self.act_bits,
+                                                                      self.act_sym)
 
         self.q_scale_thresh = 1e-5
 
@@ -421,7 +422,8 @@ def __init__(self, orig_layer, enable_minmax_tuning=True, enable_norm_bias_tunin
         self.act_quant = self.act_bits <= 8
         self.weight_quant_func, self.data_type = get_quant_func(self.orig_layer.data_type, self.bits, self.sym)
         if self.act_quant:
-            self.act_quant_func, self.act_data_type = get_quant_func(self.orig_layer.data_type, self.act_bits, self.act_sym)
+            self.act_quant_func, self.act_data_type = get_quant_func(self.orig_layer.data_type, self.act_bits,
+                                                                      self.act_sym)
 
         self.q_scale_thresh = 1e-5
         weight_dtype = torch.float32
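The change is purely cosmetic: it wraps the over-long call to get_quant_func across two lines so the file passes the line-length lint check. Below is a minimal sketch, not taken from the repository; get_quant_func_demo and its argument values are hypothetical stand-ins used only to show that wrapping a call's arguments inside its open parentheses is an implicit line continuation in Python, so the wrapped and single-line forms behave identically.

```python
# Illustrative sketch only: get_quant_func_demo is a hypothetical stand-in for
# auto_round's get_quant_func; it just echoes its inputs as a (func_name, data_type) pair.
def get_quant_func_demo(data_type, bits, sym):
    name = f"quant_{data_type}_{bits}_{'sym' if sym else 'asym'}"
    return name, data_type

# Single-line form (the style the commit removes because it exceeds the length limit).
act_quant_func, act_data_type = get_quant_func_demo("int", 4, True)

# Wrapped form (the style the commit adds): the open parenthesis keeps this one logical line.
act_quant_func_wrapped, act_data_type_wrapped = get_quant_func_demo("int", 4,
                                                                     True)

# Both forms produce exactly the same result.
assert (act_quant_func, act_data_type) == (act_quant_func_wrapped, act_data_type_wrapped)
```

Because the parentheses are already open, no backslash continuation is needed; this implied-continuation style is the one PEP 8 recommends for long calls.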
