Skip to content

Commit

Permalink
tested threshold
Browse files — browse the repository at this point in the history
  • Loading branch information
fabianandresgrob committed Nov 15, 2023
1 parent 8736bc7 commit f7687e9
Show file tree
Hide file tree
Showing 2 changed files with 2 additions and 2 deletions.
2 changes: 1 addition & 1 deletion — src/brevitas/graph/gpfq.py
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ def single_layer_update(self):
s = self.layer.quant_weight_scale()

permutation_list = [torch.tensor(range(weight.shape[-1]))]
l1_norm = torch.zeros(weight.shape[:-1])
l1_norm = torch.zeros(weight.shape[:-1], device=dev)
for t in range(weight.shape[-1]):
for group_index in range(self.groups):
U[group_index] += torch.matmul(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@
default=None,
type=int,
help='Accumulator Bit Width for GPFQ in combination with A2Q (default: None)')
add_bool_arg(parser, 'gptq', default=True, help='GPTQ (default: enabled)')
add_bool_arg(parser, 'gptq', default=False, help='GPTQ (default: disabled)')
add_bool_arg(parser, 'gpfq', default=False, help='GPFQ (default: disabled)')
add_bool_arg(
parser, 'gptq-act-order', default=False, help='GPTQ Act order heuristic (default: disabled)')
Expand Down

0 comments on commit f7687e9

Please sign in to comment.