format code
leslie-fang-intel committed Mar 4, 2025
1 parent c89a731 commit 6c3710b
Showing 2 changed files with 9 additions and 2 deletions.
4 changes: 3 additions & 1 deletion test/integration/test_integration.py
@@ -149,7 +149,9 @@ def _int4wo_api(mod, use_hqq=False):
         and TORCH_VERSION_AT_LEAST_2_6
     ):
         quantize_(
-            mod, int4_weight_only(layout=Int4CPULayout(), use_hqq=use_hqq), set_inductor_config=False
+            mod,
+            int4_weight_only(layout=Int4CPULayout(), use_hqq=use_hqq),
+            set_inductor_config=False,
         )
         unwrap_tensor_subclass(mod)
     elif TORCH_VERSION_AT_LEAST_2_4:
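The hunk above only changes layout: the quantize_ call is rewrapped into the multi-line style an auto-formatter such as ruff/black produces, one argument per line with a trailing comma. For context, a minimal standalone sketch of the same int4 weight-only CPU flow follows; the toy module, dtype, and import locations are assumptions for illustration and are not part of this commit.

# Minimal sketch, not part of this commit: the imports, toy module, and
# bfloat16 dtype are assumptions for illustration.
import torch
from torchao.dtypes import Int4CPULayout
from torchao.quantization import int4_weight_only, quantize_
from torchao.utils import unwrap_tensor_subclass

# Stand-in for `mod` in the hunk above; int4 weight-only quantization
# expects a floating-point module (bfloat16 here) on CPU.
mod = torch.nn.Sequential(torch.nn.Linear(256, 256)).eval().to(torch.bfloat16)

# The surrounding test only takes this path on torch >= 2.6
# (guarded by TORCH_VERSION_AT_LEAST_2_6 in the context lines above).
quantize_(
    mod,
    int4_weight_only(layout=Int4CPULayout(), use_hqq=False),
    set_inductor_config=False,
)
unwrap_tensor_subclass(mod)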
7 changes: 6 additions & 1 deletion test/quantization/test_quant_api.py
@@ -792,7 +792,12 @@ def test_int4wo_cpu(self, dtype, x_dim, use_hqq):
             example_inputs = (example_inputs[0].unsqueeze(0),)
 
         with torch.no_grad():
-            quantize_(m, int4_weight_only(group_size=32, layout=Int4CPULayout(), use_hqq=use_hqq))
+            quantize_(
+                m,
+                int4_weight_only(
+                    group_size=32, layout=Int4CPULayout(), use_hqq=use_hqq
+                ),
+            )
             # ensure the expected op is in the code
             _, code = torch._inductor.utils.run_and_get_code(
                 torch.compile(m, fullgraph=True, dynamic=True),
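For context, a condensed sketch of the test pattern around this hunk: quantize the model under torch.no_grad(), compile it, and capture the Inductor-generated code with run_and_get_code so the test can look for the expected int4 CPU kernel. The toy model, input shape, and the substring check below are assumptions; the actual assertion lives outside the visible hunk.

# Minimal sketch, not part of this commit: the model, input, dtype, and the
# checked substring are assumptions for illustration.
import torch
from torchao.dtypes import Int4CPULayout
from torchao.quantization import int4_weight_only, quantize_

m = torch.nn.Sequential(torch.nn.Linear(128, 128)).eval().to(torch.bfloat16)
example_inputs = (torch.randn(1, 128, dtype=torch.bfloat16),)

with torch.no_grad():
    quantize_(
        m,
        int4_weight_only(group_size=32, layout=Int4CPULayout(), use_hqq=False),
    )
    # Compile and capture the Inductor-generated code for inspection.
    _, code = torch._inductor.utils.run_and_get_code(
        torch.compile(m, fullgraph=True, dynamic=True),
        *example_inputs,
    )
    # Hypothetical check: the real test's expected op name is not shown in this hunk.
    assert any("int4" in generated for generated in code)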
