8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -72,7 +72,7 @@ repos:
name: Unused noqa

- repo: https://github.com/pycqa/isort
-rev: 5.13.2
+rev: 6.0.1
hooks:
- id: isort
exclude: |
@@ -101,7 +101,7 @@ repos:
)$

- repo: https://github.com/psf/black.git
-rev: 24.10.0
+rev: 25.1.0
hooks:
- id: black
files: (.*\.py)$
@@ -130,7 +130,7 @@ repos:
)$

- repo: https://github.com/codespell-project/codespell
-rev: v2.3.0
+rev: v2.4.1
hooks:
- id: codespell
args: [-w]
@@ -149,7 +149,7 @@
)$

- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.8.6
+rev: v0.11.4
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix, --no-cache]
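The hook bumps above (isort 6.0.1, black 25.1.0, codespell v2.4.1, ruff v0.11.4) only take effect once the updated hooks are actually run. A minimal sketch of refreshing and running them, assuming the `pre-commit` CLI is installed and the script is executed from the repository root:

```python
# Minimal sketch: re-run every configured pre-commit hook against the whole tree
# after bumping hook versions. Assumes the `pre-commit` CLI is installed and the
# working directory is the repository root.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)              # (re)register the git hook
subprocess.run(["pre-commit", "run", "--all-files"], check=True)   # build the new hook envs and lint every file
```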
@@ -105,7 +105,7 @@ We also upstreamed several INT8 models into HuggingFace [model hub](https://hugg
2. User specifies fp32 'model', calibration dataset 'q_dataloader' and a custom "eval_func" which encapsulates the evaluation dataset and metrics by itself.

## 2. Code Prepare
-We update `run_glue.py` like belows:
+We update `run_glue.py` like below:

```python
trainer = QuestionAnsweringTrainer(
    # ...
```
@@ -483,7 +483,7 @@ def clear_memo(self):
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
-useful when re-using picklers.
+useful when reusing picklers.
"""
self.memo.clear()

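As background for the docstring fix above: the method mirrors the standard-library `pickle.Pickler.clear_memo()`. A small sketch of the reuse pattern the docstring describes, shown here with the stock `pickle` module purely for illustration:

```python
# Sketch of reusing a single Pickler for independent dumps; the standard-library
# pickle module is used here only to illustrate the pattern.
import io
import pickle

buf = io.BytesIO()
pickler = pickle.Pickler(buf)

payload = {"weights": [1, 2, 3]}
pickler.dump(payload)   # first dump records `payload` in the memo
pickler.clear_memo()    # forget previously seen objects before reusing the pickler
pickler.dump(payload)   # second dump pickles the object by value again, not by memo reference
```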
8 changes: 4 additions & 4 deletions neural_compressor/common/utils/utility.py
@@ -108,13 +108,13 @@ def __init__(self):
max_extension_support = cpuid.get_max_extension_support()
if max_extension_support >= 7:
ecx = cpuid._run_asm(
-b"\x31\xC9", # xor ecx, ecx
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
+b"\x31\xc9", # xor ecx, ecx
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
)
self._vnni = bool(ecx & (1 << 11))
eax = cpuid._run_asm(
-b"\xB9\x01\x00\x00\x00", # mov ecx, 1
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
+b"\xb9\x01\x00\x00\x00", # mov ecx, 1
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
)
self._bf16 = bool(eax & (1 << 5))
self._info = info
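The edits above (repeated identically in the other two `utility.py` copies below) only change the case of the hex escape sequences; the resulting byte strings, and therefore the CPUID feature checks for VNNI and BF16, are unchanged. A quick check:

```python
# Hex escape case in Python byte literals is purely cosmetic: the literals compare
# equal, so the assembled machine code (mov eax, 7; cpuid; mov eax, ecx; ret) is
# identical before and after this change.
old = b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3"
new = b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3"
assert old == new
```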
8 changes: 4 additions & 4 deletions neural_compressor/tensorflow/utils/utility.py
@@ -220,13 +220,13 @@ def __init__(self):
max_extension_support = cpuid.get_max_extension_support()
if max_extension_support >= 7:
ecx = cpuid._run_asm(
-b"\x31\xC9", # xor ecx, ecx
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
+b"\x31\xc9", # xor ecx, ecx
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
)
self._vnni = bool(ecx & (1 << 11))
eax = cpuid._run_asm(
-b"\xB9\x01\x00\x00\x00", # mov ecx, 1
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
+b"\xb9\x01\x00\x00\x00", # mov ecx, 1
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
)
self._bf16 = bool(eax & (1 << 5))
if "arch" in info and "ARM" in info["arch"]: # pragma: no cover
@@ -482,7 +482,7 @@ def clear_memo(self):
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
-useful when re-using picklers.
+useful when reusing picklers.
"""
self.memo.clear()

8 changes: 4 additions & 4 deletions neural_compressor/utils/utility.py
@@ -239,13 +239,13 @@ def __init__(self):
max_extension_support = cpuid.get_max_extension_support()
if max_extension_support >= 7:
ecx = cpuid._run_asm(
-b"\x31\xC9", # xor ecx, ecx
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret
+b"\x31\xc9", # xor ecx, ecx
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret
)
self._vnni = bool(ecx & (1 << 11))
eax = cpuid._run_asm(
-b"\xB9\x01\x00\x00\x00", # mov ecx, 1
-b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret
+b"\xb9\x01\x00\x00\x00", # mov ecx, 1
+b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret
)
self._bf16 = bool(eax & (1 << 5))
self._info = info
4 changes: 2 additions & 2 deletions test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py
@@ -1965,7 +1965,7 @@ def test_fp16(self):
weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]]
node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]]
model = self.build_model(inps, outs, weights, node_infos)
-input_data = self.build_test_data(["input1"], [(2)], ["float32"])
+input_data = self.build_test_data(["input1"], [2], ["float32"])
convert_model = self.get_fp16_mixed_precision_model(model)
self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()]))
self.assertTrue(10 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"]))
@@ -2190,7 +2190,7 @@ def test_bf16(self):
weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]]
node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]]
model = self.build_model(inps, outs, weights, node_infos)
-input_data = self.build_test_data(["input1"], [(2)], ["float32"])
+input_data = self.build_test_data(["input1"], [2], ["float32"])
convert_model = self.get_bf16_mixed_precision_model(model)
self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()]))
self.assertTrue(16 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"]))
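The `[(2)]` → `[2]` change in both tests is behavior-preserving: `(2)` is just the integer 2 in redundant parentheses, not a one-element tuple, so the shape argument passed to the test helper is the same value either way. For illustration:

```python
# `(2)` is a parenthesized int, not a tuple; only a trailing comma creates a tuple.
import numpy as np

assert (2) == 2
assert [(2)] == [2]
assert (2,) != 2  # a genuine one-element tuple is a different object
# np.random.random((2)) is therefore the same call as np.random.random(2);
# both produce an array of shape (2,):
assert np.random.random((2)).shape == np.random.random(2).shape == (2,)
```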