diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 92ff6c02d82..b94000b727b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -72,7 +72,7 @@ repos:
         name: Unused noqa
 
   - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
+    rev: 6.0.1
     hooks:
       - id: isort
         exclude: |
@@ -101,7 +101,7 @@ repos:
           )$
 
   - repo: https://github.com/psf/black.git
-    rev: 24.10.0
+    rev: 25.1.0
     hooks:
       - id: black
         files: (.*\.py)$
@@ -130,7 +130,7 @@ repos:
           )$
 
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.3.0
+    rev: v2.4.1
     hooks:
       - id: codespell
         args: [-w]
@@ -149,7 +149,7 @@ repos:
           )$
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.6
+    rev: v0.11.4
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix, --no-cache]
diff --git a/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/README.md b/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/README.md
index abe73321134..c8cb525b1f4 100644
--- a/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/README.md
+++ b/examples/pytorch/nlp/huggingface_models/text-classification/quantization/ptq_dynamic/fx/README.md
@@ -105,7 +105,7 @@ We also upstreamed several INT8 models into HuggingFace [model hub](https://hugg
 2. User specifies fp32 'model', calibration dataset 'q_dataloader' and a custom "eval_func" which encapsulates the evaluation dataset and metrics by itself.
 
 ## 2. Code Prepare
-We update `run_glue.py` like belows:
+We update `run_glue.py` like below:
 
 ```python
 trainer = QuestionAnsweringTrainer(
diff --git a/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle.py b/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle.py
index eac4ce343e0..858473791bd 100644
--- a/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle.py
+++ b/neural_compressor/adaptor/torch_utils/layer_wise_quant/modified_pickle.py
@@ -483,7 +483,7 @@ def clear_memo(self):
         The memo is the data structure that remembers which objects the
         pickler has already seen, so that shared or recursive objects
         are pickled by reference and not by value.  This method is
-        useful when re-using picklers.
+        useful when reusing picklers.
""" self.memo.clear() diff --git a/neural_compressor/common/utils/utility.py b/neural_compressor/common/utils/utility.py index 9285864a14c..e585c435dc3 100644 --- a/neural_compressor/common/utils/utility.py +++ b/neural_compressor/common/utils/utility.py @@ -108,13 +108,13 @@ def __init__(self): max_extension_support = cpuid.get_max_extension_support() if max_extension_support >= 7: ecx = cpuid._run_asm( - b"\x31\xC9", # xor ecx, ecx - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret + b"\x31\xc9", # xor ecx, ecx + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret ) self._vnni = bool(ecx & (1 << 11)) eax = cpuid._run_asm( - b"\xB9\x01\x00\x00\x00", # mov ecx, 1 - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret + b"\xb9\x01\x00\x00\x00", # mov ecx, 1 + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret ) self._bf16 = bool(eax & (1 << 5)) self._info = info diff --git a/neural_compressor/tensorflow/utils/utility.py b/neural_compressor/tensorflow/utils/utility.py index 44a7af398a1..279f091a63a 100644 --- a/neural_compressor/tensorflow/utils/utility.py +++ b/neural_compressor/tensorflow/utils/utility.py @@ -220,13 +220,13 @@ def __init__(self): max_extension_support = cpuid.get_max_extension_support() if max_extension_support >= 7: ecx = cpuid._run_asm( - b"\x31\xC9", # xor ecx, ecx - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret + b"\x31\xc9", # xor ecx, ecx + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret ) self._vnni = bool(ecx & (1 << 11)) eax = cpuid._run_asm( - b"\xB9\x01\x00\x00\x00", # mov ecx, 1 - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret + b"\xb9\x01\x00\x00\x00", # mov ecx, 1 + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret ) self._bf16 = bool(eax & (1 << 5)) if "arch" in info and "ARM" in info["arch"]: # pragma: no cover diff --git a/neural_compressor/torch/algorithms/layer_wise/modified_pickle.py b/neural_compressor/torch/algorithms/layer_wise/modified_pickle.py index f336930e718..a2b780efd59 100644 --- a/neural_compressor/torch/algorithms/layer_wise/modified_pickle.py +++ b/neural_compressor/torch/algorithms/layer_wise/modified_pickle.py @@ -482,7 +482,7 @@ def clear_memo(self): The memo is the data structure that remembers which objects the pickler has already seen, so that shared or recursive objects are pickled by reference and not by value. This method is - useful when re-using picklers. + useful when reusing picklers. 
""" self.memo.clear() diff --git a/neural_compressor/utils/utility.py b/neural_compressor/utils/utility.py index ed12488205e..e934d580474 100644 --- a/neural_compressor/utils/utility.py +++ b/neural_compressor/utils/utility.py @@ -239,13 +239,13 @@ def __init__(self): max_extension_support = cpuid.get_max_extension_support() if max_extension_support >= 7: ecx = cpuid._run_asm( - b"\x31\xC9", # xor ecx, ecx - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xC8" b"\xC3", # mov eax, 7 # cpuid # mov ax, cx # ret + b"\x31\xc9", # xor ecx, ecx + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\x89\xc8" b"\xc3", # mov eax, 7 # cpuid # mov ax, cx # ret ) self._vnni = bool(ecx & (1 << 11)) eax = cpuid._run_asm( - b"\xB9\x01\x00\x00\x00", # mov ecx, 1 - b"\xB8\x07\x00\x00\x00" b"\x0f\xa2" b"\xC3", # mov eax, 7 # cpuid # ret + b"\xb9\x01\x00\x00\x00", # mov ecx, 1 + b"\xb8\x07\x00\x00\x00" b"\x0f\xa2" b"\xc3", # mov eax, 7 # cpuid # ret ) self._bf16 = bool(eax & (1 << 5)) self._info = info diff --git a/test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py b/test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py index 2ebe0e10551..584c5aa3177 100644 --- a/test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py +++ b/test/adaptor/onnxrt_adaptor/test_onnxrt_operators.py @@ -1965,7 +1965,7 @@ def test_fp16(self): weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]] node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]] model = self.build_model(inps, outs, weights, node_infos) - input_data = self.build_test_data(["input1"], [(2)], ["float32"]) + input_data = self.build_test_data(["input1"], [2], ["float32"]) convert_model = self.get_fp16_mixed_precision_model(model) self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()])) self.assertTrue(10 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"])) @@ -2190,7 +2190,7 @@ def test_bf16(self): weights = [["input2", TensorProto.FLOAT, [2], np.random.random((2))]] node_infos = [["test", ["input1", "input2"], ["output"], optype, "com.microsoft"]] model = self.build_model(inps, outs, weights, node_infos) - input_data = self.build_test_data(["input1"], [(2)], ["float32"]) + input_data = self.build_test_data(["input1"], [2], ["float32"]) convert_model = self.get_bf16_mixed_precision_model(model) self.assertTrue("Cast" in set([i.op_type for i in convert_model.nodes()])) self.assertTrue(16 in set([i.attribute[0].i for i in convert_model.nodes() if i.op_type == "Cast"]))