diff --git a/paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h b/paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h
index cfd4875c73bf3e..f8f0056ff5829f 100644
--- a/paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h
+++ b/paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h
@@ -59,12 +59,21 @@ inline std::unordered_map GetAttributeMap(
 inline void SetActivationAttrs(paddle::framework::OpDesc* fused_op,
                                paddle::framework::OpDesc* act_op,
                                const std::string& act_type) {
-  if (fused_op->HasAttr("use_mkldnn")) {
+  bool use_mkldnn = false;
+  if (fused_op->HasAttr("use_mkldnn") && !fused_op->HasAttr("use_onednn")) {
     PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn")),
                    common::errors::PreconditionNotMet(
-                       "oneDNN activation fuses require use_mkldnn=True"));
+                       "oneDNN activation fuses require use_onednn=True"));
+  }
+  if (fused_op->HasAttr("use_mkldnn")) {
+    use_mkldnn = PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn"));
+  }
+  if (!use_mkldnn && fused_op->HasAttr("use_onednn")) {
+    PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_onednn")),
+                   common::errors::PreconditionNotMet(
+                       "oneDNN activation fuses require use_onednn=True"));
   }
-  fused_op->SetAttr("use_mkldnn", true);
+  fused_op->SetAttr("use_onednn", true);

   auto attr_map = GetAttributeMap(act_type);
   for (const auto& attr : attr_map) {
diff --git a/python/paddle/static/quantization/quant_int8_onednn_pass.py b/python/paddle/static/quantization/quant_int8_onednn_pass.py
index 2387e8bd9b70f7..909a94427c9718 100644
--- a/python/paddle/static/quantization/quant_int8_onednn_pass.py
+++ b/python/paddle/static/quantization/quant_int8_onednn_pass.py
@@ -177,7 +177,7 @@ def _transform_to_conv_onednn(self, graph, op_node):
         conv_op_node.set_attr("Scale_weights", scale_w)
         conv_op_node.set_attr("Scale_in", scale_in)
         conv_op_node.set_attr("Scale_out", 1.0)
-        conv_op_node.set_attr("use_mkldnn", 1)
+        conv_op_node.set_attr("use_onednn", 1)
         conv_op_node.set_attr("force_fp32_output", 1)
         graph.link_to(input_var_node, conv_op_node)
         graph.link_to(weight_var_node, conv_op_node)
@@ -223,7 +223,7 @@ def _transform_to_mul_onednn(self, graph, op_node):
         mul_op_node.set_attr("scale_y", scale_w)
         mul_op_node.set_attr("scale_x", scale_in)
         mul_op_node.set_attr("scale_out", 1.0)
-        mul_op_node.set_attr("use_mkldnn", 1)
+        mul_op_node.set_attr("use_onednn", 1)
         mul_op_node.set_attr("force_fp32_output", 1)
         graph.link_to(input_var_node, mul_op_node)
         graph.link_to(weight_var_node, mul_op_node)
@@ -248,7 +248,7 @@ def _transform_to_quantize_onednn(self, graph, op_node):
             op_type='quantize',
             attrs={
                 'data_format': 'ONEDNNLAYOUT',
-                'use_mkldnn': 1,
+                'use_onednn': 1,
                 'Scale': scale_in,
                 'is_negative_input': 1,
             },
diff --git a/test/deprecated/ir/inference/CMakeLists.txt b/test/deprecated/ir/inference/CMakeLists.txt
index 86f03ba89d9850..7fcff5451e2d2c 100755
--- a/test/deprecated/ir/inference/CMakeLists.txt
+++ b/test/deprecated/ir/inference/CMakeLists.txt
@@ -56,7 +56,7 @@ if(WIN32)
 endif()

-if(NOT WITH_MKLDNN
+if(NOT WITH_ONEDNN
    AND NOT TENSORRT_FOUND
    AND NOT WITH_GPU)
   foreach(target ${TEST_INFERENCE_CPU_UT})
diff --git a/test/deprecated/ir/inference/auto_scan_test.py b/test/deprecated/ir/inference/auto_scan_test.py
index 752b5f32d011ba..16a8dbf24c8f30 100755
--- a/test/deprecated/ir/inference/auto_scan_test.py
+++ b/test/deprecated/ir/inference/auto_scan_test.py
@@ -226,7 +226,7 @@ def create_inference_config(
         self,
         passes: list[str] | None = None,
         use_gpu: bool = False,
-        use_mkldnn: bool = False,
+        use_onednn: bool = False,
         use_xpu: bool = False,
         ir_optim: bool | None = None,
     ):
@@ -238,7 +238,7 @@ def create_inference_config(
         config.switch_ir_optim(ir_optim)
         if use_gpu:
             config.enable_use_gpu(100, 0)
-        if not use_mkldnn:
+        if not use_onednn:
             config.disable_onednn()
         if use_xpu:
             config.enable_xpu()
@@ -337,7 +337,7 @@ def run_test(self, quant=False, *args, **kwargs):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic["use_gpu"] = enable_gpu
         return str(dic)
@@ -573,7 +573,7 @@ def run_test(self, quant=False, prog_configs=None):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic['use_gpu'] = enable_gpu
         enable_xpu = config.use_xpu()
diff --git a/test/deprecated/ir/inference/inference_pass_test.py b/test/deprecated/ir/inference/inference_pass_test.py
index 739716382f50bd..acf9b68aefa458 100644
--- a/test/deprecated/ir/inference/inference_pass_test.py
+++ b/test/deprecated/ir/inference/inference_pass_test.py
@@ -129,7 +129,7 @@ def _get_inference_outs(self, config):
         return outs

     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
@@ -177,7 +177,7 @@ def _get_analysis_config(
             if self.enable_tensorrt_varseqlen:
                 config.enable_tensorrt_varseqlen()

-        elif use_mkldnn:
+        elif use_onednn:
             config.enable_onednn()
             if self.enable_onednn_bfloat16:
                 config.enable_onednn_bfloat16()
@@ -186,7 +186,7 @@ def _get_analysis_config(
     def check_output(self, atol=1e-3):
         '''
         Check whether calculating on CPU and GPU, enable TensorRT
-        or disable TensorRT, enable MKLDNN or disable MKLDNN
+        or disable TensorRT, enable ONEDNN or disable ONEDNN
         are all the same.
         '''
         self.assertFalse(
@@ -201,7 +201,7 @@ def check_output_with_option(
     ):
         '''
         Check whether calculating on CPU and GPU, enable TensorRT
-        or disable TensorRT, enable MKLDNN or disable MKLDNN
+        or disable TensorRT, enable ONEDNN or disable ONEDNN
         are all the same.
         '''
         place = base.CUDAPlace(0) if use_gpu else base.CPUPlace()
@@ -287,13 +287,13 @@ def check_output_with_option(
         if (not use_gpu) and self.enable_mkldnn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
                 )
             )

             self.assertTrue(
                 len(paddle_outs) == len(onednn_outputs),
-                "The number of outputs is different between CPU and MKLDNN. ",
+                "The number of outputs is different between CPU and ONEDNN. ",
             )

             if self.enable_onednn_bfloat16:
@@ -304,7 +304,7 @@ def check_output_with_option(
                         onednn_output,
                         rtol=1e-05,
                         atol=atol,
-                        err_msg='Output has diff between CPU and MKLDNN. ',
+                        err_msg='Output has diff between CPU and ONEDNN. ',
                     )

     class TensorRTParam:
diff --git a/test/deprecated/ir/inference/quant_dequant_test.py b/test/deprecated/ir/inference/quant_dequant_test.py
index 69f2ddfaaa4fda..cb3ddc06b76f13 100644
--- a/test/deprecated/ir/inference/quant_dequant_test.py
+++ b/test/deprecated/ir/inference/quant_dequant_test.py
@@ -190,7 +190,7 @@ def _get_inference_outs(self, config):
         return outs

     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
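Functionally, the renamed keyword only changes what callers spell; the config calls it drives (enable_onednn, disable_onednn, onednn_enabled) are the ones already used in the hunks above. A minimal sketch of that pattern, assuming paddle.inference.Config and a hypothetical standalone make_config helper rather than the actual test base classes:

from paddle.inference import Config

def make_config(model_dir, use_gpu=False, use_onednn=False):
    # Mirrors create_inference_config/_get_analysis_config after the rename:
    # the caller passes use_onednn and the config is toggled via its oneDNN API.
    config = Config(model_dir)
    if use_gpu:
        config.enable_use_gpu(100, 0)  # 100 MB initial GPU memory pool, device 0
    if use_onednn:
        config.enable_onednn()
    else:
        config.disable_onednn()
    return config

cfg = make_config("./inference_model", use_onednn=True)
print({"use_onednn": cfg.onednn_enabled(), "use_gpu": cfg.use_gpu()})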
@@ -230,7 +230,7 @@ def _get_analysis_config( if self.enable_tensorrt_varseqlen: config.enable_tensorrt_varseqlen() - elif use_mkldnn: + elif use_onednn: config.enable_onednn() if self.enable_onednn_bfloat16: config.enable_onednn_bfloat16() @@ -241,7 +241,7 @@ def check_output_with_option( ): ''' Check whether calculating on CPU and GPU, enable TensorRT - or disable TensorRT, enable MKLDNN or disable MKLDNN + or disable TensorRT, enable ONEDNN or disable ONEDNN are all the same. ''' place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace() @@ -390,13 +390,13 @@ def check_output_with_option( if (not use_gpu) and self.enable_mkldnn: onednn_outputs = self._get_inference_outs( self._get_analysis_config( - use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn + use_gpu=use_gpu, use_onednn=self.enable_mkldnn ) ) self.assertTrue( len(paddle_outs) == len(onednn_outputs), - "The number of outputs is different between CPU and MKLDNN. ", + "The number of outputs is different between CPU and ONEDNN. ", ) if self.enable_onednn_bfloat16: @@ -407,7 +407,7 @@ def check_output_with_option( onednn_output, rtol=1e-05, atol=atol, - err_msg='Output has diff between CPU and MKLDNN. ', + err_msg='Output has diff between CPU and ONEDNN. ', ) class TensorRTParam: diff --git a/test/deprecated/legacy_test/test_batch_norm_op_deprecated.py b/test/deprecated/legacy_test/test_batch_norm_op_deprecated.py index 9c4abf21fab0d2..bed1666fffa63b 100644 --- a/test/deprecated/legacy_test/test_batch_norm_op_deprecated.py +++ b/test/deprecated/legacy_test/test_batch_norm_op_deprecated.py @@ -303,7 +303,7 @@ def test_with_place(place, data_layout, shape): "epsilon": epsilon, "is_test": False, "data_layout": data_layout, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "fuse_with_relu": self.fuse_with_relu, "use_global_stats": self.use_global_stats, } diff --git a/test/deprecated/legacy_test/test_layer_norm_op_deprecated.py b/test/deprecated/legacy_test/test_layer_norm_op_deprecated.py index d487569028ddea..c097e5b3ce8c70 100644 --- a/test/deprecated/legacy_test/test_layer_norm_op_deprecated.py +++ b/test/deprecated/legacy_test/test_layer_norm_op_deprecated.py @@ -142,10 +142,10 @@ def check_forward_backward( has_scale=True, has_bias=True, y_grad_scale=1.0, - use_mkldnn=False, + use_onednn=False, ): def test_with_place( - place, shape, begin_norm_axis, use_mkldnn=use_mkldnn + place, shape, begin_norm_axis, use_onednn=use_onednn ): # attr epsilon = 0.00001 @@ -221,7 +221,7 @@ def test_with_place( attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": use_mkldnn, + "use_onednn": use_onednn, }, ) # generate backward op_desc diff --git a/test/deprecated/legacy_test/test_program_deprecated.py b/test/deprecated/legacy_test/test_program_deprecated.py index 5efba85dc5c0b0..582feeda7aabb2 100644 --- a/test/deprecated/legacy_test/test_program_deprecated.py +++ b/test/deprecated/legacy_test/test_program_deprecated.py @@ -153,7 +153,7 @@ class TestProgramProto(unittest.TestCase): def test_update_op(self): program = build_program() a = program.desc.serialize_to_string() - program.current_block().ops[0]._set_attr('use_mkldnn', True) + program.current_block().ops[0]._set_attr('use_onednn', True) self.assertTrue(program.desc.need_update()) b = program.desc.serialize_to_string() self.assertFalse(a == b) @@ -230,7 +230,7 @@ def test_program_update(self): hash1 = program.desc.cached_hash_str() id1 = id(program) # change mul's attr - program.current_block().ops[0]._set_attr('use_mkldnn', True) + 
program.current_block().ops[0]._set_attr('use_onednn', True) program.current_block().ops[0]._set_attr('scale_x', 2.0) hash2 = program.desc.cached_hash_str() id2 = id(program) diff --git a/test/deprecated/mkldnn/CMakeLists.txt b/test/deprecated/mkldnn/CMakeLists.txt index 12dfb5eb93d25b..997e554e2cd9de 100644 --- a/test/deprecated/mkldnn/CMakeLists.txt +++ b/test/deprecated/mkldnn/CMakeLists.txt @@ -1,12 +1,12 @@ file( - GLOB TEST_MKLDNN_LISTS + GLOB TEST_ONEDNN_LISTS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") -string(REPLACE ".py" "" TEST_MKLDNN_LISTS "${TEST_MKLDNN_LISTS}") +string(REPLACE ".py" "" TEST_ONEDNN_LISTS "${TEST_ONEDNN_LISTS}") if(WIN32) message(STATUS "Skip tests unrelated to onednn/mkldnn") elseif(WITH_ONEDNN) - foreach(target ${TEST_MKLDNN_LISTS}) + foreach(target ${TEST_ONEDNN_LISTS}) py_test_modules(${target} MODULES ${target}) set_tests_properties(${target} PROPERTIES LABELS "RUN_TYPE=INFER" TIMEOUT 120) diff --git a/test/deprecated/mkldnn/test_activation_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_activation_mkldnn_op_deprecated.py index 4bfa8ff2d99668..b03853ff809151 100644 --- a/test/deprecated/mkldnn/test_activation_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_activation_mkldnn_op_deprecated.py @@ -56,7 +56,7 @@ class TestONEDNNReluDim2(TestRelu): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -66,7 +66,7 @@ class TestONEDNNRelu_ZeroDim(TestRelu_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -75,7 +75,7 @@ def init_dtype(self): class TestONEDNNRelu6Dim2(TestRelu6): def setUp(self): super().setUp() - self.attrs.update({"use_mkldnn": True}) + self.attrs.update({"use_onednn": True}) self.check_pir_onednn = False def init_dtype(self): @@ -85,7 +85,7 @@ def init_dtype(self): class TestONEDNNRelu6_ZeroDim(TestRelu6_ZeroDim): def setUp(self): super().setUp() - self.attrs.update({"use_mkldnn": True}) + self.attrs.update({"use_onednn": True}) self.check_pir_onednn = False def init_dtype(self): @@ -96,7 +96,7 @@ class TestONEDNNLeakyReluDim2(TestLeakyRelu): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -117,7 +117,7 @@ class TestONEDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -135,7 +135,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -150,7 +150,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -165,7 +165,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True, "approximate": True} + self.attrs = {"use_onednn": True, "approximate": True} self.check_pir_onednn = False @@ -173,7 +173,7 @@ class TestONEDNNTanhDim2(TestTanh): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def 
init_dtype(self): @@ -184,7 +184,7 @@ class TestONEDNNTanh_ZeroDim(TestTanh_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -195,7 +195,7 @@ class TestONEDNNSqrtDim2(TestSqrt): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -206,7 +206,7 @@ class TestONEDNNSqrt_ZeroDim(TestSqrt_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -216,7 +216,7 @@ def init_dtype(self): class TestONEDNNAbsDim2(TestAbs): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -226,7 +226,7 @@ class TestONEDNNAbsZeroSize(TestAbs): def setUp(self): super().setUp() self.check_pir_onednn = True - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_shape(self): self.shape = [0, 12, 0] @@ -236,7 +236,7 @@ class TestONEDNNAbsZeroSize1(TestONEDNNAbsZeroSize): def setUp(self): super().setUp() self.check_pir_onednn = True - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_shape(self): self.shape = [0, 12, 0] @@ -245,7 +245,7 @@ def init_shape(self): class TestONEDNNAbs_ZeroDim(TestAbs_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -255,7 +255,7 @@ class TestONEDNNSwishDim2(TestSwish): def setUp(self): super().setUp() - self.attrs["use_mkldnn"] = True + self.attrs["use_onednn"] = True self.check_pir_onednn = False def init_dtype(self): @@ -266,7 +266,7 @@ class TestONEDNNSwish_ZeroDim(TestSwish_ZeroDim): def setUp(self): super().setUp() - self.attrs["use_mkldnn"] = True + self.attrs["use_onednn"] = True self.check_eager = False self.check_pir_onednn = False @@ -277,27 +277,27 @@ def init_dtype(self): class TestONEDNNHardSwishDim2(TestHardSwish): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False class TestONEDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False class TestONEDNNSigmoidDim2(TestSigmoid): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} class TestONEDNNSigmoid_ZeroDim(TestSigmoid_ZeroDim): def setUp(self): super().setUp() - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} class TestONEDNNReluDim4(TestRelu): @@ -311,7 +311,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -328,7 +328,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -356,7 +356,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -371,7 +371,7 
@@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True, "approximate": True} + self.attrs = {"use_onednn": True, "approximate": True} self.check_pir_onednn = False @@ -389,7 +389,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def test_check_output(self): @@ -413,7 +413,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True, "approximate": True} + self.attrs = {"use_onednn": True, "approximate": True} self.check_pir_onednn = False def test_check_output(self): @@ -431,7 +431,7 @@ def setUp(self): 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") } self.outputs = {'Out': np.tanh(self.inputs['X'])} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -443,7 +443,7 @@ def setUp(self): 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") } self.outputs = {'Out': np.sqrt(self.inputs['X'])} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -456,7 +456,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 self.inputs = {'X': x} self.outputs = {'Out': np.abs(self.inputs['X'])} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_dtype(self): self.dtype = np.float32 @@ -487,7 +487,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def init_dtype(self): @@ -505,7 +505,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -520,7 +520,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False @@ -533,7 +533,7 @@ def setUp(self): self.inputs = {'X': x} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def test_check_output(self): @@ -554,7 +554,7 @@ def setUp(self): self.inputs = {'X': x} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} self.check_pir_onednn = False def test_check_output(self): @@ -574,7 +574,7 @@ def setUp(self): out = 1 / (1 + np.exp(-x)) self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} class TestONEDNNEluDefaultAlpha(TestActivation): @@ -586,7 +586,7 @@ def setUp(self): x = np.random.random((5, 5, 4)).astype("float32") self.inputs = {'X': x} - self.attrs = {'use_mkldnn': True, 'alpha': self.alpha} + self.attrs = {'use_onednn': True, 'alpha': self.alpha} self.outputs = { 'Out': np.maximum(0, x) + np.minimum(0, self.alpha * (np.exp(x) - 1)) @@ -606,7 +606,7 @@ def setUp(self): x = np.random.random(()).astype("float32") self.inputs = {'X': x} - self.attrs = {'use_mkldnn': True, 'alpha': self.alpha} + self.attrs = {'use_onednn': True, 'alpha': self.alpha} self.outputs = { 'Out': np.maximum(0, x) + np.minimum(0, self.alpha * (np.exp(x) - 1)) @@ -629,7 
+629,7 @@ def setUp(self): x = np.random.random((5, 5, 4)).astype("float32") self.inputs = {'X': x} - self.attrs = {'use_mkldnn': True} + self.attrs = {'use_onednn': True} self.outputs = {'Out': np.exp(x)} self.check_pir_onednn = False @@ -641,7 +641,7 @@ def setUp(self): x = np.random.random(()).astype("float32") self.inputs = {'X': x} - self.attrs = {'use_mkldnn': True} + self.attrs = {'use_onednn': True} self.outputs = {'Out': np.exp(x)} self.check_pir_onednn = False @@ -674,7 +674,7 @@ def test_check(self): class TestONEDNNSoftplusDim2(TestSoftplus): def setUp(self): super().setUp() - self.attrs.update({"use_mkldnn": True}) + self.attrs.update({"use_onednn": True}) self.check_pir_onednn = False def init_dtype(self): @@ -684,7 +684,7 @@ def init_dtype(self): class TestONEDNNSoftplus_ZeroDim(TestSoftplus_ZeroDim): def setUp(self): super().setUp() - self.attrs.update({"use_mkldnn": True}) + self.attrs.update({"use_onednn": True}) def init_dtype(self): self.dtype = np.float32 diff --git a/test/deprecated/mkldnn/test_clip_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_clip_mkldnn_op_deprecated.py index bd9adb38dcc865..3f30cfee0892bd 100644 --- a/test/deprecated/mkldnn/test_clip_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_clip_mkldnn_op_deprecated.py @@ -59,7 +59,7 @@ def adjust_op_settings(self): pass def set_attrs(self): - self.attrs = {'min': 7.2, 'max': 9.6, 'use_mkldnn': True} + self.attrs = {'min': 7.2, 'max': 9.6, 'use_onednn': True} def test_check_output(self): self.check_output(check_dygraph=False, check_pir_onednn=True) diff --git a/test/deprecated/mkldnn/test_concat_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_concat_mkldnn_op_deprecated.py index 59e6590b0ddec1..9bef735b1e48a5 100644 --- a/test/deprecated/mkldnn/test_concat_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_concat_mkldnn_op_deprecated.py @@ -32,7 +32,7 @@ def setUp(self): self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]} self.attrs = { 'axis': self.axis, - 'use_mkldnn': True, + 'use_onednn': True, 'mkldnn_data_type': self.onednn_data_type, } @@ -117,7 +117,7 @@ def setUp(self): self.inputs = {'X': [(f'x{i}', self.x) for i in range(136)]} self.attrs = { 'axis': self.axis, - 'use_mkldnn': True, + 'use_onednn': True, 'mkldnn_data_type': self.onednn_data_type, } diff --git a/test/deprecated/mkldnn/test_layer_norm_bf16_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_layer_norm_bf16_mkldnn_op_deprecated.py index 9b656f3aa0bf85..52f03f6e3ff22a 100644 --- a/test/deprecated/mkldnn/test_layer_norm_bf16_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_layer_norm_bf16_mkldnn_op_deprecated.py @@ -116,7 +116,7 @@ def check_forward( attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": True, + "use_onednn": True, "is_test": with_is_test, }, ) diff --git a/test/deprecated/mkldnn/test_layer_norm_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_layer_norm_mkldnn_op_deprecated.py index a3d56abd628405..226a7602b5c58c 100644 --- a/test/deprecated/mkldnn/test_layer_norm_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_layer_norm_mkldnn_op_deprecated.py @@ -126,7 +126,7 @@ def check_forward( attrs={ "epsilon": epsilon, "begin_norm_axis": begin_norm_axis, - "use_mkldnn": True, + "use_onednn": True, "is_test": with_is_test, }, ) diff --git a/test/deprecated/mkldnn/test_prelu_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_prelu_mkldnn_op_deprecated.py index 304830b673fbe5..72e65827acf1a6 100644 --- 
a/test/deprecated/mkldnn/test_prelu_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_prelu_mkldnn_op_deprecated.py @@ -59,7 +59,7 @@ def setUp(self): self.x = np.random.random((2, 4, 5, 5)).astype("float32") + 1 self.init_attrs() self.set_inputs() - self.attrs = {'mode': self.mode, 'use_mkldnn': True} + self.attrs = {'mode': self.mode, 'use_onednn': True} self.set_dtype_attr() self.outputs = {'Out': ref_prelu(self.x, self.alpha, self.mode)} @@ -102,7 +102,7 @@ def setUp(self): self.x = np.random.random(()).astype("float32") self.init_attrs() self.set_inputs() - self.attrs = {'mode': self.mode, 'use_mkldnn': True} + self.attrs = {'mode': self.mode, 'use_onednn': True} self.set_dtype_attr() self.outputs = {'Out': self.x if self.x > 0 else self.x * self.alpha} diff --git a/test/deprecated/mkldnn/test_reduce_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_reduce_mkldnn_op_deprecated.py index 5c44e58f4f33e0..b9f52322bb95ba 100644 --- a/test/deprecated/mkldnn/test_reduce_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_reduce_mkldnn_op_deprecated.py @@ -26,7 +26,7 @@ def setUp(self): self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} self.outputs = {'Out': self.inputs['X'].sum(axis=0)} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} self.check_pir_onednn = True def test_check_output(self): @@ -53,7 +53,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': [2]} + self.attrs = {'use_onednn': self.use_onednn, 'dim': [2]} self.outputs = { 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) } @@ -66,7 +66,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((5, 10, 5, 3)).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': [0, 1, 2, 3]} + self.attrs = {'use_onednn': self.use_onednn, 'dim': [0, 1, 2, 3]} self.outputs = { 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) } @@ -77,7 +77,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")} - self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_mkldnn': True} + self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_onednn': True} self.outputs = { 'Out': self.inputs['X'].sum( axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim'] @@ -90,7 +90,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random(()).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': []} + self.attrs = {'use_onednn': self.use_onednn, 'dim': []} self.outputs = { 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) } @@ -103,7 +103,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")} - self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_mkldnn': True} + self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_onednn': True} self.outputs = { 'Out': self.inputs['X'].sum(keepdims=self.attrs['keep_dim']) } @@ -115,7 +115,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")} - self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_onednn} + self.attrs = {'reduce_all': True, 
'use_onednn': self.use_onednn} self.outputs = {'Out': self.inputs['X'].sum()} self.check_pir_onednn = False @@ -131,7 +131,7 @@ def setUp(self): self.op_type = "reduce_sum" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")} - self.attrs = {'dim': (), 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': (), 'use_onednn': self.use_onednn} self.outputs = {'Out': np.copy(self.inputs['X'])} @@ -146,7 +146,7 @@ def setUp(self): self.op_type = "reduce_max" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': [-1], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [-1], 'use_onednn': self.use_onednn} self.outputs = { 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) } @@ -161,7 +161,7 @@ def setUp(self): self.op_type = "reduce_max" self.use_onednn = True self.inputs = {'X': np.random.random(()).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': []} + self.attrs = {'use_onednn': self.use_onednn, 'dim': []} self.outputs = { 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) } @@ -180,7 +180,7 @@ def setUp(self): self.op_type = "reduce_max" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 10, 9)).astype("float32")} - self.attrs = {'dim': [-1, 0, 1], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [-1, 0, 1], 'use_onednn': self.use_onednn} self.outputs = { 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) } @@ -197,7 +197,7 @@ def setUp(self): self.op_type = "reduce_min" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': [2], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [2], 'use_onednn': self.use_onednn} self.outputs = { 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) } @@ -212,7 +212,7 @@ def setUp(self): self.op_type = "reduce_min" self.use_onednn = True self.inputs = {'X': np.random.random(()).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': []} + self.attrs = {'use_onednn': self.use_onednn, 'dim': []} self.outputs = { 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) } @@ -223,7 +223,7 @@ def setUp(self): self.op_type = "reduce_mean" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")} - self.attrs = {'dim': [0], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [0], 'use_onednn': self.use_onednn} self.outputs = { 'Out': self.inputs['X'].sum(axis=0) / self.inputs['X'].shape[0] } @@ -234,7 +234,7 @@ def setUp(self): self.op_type = "reduce_mean" self.use_onednn = True self.inputs = {'X': np.random.random(()).astype("float32")} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': []} + self.attrs = {'use_onednn': self.use_onednn, 'dim': []} self.outputs = { # scalar mean is equal to sum 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) @@ -246,7 +246,7 @@ def setUp(self): self.op_type = "reduce_mean" self.use_onednn = True self.inputs = {'X': np.random.random((5, 6, 8, 10)).astype("float32")} - self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_onednn} + self.attrs = {'reduce_all': True, 'use_onednn': self.use_onednn} self.outputs = { 'Out': self.inputs['X'].sum() / np.asarray(self.inputs['X'].shape).prod() diff --git a/test/deprecated/mkldnn/test_reshape_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_reshape_mkldnn_op_deprecated.py index be2c1c948a19cd..8f48abd784a29d 100644 --- a/test/deprecated/mkldnn/test_reshape_mkldnn_op_deprecated.py +++ 
b/test/deprecated/mkldnn/test_reshape_mkldnn_op_deprecated.py @@ -36,7 +36,7 @@ def setUp(self): 'XShape': np.random.random(self.ori_shape).astype("float32"), } self.x = self.inputs["X"] - self.attrs['use_mkldnn'] = True + self.attrs['use_onednn'] = True self.set_additional_inputs() self.set_outputs() @@ -208,7 +208,7 @@ def setUp(self): super().setUp() self.dtype = np.uint16 self.inputs = {"X": convert_float_to_uint16(self.x)} - self.attrs['use_mkldnn'] = True + self.attrs['use_onednn'] = True def calculate_grads(self): self.dout = self.outputs['Out'] diff --git a/test/deprecated/mkldnn/test_scale_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_scale_mkldnn_op_deprecated.py index 1d50d92e8e4581..9570bb2091edb8 100644 --- a/test/deprecated/mkldnn/test_scale_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_scale_mkldnn_op_deprecated.py @@ -25,7 +25,7 @@ def setUp(self): self.init_shape() self.op_type = "scale" self.inputs = {'X': np.random.random(self.shape).astype(np.float32)} - self.attrs = {'scale': -2.3, 'use_mkldnn': True, 'bias': 0.2} + self.attrs = {'scale': -2.3, 'use_onednn': True, 'bias': 0.2} self.use_onednn = True self.outputs = { 'Out': (self.inputs['X'] * self.attrs['scale']) + self.attrs['bias'] @@ -54,7 +54,7 @@ def setUp(self): self.inputs = {'X': np.random.random((10, 10)).astype(np.float32)} self.attrs = { 'scale': 1.5, - 'use_mkldnn': True, + 'use_onednn': True, 'bias': 2.3, 'bias_after_scale': False, } diff --git a/test/deprecated/mkldnn/test_softmax_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_softmax_mkldnn_op_deprecated.py index 6056535c6d9eb2..645d1e675e6bad 100644 --- a/test/deprecated/mkldnn/test_softmax_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_softmax_mkldnn_op_deprecated.py @@ -67,7 +67,7 @@ def setUp(self): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } def test_check_output(self): diff --git a/test/deprecated/mkldnn/test_split_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_split_mkldnn_op_deprecated.py index 3a01f29aa0d305..95d65ed46e8699 100644 --- a/test/deprecated/mkldnn/test_split_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_split_mkldnn_op_deprecated.py @@ -52,7 +52,7 @@ def setUp(self): self.init_data_type() self.init_test_case() self.inputs = {'X': self.x} - self.attrs = {'use_mkldnn': True, 'num': self.num} + self.attrs = {'use_onednn': True, 'num': self.num} if self.axis is not None: self.attrs['axis'] = self.axis diff --git a/test/deprecated/mkldnn/test_sum_mkldnn_op_deprecated.py b/test/deprecated/mkldnn/test_sum_mkldnn_op_deprecated.py index a00e1c6096757d..3ca84284f7f3f6 100644 --- a/test/deprecated/mkldnn/test_sum_mkldnn_op_deprecated.py +++ b/test/deprecated/mkldnn/test_sum_mkldnn_op_deprecated.py @@ -32,7 +32,7 @@ def setUp(self): self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def init_data_type(self): self.dtype = np.float32 @@ -73,7 +73,7 @@ def test_check_output(self): tensor.set(var_value, place) sum_op = Operator( - "sum", X=["x0", "x1"], Out=out_var_name, use_mkldnn=True + "sum", X=["x0", "x1"], Out=out_var_name, use_onednn=True ) expected_out = np.array(self.x0 + self.x1) sum_op.run(scope, place) diff --git a/test/deprecated/quantization/CMakeLists.txt b/test/deprecated/quantization/CMakeLists.txt index c5b4d9d3a67137..dbf0dbd0806a43 100644 --- 
a/test/deprecated/quantization/CMakeLists.txt +++ b/test/deprecated/quantization/CMakeLists.txt @@ -5,13 +5,13 @@ file( string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") function(_inference_analysis_python_api_int8_test target model_dir data_path - filename use_mkldnn) + filename use_onednn) py_test( ${target} SRCS ${filename} ENVS CPU_NUM_THREADS=${CPU_NUM_THREADS_ON_CI} - FLAGS_use_onednn=${use_mkldnn} + FLAGS_use_onednn=${use_onednn} ARGS --infer_model ${model_dir}/model @@ -207,7 +207,7 @@ if(NOT WITH_GPU) list(REMOVE_ITEM TEST_OPS test_apply_per_channel_scale) endif() -if(LINUX AND WITH_MKLDNN) +if(LINUX AND WITH_ONEDNN) #### Image classification dataset: ImageNet (small) # The dataset should already be downloaded for INT8v2 unit tests diff --git a/test/deprecated/quantization/test_quant2_int8_mkldnn_pass_deprecated.py b/test/deprecated/quantization/test_quant2_int8_mkldnn_pass_deprecated.py index d7221b53ecbd50..2a73ad7154f4fe 100644 --- a/test/deprecated/quantization/test_quant2_int8_mkldnn_pass_deprecated.py +++ b/test/deprecated/quantization/test_quant2_int8_mkldnn_pass_deprecated.py @@ -64,7 +64,7 @@ def prepare_program_mul(self, program): type=self.op_name(), inputs={"X": block.var('mul_input'), "Y": block.var('mul_weights')}, outputs={"Out": block.var('mul_output')}, - attrs={'use_mkldnn': self.use_onednn}, + attrs={'use_onednn': self.use_onednn}, ) def test_dequantize_op_weights(self): @@ -179,7 +179,7 @@ def prepare_program_conv2d(self, program): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu': True, }, @@ -197,7 +197,7 @@ def prepare_program_conv2d(self, program): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, }, ) @@ -312,7 +312,7 @@ def prepare_program(self, program): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu': True, }, @@ -329,7 +329,7 @@ def prepare_program(self, program): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, }, ) block.append_op( diff --git a/test/deprecated/quantization/test_quantization_mkldnn_pass_deprecated.py b/test/deprecated/quantization/test_quantization_mkldnn_pass_deprecated.py index addd9aad1179b9..2100bdccaa4857 100644 --- a/test/deprecated/quantization/test_quantization_mkldnn_pass_deprecated.py +++ b/test/deprecated/quantization/test_quantization_mkldnn_pass_deprecated.py @@ -60,7 +60,7 @@ def conv_net(img, label): return avg_loss -class TestMKLDNNTransformBasedFreezePass(unittest.TestCase): +class TestONEDNNTransformBasedFreezePass(unittest.TestCase): def setUp(self): self.quantizable_op_and_inputs = { 'conv2d': ['Input', 'Filter'], diff --git a/test/dygraph_to_static/simnet_dygraph_model.py b/test/dygraph_to_static/simnet_dygraph_model.py index 35262bd77e8397..a3e19de4cc3670 100644 --- a/test/dygraph_to_static/simnet_dygraph_model.py +++ b/test/dygraph_to_static/simnet_dygraph_model.py @@ -410,7 +410,7 @@ def forward(self, input): type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}, - attrs={"use_mkldnn": False}, + attrs={"use_onednn": False}, ) if self._b is not None: diff --git 
a/test/legacy_test/hygon_dcu/hygon_llama_ops.py b/test/legacy_test/hygon_dcu/hygon_llama_ops.py index c6f0d6d20aa38d..4ead7b15c39028 100644 --- a/test/legacy_test/hygon_dcu/hygon_llama_ops.py +++ b/test/legacy_test/hygon_dcu/hygon_llama_ops.py @@ -480,7 +480,7 @@ def setUp(self): self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def init_kernel_type(self): self.dtype = np.float16 @@ -545,7 +545,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def init_kernel_type(self): @@ -631,7 +631,7 @@ def setUp(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def init_kernel_type(self): self.use_onednn = False diff --git a/test/legacy_test/op_test.py b/test/legacy_test/op_test.py index e6eca9654f330e..3a5d26c93b9516 100644 --- a/test/legacy_test/op_test.py +++ b/test/legacy_test/op_test.py @@ -633,8 +633,10 @@ def is_float16_op(self): def is_onednn_op(self): return (hasattr(self, "use_onednn") and self.use_onednn) or ( hasattr(self, "attrs") - and "use_mkldnn" in self.attrs - and self.attrs["use_mkldnn"] + and ( + ("use_mkldnn" in self.attrs and self.attrs["use_mkldnn"]) + or ("use_onednn" in self.attrs and self.attrs["use_onednn"]) + ) ) def is_xpu_op(self): @@ -2198,7 +2200,10 @@ def check_inplace_output_with_place( attrs_use_mkldnn = hasattr(self, 'attrs') and bool( self.attrs.get('use_mkldnn', False) ) - if flags_use_onednn or attrs_use_mkldnn: + attrs_use_onednn = hasattr(self, 'attrs') and bool( + self.attrs.get('use_onednn', False) + ) + if flags_use_onednn or attrs_use_mkldnn or attrs_use_onednn: warnings.warn( "check inplace_grad for ops using mkldnn is not supported" ) @@ -3441,9 +3446,13 @@ def check_grad_with_place( cache_list = self.cache_name_list # oneDNN numeric gradient should use CPU kernel - use_onednn = False + use_mkldnn = False if op_attrs.get("use_mkldnn"): op_attrs["use_mkldnn"] = False + use_mkldnn = True + use_onednn = False + if op_attrs.get("use_onednn"): + op_attrs["use_onednn"] = False use_onednn = True if hasattr(self, "attrs"): for k, v in self.attrs.items(): @@ -3459,8 +3468,10 @@ def check_grad_with_place( cache_list=cache_list, ) - if use_onednn: + if use_mkldnn: op_attrs["use_mkldnn"] = True + if use_onednn: + op_attrs["use_onednn"] = True if no_grad_set is None: no_grad_set = set() diff --git a/test/legacy_test/test_batch_norm_op.py b/test/legacy_test/test_batch_norm_op.py index b8525403e59876..556a3637791e34 100644 --- a/test/legacy_test/test_batch_norm_op.py +++ b/test/legacy_test/test_batch_norm_op.py @@ -317,7 +317,7 @@ def check_with_place(self, place, data_layout, dtype, shape): # attrs is_test=True, data_layout=data_layout, - use_mkldnn=self.use_onednn, + use_onednn=self.use_onednn, fuse_with_relu=self.fuse_with_relu, epsilon=epsilon, ) diff --git a/test/legacy_test/test_broadcast_tensors_op.py b/test/legacy_test/test_broadcast_tensors_op.py index 296aea9b007e3e..dfac9d35108a77 100644 --- a/test/legacy_test/test_broadcast_tensors_op.py +++ b/test/legacy_test/test_broadcast_tensors_op.py @@ -112,7 +112,7 @@ def set_dtype(self): 
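The op_test.py hunks above keep both spellings working while tests migrate: a test counts as a oneDNN op test if either attribute is set, and gradient checking temporarily clears whichever attribute is present so the numeric gradient runs on the plain CPU kernel, then restores it. A condensed sketch of the detection logic, written as a hypothetical free function rather than the actual OpTest method:

def is_onednn_op_test(test):
    # Either the explicit flag on the test object or either attribute spelling
    # marks the test as a oneDNN op test during the migration window.
    if getattr(test, "use_onednn", False):
        return True
    attrs = getattr(test, "attrs", {})
    return bool(attrs.get("use_mkldnn", False)) or bool(attrs.get("use_onednn", False))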
def setUp(self): self.op_type = "broadcast_tensors" self.use_onednn = False - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} self.test_gen_func_list = [ gen_rank_diff_test, gen_no_broadcast_test, @@ -198,7 +198,7 @@ def setUp(self): self.dtype = np.uint16 self.np_dtype = "float32" self.use_onednn = False - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} self.test_gen_func_list = [ gen_rank_diff_test, gen_no_broadcast_test, diff --git a/test/legacy_test/test_conv2d_op.py b/test/legacy_test/test_conv2d_op.py index 1186f1c2720115..4ee915872aa85a 100644 --- a/test/legacy_test/test_conv2d_op.py +++ b/test/legacy_test/test_conv2d_op.py @@ -483,7 +483,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search, @@ -817,7 +817,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search, diff --git a/test/legacy_test/test_conv2d_transpose_op.py b/test/legacy_test/test_conv2d_transpose_op.py index 1dbfeda253f482..f62e3b5277da6a 100644 --- a/test/legacy_test/test_conv2d_transpose_op.py +++ b/test/legacy_test/test_conv2d_transpose_op.py @@ -210,7 +210,7 @@ def setUp(self): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } if self.output_size is not None: diff --git a/test/legacy_test/test_conv3d_op.py b/test/legacy_test/test_conv3d_op.py index 65cc6c0c26431b..63c003118219f8 100644 --- a/test/legacy_test/test_conv3d_op.py +++ b/test/legacy_test/test_conv3d_op.py @@ -444,7 +444,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -804,7 +804,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } self.outputs = {'Output': output} diff --git a/test/legacy_test/test_elementwise_add_op.py b/test/legacy_test/test_elementwise_add_op.py index 4d8d2d2815d942..e0000e7d6aa992 100644 --- a/test/legacy_test/test_elementwise_add_op.py +++ b/test/legacy_test/test_elementwise_add_op.py @@ -47,7 +47,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def check_dygraph(self): @@ -244,7 +244,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.x)), 'Y': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.y)), } - self.attrs = {'axis': self.axis, 'use_mkldnn': False} + self.attrs = {'axis': self.axis, 'use_onednn': False} self.outputs = {'Out': convert_float_to_uint16(self.out)} self.if_enable_cinn() @@ 
-827,7 +827,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -968,7 +968,7 @@ def test_warnings(self): type="elementwise_add", inputs={'X': data, 'Y': data}, outputs={'Out': out}, - attrs={'axis': 1, 'use_mkldnn': False}, + attrs={'axis': 1, 'use_onednn': False}, ) self.assertTrue( "op elementwise_add's attr axis = 1 is not the default value: -1" @@ -1042,7 +1042,7 @@ def setUp(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def check_dygraph(self): diff --git a/test/legacy_test/test_elementwise_div_op.py b/test/legacy_test/test_elementwise_div_op.py index 0ff6dd4a26bac8..e6502ebef6146b 100644 --- a/test/legacy_test/test_elementwise_div_op.py +++ b/test/legacy_test/test_elementwise_div_op.py @@ -589,7 +589,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): diff --git a/test/legacy_test/test_elementwise_floordiv_op.py b/test/legacy_test/test_elementwise_floordiv_op.py index 1a8266f27beb75..186592c609e56a 100644 --- a/test/legacy_test/test_elementwise_floordiv_op.py +++ b/test/legacy_test/test_elementwise_floordiv_op.py @@ -43,7 +43,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def test_check_output(self): diff --git a/test/legacy_test/test_elementwise_mod_op.py b/test/legacy_test/test_elementwise_mod_op.py index 3620215c186114..618643229d73ec 100644 --- a/test/legacy_test/test_elementwise_mod_op.py +++ b/test/legacy_test/test_elementwise_mod_op.py @@ -46,7 +46,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def test_check_output(self): @@ -195,7 +195,7 @@ def setUp(self): 'X': convert_float_to_uint16(OpTest.np_dtype_to_base_dtype(self.x)), 'Y': convert_float_to_uint16(OpTest.np_dtype_to_base_dtype(self.y)), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': convert_float_to_uint16(self.out)} def test_check_output(self): diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index a4f365ea92b1a8..8c6fbc679213af 100644 --- a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -49,7 +49,7 @@ def setUp(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def test_check_output(self): # TODO(wangzhongpu): support onednn op in dygraph mode @@ -242,7 +242,7 @@ def setUp(self): 'Y': 
OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.y)), } self.outputs = {'Out': convert_float_to_uint16(self.out)} - self.attrs = {'axis': self.axis, 'use_mkldnn': False} + self.attrs = {'axis': self.axis, 'use_onednn': False} self.if_enable_cinn() def test_check_output(self): @@ -381,7 +381,7 @@ def init_input_attr_output(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def init_dtype(self): self.dtype = np.float64 @@ -406,7 +406,7 @@ def init_input_attr_output(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def init_axis(self): self.axis = 0 @@ -592,7 +592,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): diff --git a/test/legacy_test/test_elementwise_sub_op.py b/test/legacy_test/test_elementwise_sub_op.py index 28e336539f868d..736f1b33d7f7c5 100644 --- a/test/legacy_test/test_elementwise_sub_op.py +++ b/test/legacy_test/test_elementwise_sub_op.py @@ -859,7 +859,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} self.if_check_prim() self.if_enable_cinn() @@ -1207,7 +1207,7 @@ def test_warnings(self): type="elementwise_sub", inputs={'X': data, 'Y': data}, outputs={'Out': out}, - attrs={'axis': 1, 'use_mkldnn': False}, + attrs={'axis': 1, 'use_onednn': False}, ) self.assertTrue( "op elementwise_sub's attr axis = 1 is not the default value: -1" diff --git a/test/legacy_test/test_expand_v2_op.py b/test/legacy_test/test_expand_v2_op.py index 5e7ca4b28f92ef..ccf32a49665cbf 100644 --- a/test/legacy_test/test_expand_v2_op.py +++ b/test/legacy_test/test_expand_v2_op.py @@ -764,7 +764,7 @@ def setUp(self): self.init_place() self.python_api = paddle.expand self.x = np.zeros(self.ori_shape).astype("float32") - self.attrs = {'shape': self.shape, 'use_mkldnn': True} + self.attrs = {'shape': self.shape, 'use_onednn': True} self.use_onednn = True self.set_inputs() self.set_additional_inputs() diff --git a/test/legacy_test/test_fc_op.py b/test/legacy_test/test_fc_op.py index d61c93361097b7..a740ce0c49c304 100644 --- a/test/legacy_test/test_fc_op.py +++ b/test/legacy_test/test_fc_op.py @@ -73,7 +73,7 @@ def setUp(self): activation_type = "relu" else: activation_type = "" - self.attrs = {'use_mkldnn': False, 'activation_type': activation_type} + self.attrs = {'use_onednn': False, 'activation_type': activation_type} self.outputs = { 'Out': fc_refer(self.matrix, self.with_bias, self.with_relu) diff --git a/test/legacy_test/test_fusion_gru_op.py b/test/legacy_test/test_fusion_gru_op.py index 950142835e6524..80f2bd185876b5 100644 --- a/test/legacy_test/test_fusion_gru_op.py +++ b/test/legacy_test/test_fusion_gru_op.py @@ -111,7 +111,7 @@ def setUp(self): 'gate_activation': self.act_gate, 'is_reverse': self.is_reverse, 'origin_mode': self.origin_mode, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } def test_check_output(self): diff --git 
a/test/legacy_test/test_gaussian_random_op.py b/test/legacy_test/test_gaussian_random_op.py index c4f860bcc7e973..36b8453b097865 100644 --- a/test/legacy_test/test_gaussian_random_op.py +++ b/test/legacy_test/test_gaussian_random_op.py @@ -40,7 +40,7 @@ def setUp(self): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, } paddle.seed(10) @@ -82,7 +82,7 @@ def setUp(self): "std": self.std, "seed": 10, "dtype": paddle.float16, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, } paddle.seed(10) @@ -134,7 +134,7 @@ def setUp(self): "std": self.std, "seed": 10, "dtype": paddle.bfloat16, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, } paddle.seed(10) @@ -184,7 +184,7 @@ def setUp(self): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.inputs = {"ShapeTensorList": shape_tensor_list} @@ -251,7 +251,7 @@ def setUp(self): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = {'Out': np.zeros((123, 92), dtype='float32')} diff --git a/test/legacy_test/test_kron_op.py b/test/legacy_test/test_kron_op.py index 05ff4b6dd777a4..7f634707a352f9 100644 --- a/test/legacy_test/test_kron_op.py +++ b/test/legacy_test/test_kron_op.py @@ -272,7 +272,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): diff --git a/test/legacy_test/test_matmul_v2_op.py b/test/legacy_test/test_matmul_v2_op.py index 5e1938dc704141..16bce228f637b5 100644 --- a/test/legacy_test/test_matmul_v2_op.py +++ b/test/legacy_test/test_matmul_v2_op.py @@ -713,7 +713,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -766,7 +766,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -828,7 +828,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -854,7 +854,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -880,7 +880,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -906,7 +906,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} 
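The operator-test hunks in this stretch are all the same mechanical substitution inside setUp; a representative minimal case after the change (hypothetical test following the pattern above, not taken from the tree):

import numpy as np
from op_test import OpTest

class TestElementwiseAddOneDNNAttr(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        x = np.random.random((3, 4)).astype("float32")
        y = np.random.random((3, 4)).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        # Only the attribute key changes: 'use_mkldnn' becomes 'use_onednn'.
        self.attrs = {'axis': -1, 'use_onednn': False}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        self.check_output()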
self.outputs = {'Out': self.out} def init_base_dtype(self): @@ -950,7 +950,7 @@ def setUp(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.out = np.matmul(self.x, self.y) - self.attrs = {'axis': -1, 'use_mkldnn': False} + self.attrs = {'axis': -1, 'use_onednn': False} self.outputs = {'Out': self.out} def init_input_output(self): diff --git a/test/legacy_test/test_pool2d_op.py b/test/legacy_test/test_pool2d_op.py index 3c38b4a1ec9381..b2eea65d3caef0 100644 --- a/test/legacy_test/test_pool2d_op.py +++ b/test/legacy_test/test_pool2d_op.py @@ -451,7 +451,7 @@ def setUp(self): 'pooling_type': self.pool_type, 'global_pooling': self.global_pool, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'ceil_mode': self.ceil_mode, 'data_format': self.data_format, 'exclusive': self.exclusive, diff --git a/test/legacy_test/test_reshape_op.py b/test/legacy_test/test_reshape_op.py index 06ff5633ba4b07..011ae2a55606d5 100755 --- a/test/legacy_test/test_reshape_op.py +++ b/test/legacy_test/test_reshape_op.py @@ -406,7 +406,7 @@ def setUp(self): self.inputs = {'X': OpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'shape': self.new_shape, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { "Out": self.inputs["X"].reshape(self.inferred_shape), diff --git a/test/legacy_test/test_sgd_op_bf16.py b/test/legacy_test/test_sgd_op_bf16.py index 25bacbbecf0aff..4cefc0c97df638 100644 --- a/test/legacy_test/test_sgd_op_bf16.py +++ b/test/legacy_test/test_sgd_op_bf16.py @@ -49,7 +49,7 @@ def setUp(self): self.inputs = {'Param': w_bf16, 'Grad': g_bf16, 'LearningRate': lr_bf16} self.outputs = {'ParamOut': w - lr * g} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def conf(self): self.h = 102 @@ -157,7 +157,7 @@ def test_sparse_grad_sgd(self): Grad='Grad', ParamOut='Param', LearningRate='LearningRate', - use_mkldnn=True, + use_onednn=True, ) sgd_op.run(scope, place) @@ -215,7 +215,7 @@ def test_sparse_param_grad_sgd(self): Grad='Grad', ParamOut='Param', LearningRate='LearningRate', - use_mkldnn=True, + use_onednn=True, ) sgd_op.run(scope, place) diff --git a/test/legacy_test/test_slice_op.py b/test/legacy_test/test_slice_op.py index 88e8b802c5a704..a75b4192ac986a 100644 --- a/test/legacy_test/test_slice_op.py +++ b/test/legacy_test/test_slice_op.py @@ -160,7 +160,7 @@ def setUp(self): 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags, - 'use_mkldnn': True, + 'use_onednn': True, } def config(self): diff --git a/test/legacy_test/test_softmax_op.py b/test/legacy_test/test_softmax_op.py index d44102567b6c84..1b9ce32daac00c 100644 --- a/test/legacy_test/test_softmax_op.py +++ b/test/legacy_test/test_softmax_op.py @@ -78,7 +78,7 @@ def setUp(self): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.enable_cinn = True @@ -161,7 +161,7 @@ def setUp(self): self.attrs = { 'axis': -1, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.enable_cinn = False @@ -210,7 +210,7 @@ def setUp(self): self.attrs = { 'axis': -1, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.enable_cinn = False @@ -487,7 +487,7 @@ def setUp(self): self.attrs = { 'axis': self.axis, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } def init_cudnn(self): diff --git 
a/test/legacy_test/test_sum_op.py b/test/legacy_test/test_sum_op.py index 0746cc46d022a9..f310d4400e2847 100644 --- a/test/legacy_test/test_sum_op.py +++ b/test/legacy_test/test_sum_op.py @@ -37,7 +37,7 @@ from paddle.framework import in_pir_mode -def sum_wrapper(X, use_mkldnn=False): +def sum_wrapper(X, use_onednn=False): res = paddle.full(shape=X[0].shape, fill_value=0.0, dtype=X[0].dtype) for x in X: res = paddle.add(res, x) @@ -59,7 +59,7 @@ def setUp(self): self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def init_kernel_type(self): self.dtype = np.float64 diff --git a/test/legacy_test/test_transpose_op.py b/test/legacy_test/test_transpose_op.py index 69f2f55dd063a7..c229b0578a8724 100644 --- a/test/legacy_test/test_transpose_op.py +++ b/test/legacy_test/test_transpose_op.py @@ -35,7 +35,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), @@ -146,7 +146,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), @@ -169,7 +169,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), @@ -191,7 +191,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype("float64")} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float64"), @@ -234,7 +234,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype(self.dtype), @@ -279,7 +279,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': convert_float_to_uint16( @@ -330,7 +330,7 @@ def setUp(self): self.inputs = {'X': x} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype(self.dtype), @@ -376,7 +376,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': convert_float_to_uint16( diff --git a/test/mkldnn/onednn_op_test.py b/test/mkldnn/onednn_op_test.py index 7eabd3b4d9c0ff..171000f910ded9 100644 --- a/test/mkldnn/onednn_op_test.py +++ b/test/mkldnn/onednn_op_test.py @@ -48,7 +48,7 @@ def check_if_onednn_primitives_exist_in_bwd( 'X': block.var('x'), }, outputs={'Out': block.var('out')}, - attrs={'use_mkldnn': True}, + attrs={'use_onednn': True}, ) # Generate backward op_desc @@ -122,7 +122,7 @@ def 
check_if_onednn_batchnorm_primitives_exist_in_bwd( "epsilon": test_case.epsilon, "is_test": False, "data_layout": data_layout, - "use_mkldnn": test_case.use_mkldnn, + "use_onednn": test_case.use_onednn, "fuse_with_relu": test_case.fuse_with_relu, "use_global_stats": test_case.use_global_stats, }, diff --git a/test/mkldnn/test_activation_bf16_mkldnn_op.py b/test/mkldnn/test_activation_bf16_mkldnn_op.py index e5ac9d71a044a3..d9685692eb9a72 100644 --- a/test/mkldnn/test_activation_bf16_mkldnn_op.py +++ b/test/mkldnn/test_activation_bf16_mkldnn_op.py @@ -39,7 +39,7 @@ def op_grad(self, dout, x): pass def set_attrs(self): - self.attrs = {"use_mkldnn": True} + self.attrs = {"use_onednn": True} def init_data(self): self.x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32) @@ -147,7 +147,7 @@ def op_grad(self, dout, x): ) def set_attrs(self): - self.attrs = {"use_mkldnn": True, "approximate": True} + self.attrs = {"use_onednn": True, "approximate": True} class TestONEDNNGeluTanhDim2BF16Op(TestONEDNNGeluTanhBF16Op): @@ -211,7 +211,7 @@ def op_grad(self, dout, x): def set_attrs(self): self.alpha = 0.2 - self.attrs = {"use_mkldnn": True, "alpha": self.alpha} + self.attrs = {"use_onednn": True, "alpha": self.alpha} class TestONEDNNSwishBF16Op(ONEDNNBF16ActivationOp, TestActivation): @@ -230,7 +230,7 @@ def op_grad(self, dout, x): def set_attrs(self): self.beta = 0.2 - self.attrs = {"use_mkldnn": True, "beta": self.beta} + self.attrs = {"use_onednn": True, "beta": self.beta} class TestONEDNNHardSwishBF16Op(ONEDNNBF16ActivationOp, TestActivation): @@ -284,7 +284,7 @@ def op_grad(self, dout, x): def set_attrs(self): self.alpha = 0.2 - self.attrs = {"use_mkldnn": True, "alpha": self.alpha} + self.attrs = {"use_onednn": True, "alpha": self.alpha} class TestONEDNNExpBF16Op(ONEDNNBF16ActivationOp, TestActivation): diff --git a/test/mkldnn/test_bilinear_interp_v2_mkldnn_op.py b/test/mkldnn/test_bilinear_interp_v2_mkldnn_op.py index eca6ef8b9c7b0e..84970be1aaf057 100644 --- a/test/mkldnn/test_bilinear_interp_v2_mkldnn_op.py +++ b/test/mkldnn/test_bilinear_interp_v2_mkldnn_op.py @@ -145,7 +145,7 @@ def setUp(self): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = {'Out': output_np} diff --git a/test/mkldnn/test_cast_mkldnn_op.py b/test/mkldnn/test_cast_mkldnn_op.py index db12d0b21101bf..02aa59396208d7 100644 --- a/test/mkldnn/test_cast_mkldnn_op.py +++ b/test/mkldnn/test_cast_mkldnn_op.py @@ -42,7 +42,7 @@ def setUp(self): self.attrs = { 'in_dtype': prepare_dtype(self.x), 'out_dtype': prepare_dtype(self.out), - 'use_mkldnn': True, + 'use_onednn': True, } self.op_type = 'cast' diff --git a/test/mkldnn/test_concat_bf16_mkldnn_op.py b/test/mkldnn/test_concat_bf16_mkldnn_op.py index 606deb6976d4ac..0faf7e16482fb5 100644 --- a/test/mkldnn/test_concat_bf16_mkldnn_op.py +++ b/test/mkldnn/test_concat_bf16_mkldnn_op.py @@ -35,7 +35,7 @@ def setUp(self): self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]} self.attrs = { 'axis': self.axis, - 'use_mkldnn': True, + 'use_onednn': True, 'mkldnn_data_type': self.onednn_data_type, } diff --git a/test/mkldnn/test_concat_int8_mkldnn_op.py b/test/mkldnn/test_concat_int8_mkldnn_op.py index 89d2b71c688807..7f25b41c4191ea 100644 --- a/test/mkldnn/test_concat_int8_mkldnn_op.py +++ b/test/mkldnn/test_concat_int8_mkldnn_op.py @@ -27,7 +27,7 @@ def setUp(self): self.init_shape() self.init_test_data() self.inputs = {'X': [('x0', self.x0), ('x1', 
self.x1), ('x2', self.x2)]} - self.attrs = {'axis': self.axis, 'use_mkldnn': True} + self.attrs = {'axis': self.axis, 'use_onednn': True} self.output = np.concatenate( (self.x0, self.x1, self.x2), axis=self.axis diff --git a/test/mkldnn/test_conv2d_bf16_mkldnn_op.py b/test/mkldnn/test_conv2d_bf16_mkldnn_op.py index da802ed21ba979..562595733933df 100644 --- a/test/mkldnn/test_conv2d_bf16_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_bf16_mkldnn_op.py @@ -110,7 +110,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, 'force_fp32_output': self.force_fp32_output, 'fuse_residual_connection': self.fuse_residual, diff --git a/test/mkldnn/test_conv2d_int8_mkldnn_op.py b/test/mkldnn/test_conv2d_int8_mkldnn_op.py index d2e6d33607e4fe..23b3e938349b2f 100644 --- a/test/mkldnn/test_conv2d_int8_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -166,7 +166,7 @@ def residual_helper(init_low, init_high, output_): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'exhaustive_search': self.exhaustive_search, 'Scale_in': self.scale_in, diff --git a/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py b/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py index eaa12b49ee993f..5273b8c232a5b8 100644 --- a/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py @@ -90,7 +90,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'is_test': self.is_test, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, 'force_fp32_output': self.force_fp32_output, 'data_format': self.data_format, diff --git a/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py b/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py index b51d7e989c371a..c552d1215267c6 100644 --- a/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py @@ -36,7 +36,7 @@ def setUp(self): self.y_bf16 = convert_float_to_uint16(self.y) self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': convert_float_to_uint16(self.out)} def generate_data(self): diff --git a/test/mkldnn/test_elementwise_div_mkldnn_op.py b/test/mkldnn/test_elementwise_div_mkldnn_op.py index 367c2b2b210e7b..f081f00e398a0e 100644 --- a/test/mkldnn/test_elementwise_div_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_div_mkldnn_op.py @@ -37,7 +37,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def init_input_output(self): @@ -164,7 +164,7 @@ def setUp(self): self.x_bf16 = convert_float_to_uint16(self.x) self.y_bf16 = convert_float_to_uint16(self.y) self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': convert_float_to_uint16(self.out)} def init_dtype(self): diff --git 
a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py index 8500c7dea868ba..b138c87f0cd477 100644 --- a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py @@ -35,7 +35,7 @@ def setUp(self): self.x_bf16 = convert_float_to_uint16(self.x) self.y_bf16 = convert_float_to_uint16(self.y) self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': convert_float_to_uint16(self.out)} def generate_data(self): diff --git a/test/mkldnn/test_elementwise_sub_onednn_op.py b/test/mkldnn/test_elementwise_sub_onednn_op.py index a9787c115109eb..51e30dd4d6bca4 100644 --- a/test/mkldnn/test_elementwise_sub_onednn_op.py +++ b/test/mkldnn/test_elementwise_sub_onednn_op.py @@ -44,7 +44,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def init_input_output(self): @@ -225,7 +225,7 @@ def setUp(self): self.x_bf16 = convert_float_to_uint16(self.x) self.y_bf16 = convert_float_to_uint16(self.y) self.inputs = {'X': self.x_bf16, 'Y': self.y_bf16} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': convert_float_to_uint16(self.out)} def init_dtype(self): diff --git a/test/mkldnn/test_expand_v2_mkldnn_op.py b/test/mkldnn/test_expand_v2_mkldnn_op.py index 8d30412e510dd0..3036069b50b010 100644 --- a/test/mkldnn/test_expand_v2_mkldnn_op.py +++ b/test/mkldnn/test_expand_v2_mkldnn_op.py @@ -30,7 +30,7 @@ def setUp(self): self.op_type = "expand_v2" self.init_data() self.x = np.random.random(self.ori_shape).astype("float32") - self.attrs = {'shape': self.shape, 'use_mkldnn': True} + self.attrs = {'shape': self.shape, 'use_onednn': True} self.set_inputs() self.set_additional_inputs() output = np.tile(self.x, self.expand_times) diff --git a/test/mkldnn/test_fc_bf16_mkldnn_op.py b/test/mkldnn/test_fc_bf16_mkldnn_op.py index 05c4d6775283fd..b04120c1e7e5a6 100644 --- a/test/mkldnn/test_fc_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fc_bf16_mkldnn_op.py @@ -60,7 +60,7 @@ def setUp(self): } self.attrs = { - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'force_fp32_output': self.force_fp32_output, } diff --git a/test/mkldnn/test_fc_int8_mkldnn_op.py b/test/mkldnn/test_fc_int8_mkldnn_op.py index da14db39df48da..353978b12b23d4 100644 --- a/test/mkldnn/test_fc_int8_mkldnn_op.py +++ b/test/mkldnn/test_fc_int8_mkldnn_op.py @@ -33,7 +33,7 @@ def setUp(self): ) self.attrs = { - 'use_mkldnn': True, + 'use_onednn': True, 'Scale_in': self.x_scale, 'Scale_weights': [self.y_scale] * y_scales_size, 'Scale_out': self.out_scale, diff --git a/test/mkldnn/test_fc_mkldnn_op.py b/test/mkldnn/test_fc_mkldnn_op.py index 3372238db9d9d4..b625cb57db35b1 100644 --- a/test/mkldnn/test_fc_mkldnn_op.py +++ b/test/mkldnn/test_fc_mkldnn_op.py @@ -45,7 +45,7 @@ def setUp(self): 'Bias': self.bias, } - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} self.outputs = { 'Out': fully_connected_naive( diff --git a/test/mkldnn/test_fill_constant_mkldnn_op.py b/test/mkldnn/test_fill_constant_mkldnn_op.py index 562a0dd0ae503d..01d1feb83d06ea 
100644 --- a/test/mkldnn/test_fill_constant_mkldnn_op.py +++ b/test/mkldnn/test_fill_constant_mkldnn_op.py @@ -57,7 +57,7 @@ def set_inputs(self): self.inputs = {} def set_attrs(self): - self.attrs = {'shape': (3, 5), 'use_mkldnn': True, 'value': self.value} + self.attrs = {'shape': (3, 5), 'use_onednn': True, 'value': self.value} def test_check_output(self): self.check_output(check_pir_onednn=True) @@ -87,7 +87,7 @@ def set_inputs(self): class TestFillZerosLike2DStringValueInfOneDNNOp(TestFillConstant2DOneDNNOp): def set_attrs(self): self.str_value = "inf" - self.attrs = {'shape': (10, 13), 'use_mkldnn': True, 'str_value': "inf"} + self.attrs = {'shape': (10, 13), 'use_onednn': True, 'str_value': "inf"} class TestFillZerosLike2DStringValueMinusInfOneDNNOp( @@ -97,7 +97,7 @@ def set_attrs(self): self.str_value = "-inf" self.attrs = { 'shape': (10, 13), - 'use_mkldnn': True, + 'use_onednn': True, 'str_value': "-inf", } @@ -107,7 +107,7 @@ def set_attrs(self): self.str_value = "0.123" self.attrs = { 'shape': (10, 13), - 'use_mkldnn': True, + 'use_onednn': True, 'str_value': "0.123", } diff --git a/test/mkldnn/test_flags_use_mkldnn.py b/test/mkldnn/test_flags_use_mkldnn.py index 54b2be715809c9..01d483f9f9e2fe 100644 --- a/test/mkldnn/test_flags_use_mkldnn.py +++ b/test/mkldnn/test_flags_use_mkldnn.py @@ -22,7 +22,7 @@ class TestFlagsUseOnednn(unittest.TestCase): def setUp(self): self._python_interp = sys.executable - self._python_interp += " check_flags_use_mkldnn.py" + self._python_interp += " check_flags_use_onednn.py" self.env = os.environ.copy() self.env["GLOG_v"] = "1" diff --git a/test/mkldnn/test_flatten_mkldnn_op.py b/test/mkldnn/test_flatten_mkldnn_op.py index 7bd90724082a17..2ba826e3ddc9ed 100644 --- a/test/mkldnn/test_flatten_mkldnn_op.py +++ b/test/mkldnn/test_flatten_mkldnn_op.py @@ -27,7 +27,7 @@ def setUp(self): self.set_op_type() self.init_test_case() self.set_inputs() - self.attrs = {"axis": self.axis, 'use_mkldnn': True} + self.attrs = {"axis": self.axis, 'use_onednn': True} self.ori_shape = self.inputs['X'].shape self.outputs = {"Out": self.inputs["X"].copy().reshape(self.new_shape)} diff --git a/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py b/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py index e51b67888f402a..6248a7fe7e102e 100644 --- a/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py @@ -129,7 +129,7 @@ def setUp(self): 'is_reverse': self.is_reverse, 'origin_mode': self.origin_mode, 'force_fp32_output': self.force_fp32_output, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, } diff --git a/test/mkldnn/test_fusion_gru_int8_mkldnn_op.py b/test/mkldnn/test_fusion_gru_int8_mkldnn_op.py index 043a5eaa074030..e88fce1507f884 100644 --- a/test/mkldnn/test_fusion_gru_int8_mkldnn_op.py +++ b/test/mkldnn/test_fusion_gru_int8_mkldnn_op.py @@ -141,7 +141,7 @@ def setUp(self): 'gate_activation': self.act_gate, 'is_reverse': self.is_reverse, 'origin_mode': self.origin_mode, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, 'force_fp32_output': self.force_fp32_output, 'Scale_data': scale_data, diff --git a/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py b/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py index f87b674f59c6ae..bff4586e3d0c0e 100644 --- a/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py @@ -145,7 +145,7 @@ def setUp(self): 'cell_activation': self.act_cell, 
'candidate_activation': self.act_cand, 'force_fp32_output': self.force_fp32_output, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, } diff --git a/test/mkldnn/test_fusion_lstm_int8_mkldnn_op.py b/test/mkldnn/test_fusion_lstm_int8_mkldnn_op.py index 198bc2685cec49..c27e7b226fd283 100644 --- a/test/mkldnn/test_fusion_lstm_int8_mkldnn_op.py +++ b/test/mkldnn/test_fusion_lstm_int8_mkldnn_op.py @@ -130,7 +130,7 @@ def setUp(self): 'candidate_activation': self.act_cand, 'is_reverse': self.is_reverse, 'use_peepholes': self.use_peepholes, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, 'force_fp32_output': self.force_fp32_output, 'Scale_data': scale_data, diff --git a/test/mkldnn/test_gaussian_random_mkldnn_op.py b/test/mkldnn/test_gaussian_random_mkldnn_op.py index 84bcea864c306f..d45c678769a857 100644 --- a/test/mkldnn/test_gaussian_random_mkldnn_op.py +++ b/test/mkldnn/test_gaussian_random_mkldnn_op.py @@ -40,7 +40,7 @@ def setUp(self): "mean": 1.0, "std": 2.0, "seed": 10, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, } @@ -57,7 +57,7 @@ def setUp(self): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, } paddle.seed(10) diff --git a/test/mkldnn/test_log_softmax_mkldnn_op.py b/test/mkldnn/test_log_softmax_mkldnn_op.py index 9f4807acb3fbc2..6d838bc86ff9c1 100644 --- a/test/mkldnn/test_log_softmax_mkldnn_op.py +++ b/test/mkldnn/test_log_softmax_mkldnn_op.py @@ -44,7 +44,7 @@ def setUp(self): self.inputs = {'X': x} self.outputs = {'Out': out} - self.attrs = {'axis': self.axis, 'use_mkldnn': True} + self.attrs = {'axis': self.axis, 'use_onednn': True} def set_dtype(self): self.dtype = np.float32 diff --git a/test/mkldnn/test_lrn_mkldnn_op.py b/test/mkldnn/test_lrn_mkldnn_op.py index 046bad391ee09b..874c73628d77a1 100644 --- a/test/mkldnn/test_lrn_mkldnn_op.py +++ b/test/mkldnn/test_lrn_mkldnn_op.py @@ -22,7 +22,7 @@ class TestLRNONEDNNOp(TestLRNOp): def get_attrs(self): attrs = TestLRNOp.get_attrs(self) - attrs['use_mkldnn'] = True + attrs['use_onednn'] = True return attrs def test_check_output(self): diff --git a/test/mkldnn/test_matmul_bf16_mkldnn_op.py b/test/mkldnn/test_matmul_bf16_mkldnn_op.py index 8f9e932620714e..78a943e73d889d 100644 --- a/test/mkldnn/test_matmul_bf16_mkldnn_op.py +++ b/test/mkldnn/test_matmul_bf16_mkldnn_op.py @@ -33,7 +33,7 @@ def generate_data(self): def set_attributes(self): self.attrs = { 'alpha': self.alpha, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "mkldnn_data_type": self.onednn_data_type, "force_fp32_output": self.force_fp32_output, 'transpose_X': False, @@ -146,7 +146,7 @@ def generate_data(self): def set_attributes(self): self.attrs = { - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "mkldnn_data_type": self.onednn_data_type, 'transpose_X': True, 'transpose_Y': False, @@ -161,7 +161,7 @@ def generate_data(self): def set_attributes(self): self.attrs = { - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "mkldnn_data_type": self.onednn_data_type, 'transpose_Y': True, 'transpose_X': False, diff --git a/test/mkldnn/test_matmul_v2_mkldnn_op.py b/test/mkldnn/test_matmul_v2_mkldnn_op.py index 836fa86c6d43d6..4c132ebef63bb1 100644 --- a/test/mkldnn/test_matmul_v2_mkldnn_op.py +++ b/test/mkldnn/test_matmul_v2_mkldnn_op.py @@ -77,7 +77,7 @@ def setUp(self): self.attrs = { 'trans_x': self.trans_x, 'trans_y': 
self.trans_y, - 'use_mkldnn': True, + 'use_onednn': True, } self.set_dtype_attr() self.outputs = {'Out': result} diff --git a/test/mkldnn/test_mul_int8_mkldnn_op.py b/test/mkldnn/test_mul_int8_mkldnn_op.py index 71db940a027e0c..802a2e9d4aae73 100644 --- a/test/mkldnn/test_mul_int8_mkldnn_op.py +++ b/test/mkldnn/test_mul_int8_mkldnn_op.py @@ -35,7 +35,7 @@ def setUp(self): self.init_data_type() self.init_data() self.attrs = { - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "scale_x": self.scale_x, "scale_y": self.scale_y, "scale_out": self.scale_out, @@ -106,7 +106,7 @@ def setUp(self): self.init_data_type() self.init_data() self.attrs = { - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "scale_x": self.scale_x, "scale_y": self.scale_y, "scale_out": self.scale_out, diff --git a/test/mkldnn/test_mul_mkldnn_op.py b/test/mkldnn/test_mul_mkldnn_op.py index 9759a581dbb4cf..d528631246b779 100644 --- a/test/mkldnn/test_mul_mkldnn_op.py +++ b/test/mkldnn/test_mul_mkldnn_op.py @@ -25,7 +25,7 @@ class TestMulOneDNNOp(OpTest): def setUp(self): self.op_type = "mul" - self.attrs = {'use_mkldnn': True} + self.attrs = {'use_onednn': True} self.init_shapes_and_attrs() self.x_fp32 = np.random.random(self.x_shape).astype(np.float32) diff --git a/test/mkldnn/test_multi_gru_mkldnn_op.py b/test/mkldnn/test_multi_gru_mkldnn_op.py index f4d2b9cb9e60d9..ea6fc57bc94ae2 100644 --- a/test/mkldnn/test_multi_gru_mkldnn_op.py +++ b/test/mkldnn/test_multi_gru_mkldnn_op.py @@ -194,7 +194,7 @@ def setUp(self): 'gate_activation': 'sigmoid', 'layers': self.layers, 'origin_mode': self.origin_mode, - 'use_mkldnn': True, + 'use_onednn': True, } if is_int8: diff --git a/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py b/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py index e1ae1bcf3b7c6b..caf65abd9cc4ea 100644 --- a/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py +++ b/test/mkldnn/test_nearest_interp_v2_mkldnn_op.py @@ -146,7 +146,7 @@ def setUp(self): 'out_w': self.out_w, 'scale': self.scale, 'data_layout': self.data_layout, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = {'Out': output_np} diff --git a/test/mkldnn/test_reduce_bf16_mkldnn_op.py b/test/mkldnn/test_reduce_bf16_mkldnn_op.py index 91606f6bf6329e..b8f0e497bbdaad 100644 --- a/test/mkldnn/test_reduce_bf16_mkldnn_op.py +++ b/test/mkldnn/test_reduce_bf16_mkldnn_op.py @@ -37,7 +37,7 @@ def setUp(self): self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} self.outputs = {'Out': self.x_fp32.sum(axis=0)} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def test_check_output(self): self.check_output( @@ -100,7 +100,7 @@ def setUp(self): self.x_fp32 = np.random.normal(size=(2, 3, 5, 6)).astype('float32') self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': [0, 1, 2, 3]} + self.attrs = {'use_onednn': self.use_onednn, 'dim': [0, 1, 2, 3]} self.outputs = {'Out': self.x_fp32.sum(axis=tuple(self.attrs['dim']))} @@ -113,7 +113,7 @@ def setUp(self): self.x_fp32 = np.random.normal(size=(4, 7, 6, 6)).astype('float32') self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': [-1, -2, -3, -4]} + self.attrs = {'use_onednn': self.use_onednn, 'dim': [-1, -2, -3, -4]} self.outputs = {'Out': self.x_fp32.sum(axis=tuple(self.attrs['dim']))} @@ -126,7 +126,7 @@ def setUp(self): self.x_fp32 
= np.random.normal(size=(2, 5, 3, 2, 5)).astype('float32') self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_mkldnn': True} + self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_onednn': True} self.outputs = {'Out': self.x_fp32.sum(keepdims=self.attrs['keep_dim'])} @@ -139,7 +139,7 @@ def setUp(self): self.x_fp32 = np.random.normal(size=(4, 5, 4, 5)).astype('float32') self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_onednn} + self.attrs = {'reduce_all': True, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.x_fp32.sum()} @@ -156,7 +156,7 @@ def setUp(self): self.x_fp32 = np.random.random((5, 6, 10)).astype("float32") self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'dim': [-1], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [-1], 'use_onednn': self.use_onednn} self.outputs = {'Out': self.x_fp32.max(axis=tuple(self.attrs['dim']))} @@ -175,7 +175,7 @@ def setUp(self): self.x_fp32 = np.random.random((5, 6, 10, 9)).astype("float32") self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'dim': [-1, 0, 1], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [-1, 0, 1], 'use_onednn': self.use_onednn} self.outputs = {'Out': self.x_fp32.max(axis=tuple(self.attrs['dim']))} @@ -192,7 +192,7 @@ def setUp(self): self.x_fp32 = np.random.random((5, 6, 10)).astype("float32") self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'dim': [2], 'use_mkldnn': self.use_onednn} + self.attrs = {'dim': [2], 'use_onednn': self.use_onednn} self.outputs = {'Out': self.x_fp32.min(axis=tuple(self.attrs['dim']))} @@ -203,7 +203,7 @@ def setUp(self): self.x_fp32 = np.random.random((5, 6, 10)).astype("float32") self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} self.outputs = {'Out': self.x_fp32.sum(axis=0) / self.x_fp32.shape[0]} @@ -214,7 +214,7 @@ def setUp(self): self.x_fp32 = np.random.random((5, 6, 3, 5)).astype("float32") self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.inputs = {'X': self.x_bf16} - self.attrs = {'use_mkldnn': self.use_onednn, 'dim': [0, 1]} + self.attrs = {'use_onednn': self.use_onednn, 'dim': [0, 1]} self.outputs = { 'Out': self.x_fp32.sum(axis=tuple(self.attrs['dim'])) / (self.x_fp32.shape[0] * self.x_fp32.shape[1]) diff --git a/test/mkldnn/test_reshape_bf16_op.py b/test/mkldnn/test_reshape_bf16_op.py index 94978e67d81468..587e348644c66a 100644 --- a/test/mkldnn/test_reshape_bf16_op.py +++ b/test/mkldnn/test_reshape_bf16_op.py @@ -35,7 +35,7 @@ def setUp(self): self.inputs = {'X': self.input_data} self.attrs = { 'shape': self.new_shape, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, } self.outputs = { diff --git a/test/mkldnn/test_scale_bf16_mkldnn_op.py b/test/mkldnn/test_scale_bf16_mkldnn_op.py index 26943471b285dd..2ababf6f4441d4 100644 --- a/test/mkldnn/test_scale_bf16_mkldnn_op.py +++ b/test/mkldnn/test_scale_bf16_mkldnn_op.py @@ -35,7 +35,7 @@ def setUp(self): self.x_bf16 = convert_float_to_uint16(self.x_fp32) self.scale = -2.3 self.inputs = {'X': self.x_bf16} - self.attrs = {'scale': self.scale, 'use_mkldnn': True, 'bias': 0.4} + self.attrs = 
{'scale': self.scale, 'use_onednn': True, 'bias': 0.4} self.use_onednn = True self.outputs = { 'Out': (self.x_fp32 * self.attrs['scale']) + self.attrs['bias'] @@ -78,7 +78,7 @@ def setUp(self): self.inputs = {'X': self.x_bf16} self.attrs = { 'scale': self.scale, - 'use_mkldnn': True, + 'use_onednn': True, 'bias': 0.0, 'bias_after_scale': False, } @@ -99,7 +99,7 @@ def setUp(self): 'X': self.x_bf16, 'ScaleTensor': convert_float_to_uint16(self.scale_tensor), } - self.attrs = {'use_mkldnn': True} + self.attrs = {'use_onednn': True} self.outputs = {'Out': self.x_fp32 * self.scale} @@ -117,7 +117,7 @@ def setUp(self): self.attrs = { 'bias': -1.1, 'bias_after_scale': False, - 'use_mkldnn': True, + 'use_onednn': True, } self.outputs = {'Out': (self.x_fp32 + self.attrs['bias']) * self.scale} diff --git a/test/mkldnn/test_shuffle_channel_mkldnn_op.py b/test/mkldnn/test_shuffle_channel_mkldnn_op.py index e9510c96369617..36e10885a6c707 100644 --- a/test/mkldnn/test_shuffle_channel_mkldnn_op.py +++ b/test/mkldnn/test_shuffle_channel_mkldnn_op.py @@ -28,7 +28,7 @@ def setUp(self): self.set_dtype() self.set_group() self.inputs = {'X': np.random.random((5, 64, 2, 3)).astype(self.dtype)} - self.attrs = {'use_mkldnn': True, 'group': self.group} + self.attrs = {'use_onednn': True, 'group': self.group} _, c, h, w = self.inputs['X'].shape input_reshaped = np.reshape( diff --git a/test/mkldnn/test_slice_mkldnn_op.py b/test/mkldnn/test_slice_mkldnn_op.py index 1a71278a9f2167..e95b9626add571 100644 --- a/test/mkldnn/test_slice_mkldnn_op.py +++ b/test/mkldnn/test_slice_mkldnn_op.py @@ -36,7 +36,7 @@ def setUp(self): 'starts': self.starts, 'ends': self.ends, 'infer_flags': self.infer_flags, - 'use_mkldnn': True, + 'use_onednn': True, } self.set_attrs() diff --git a/test/mkldnn/test_softmax_bf16_mkldnn_op.py b/test/mkldnn/test_softmax_bf16_mkldnn_op.py index b52dda9aa724ce..31b16cb38e0079 100644 --- a/test/mkldnn/test_softmax_bf16_mkldnn_op.py +++ b/test/mkldnn/test_softmax_bf16_mkldnn_op.py @@ -64,7 +64,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(x)} self.outputs = {'Out': out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def test_check_output(self): self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) diff --git a/test/mkldnn/test_softplus_mkldnn_op.py b/test/mkldnn/test_softplus_mkldnn_op.py index 0949b63cc2c59d..5903a9faf32193 100644 --- a/test/mkldnn/test_softplus_mkldnn_op.py +++ b/test/mkldnn/test_softplus_mkldnn_op.py @@ -37,7 +37,7 @@ def setUp(self): self.threshold = 20 self.config() self.set_dtype() - self.attrs = {'use_mkldnn': True, 'beta': self.beta} + self.attrs = {'use_onednn': True, 'beta': self.beta} self.x = np.random.random(self.x_shape) self.out = ref_softplus(self.x, self.beta, self.threshold) diff --git a/test/mkldnn/test_split_bf16_mkldnn_op.py b/test/mkldnn/test_split_bf16_mkldnn_op.py index ae8edba09fc74d..3234941a8ed553 100644 --- a/test/mkldnn/test_split_bf16_mkldnn_op.py +++ b/test/mkldnn/test_split_bf16_mkldnn_op.py @@ -45,7 +45,7 @@ def setUp(self): self.init_data() self.inputs = {'X': self.x} self.attrs = { - 'use_mkldnn': True, + 'use_onednn': True, 'num': self.num, 'mkldnn_data_type': "bfloat16", } diff --git a/test/mkldnn/test_squeeze2_mkldnn_op.py b/test/mkldnn/test_squeeze2_mkldnn_op.py index fc0f731f35b681..9e2a4bb774b99f 100644 --- a/test/mkldnn/test_squeeze2_mkldnn_op.py +++ b/test/mkldnn/test_squeeze2_mkldnn_op.py @@ -38,7 +38,7 @@ def 
set_inputs(self): self.inputs = {"X": self.x} def init_attrs(self): - self.attrs = {"axes": self.axes, 'use_mkldnn': True} + self.attrs = {"axes": self.axes, 'use_onednn': True} def set_outputs(self): self.outputs = { diff --git a/test/mkldnn/test_stack_mkldnn_op.py b/test/mkldnn/test_stack_mkldnn_op.py index 8b91c246d6e6b0..2bd48e74a377e1 100644 --- a/test/mkldnn/test_stack_mkldnn_op.py +++ b/test/mkldnn/test_stack_mkldnn_op.py @@ -56,7 +56,7 @@ def setUp(self): self.inputs = {'X': input_list} self.outputs = {'Y': np.stack(self.op_inputs, axis=self.axis)} - self.attrs = {'axis': self.axis, 'use_mkldnn': True} + self.attrs = {'axis': self.axis, 'use_onednn': True} def test_check_output(self): self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) diff --git a/test/mkldnn/test_sum_bf16_mkldnn_op.py b/test/mkldnn/test_sum_bf16_mkldnn_op.py index 341a17416df3e4..9bc17c6c168fa3 100644 --- a/test/mkldnn/test_sum_bf16_mkldnn_op.py +++ b/test/mkldnn/test_sum_bf16_mkldnn_op.py @@ -45,7 +45,7 @@ def setUp(self): y = x0 + x1 + x2 self.outputs = {'Out': convert_float_to_uint16(y)} - self.attrs = {'use_mkldnn': self.use_onednn} + self.attrs = {'use_onednn': self.use_onednn} def test_check_output(self): self.check_output_with_place(core.CPUPlace(), check_pir_onednn=True) diff --git a/test/mkldnn/test_transpose_bf16_mkldnn_op.py b/test/mkldnn/test_transpose_bf16_mkldnn_op.py index 8f0d5e9a952143..89c597a6d0de25 100644 --- a/test/mkldnn/test_transpose_bf16_mkldnn_op.py +++ b/test/mkldnn/test_transpose_bf16_mkldnn_op.py @@ -37,7 +37,7 @@ def setUp(self): self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'mkldnn_data_type': self.onednn_data_type, } diff --git a/test/mkldnn/test_transpose_int8_mkldnn_op.py b/test/mkldnn/test_transpose_int8_mkldnn_op.py index eefdc3dae12fb4..65205a9511c42f 100644 --- a/test/mkldnn/test_transpose_int8_mkldnn_op.py +++ b/test/mkldnn/test_transpose_int8_mkldnn_op.py @@ -36,7 +36,7 @@ def setUp(self): self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { diff --git a/test/mkldnn/test_transpose_mkldnn_op.py b/test/mkldnn/test_transpose_mkldnn_op.py index 125128a73b131d..f4a4bdaf173d9b 100644 --- a/test/mkldnn/test_transpose_mkldnn_op.py +++ b/test/mkldnn/test_transpose_mkldnn_op.py @@ -25,7 +25,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype("float32")} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, } self.outputs = { 'XShape': np.random.random(self.shape).astype("float32"), diff --git a/test/quantization/CMakeLists.txt b/test/quantization/CMakeLists.txt index 20082befcba268..c2f533b9b31d8c 100644 --- a/test/quantization/CMakeLists.txt +++ b/test/quantization/CMakeLists.txt @@ -6,13 +6,13 @@ file( string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") function(_inference_analysis_python_api_int8_test target model_dir data_path - filename use_mkldnn) + filename use_onednn) py_test( ${target} SRCS ${filename} ENVS CPU_NUM_THREADS=${CPU_NUM_THREADS_ON_CI} - FLAGS_use_onednn=${use_mkldnn} + FLAGS_use_onednn=${use_onednn} ARGS --infer_model ${model_dir}/model diff --git a/test/quantization/README.md b/test/quantization/README.md index eeb4b838fe7648..3137a49be0e5d3 100644 --- a/test/quantization/README.md +++ b/test/quantization/README.md @@ -264,7 +264,7 @@ The following options are also accepted: ```bash cd /PATH/TO/PADDLE -OMP_NUM_THREADS=28 FLAGS_use_mkldnn=true 
python python/paddle/static/quantization/slim/tests/quant2_int8_image_classification_comparison.py --quant_model=/PATH/TO/DOWNLOADED/QUANT/MODEL --fp32_model=/PATH/TO/DOWNLOADED/FP32/MODEL --infer_data=$HOME/.cache/paddle/dataset/int8/download/int8_full_val.bin --batch_size=50 --batch_num=1000 --acc_diff_threshold=0.01 --ops_to_quantize="conv2d,pool2d" +OMP_NUM_THREADS=28 FLAGS_use_onednn=true python python/paddle/static/quantization/slim/tests/quant2_int8_image_classification_comparison.py --quant_model=/PATH/TO/DOWNLOADED/QUANT/MODEL --fp32_model=/PATH/TO/DOWNLOADED/FP32/MODEL --infer_data=$HOME/.cache/paddle/dataset/int8/download/int8_full_val.bin --batch_size=50 --batch_num=1000 --acc_diff_threshold=0.01 --ops_to_quantize="conv2d,pool2d" ``` > Notes: Due to the large number of images in the `int8_full_val.bin` dataset (50 000), the accuracy benchmark may take a long time. To accelerate accuracy measurement, it is recommended to set `OMP_NUM_THREADS` to the maximum number of physical cores available on the server. diff --git a/test/xpu/op_test_xpu.py b/test/xpu/op_test_xpu.py index 956506bd47e1c0..875280639bcec7 100644 --- a/test/xpu/op_test_xpu.py +++ b/test/xpu/op_test_xpu.py @@ -292,8 +292,8 @@ def get_grad_with_place( # oneDNN numeric gradient should use CPU kernel use_onednn = False - if op_attrs.get("use_mkldnn"): - op_attrs["use_mkldnn"] = False + if op_attrs.get("use_onednn"): - op_attrs["use_onednn"] = False use_onednn = True mean_grad_op_types = get_xpu_op_support_types('mean') @@ -311,7 +311,7 @@ def get_grad_with_place( ) if use_onednn: - op_attrs["use_mkldnn"] = True + op_attrs["use_onednn"] = True if no_grad_set is None: no_grad_set = set() diff --git a/test/xpu/test_batch_norm_op_xpu.py b/test/xpu/test_batch_norm_op_xpu.py index 97ab78297934dd..6bbc3efe16c7f2 100644 --- a/test/xpu/test_batch_norm_op_xpu.py +++ b/test/xpu/test_batch_norm_op_xpu.py @@ -448,7 +448,7 @@ def test_train(self): "epsilon": self.epsilon, "is_test": False, "data_layout": self.data_layout, - "use_mkldnn": False, + "use_onednn": False, "fuse_with_relu": False, "use_global_stats": self.use_global_stats, } diff --git a/test/xpu/test_conv2d_op_xpu.py b/test/xpu/test_conv2d_op_xpu.py index e93f5b89e35d0e..16b80018905c3e 100644 --- a/test/xpu/test_conv2d_op_xpu.py +++ b/test/xpu/test_conv2d_op_xpu.py @@ -241,7 +241,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search, @@ -402,7 +402,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'fuse_relu_before_depthwise_conv': self.fuse_relu_before_depthwise_conv, 'exhaustive_search': self.exhaustive_search, diff --git a/test/xpu/test_conv2d_transpose_op_xpu.py b/test/xpu/test_conv2d_transpose_op_xpu.py index 487fa004c105c9..8d7070a6697c5e 100644 --- a/test/xpu/test_conv2d_transpose_op_xpu.py +++ b/test/xpu/test_conv2d_transpose_op_xpu.py @@ -168,7 +168,7 @@ def setUp(self): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } if self.output_size is not None: diff --git a/test/xpu/test_conv3d_op_xpu.py b/test/xpu/test_conv3d_op_xpu.py 
index b198370a87767a..6a96930339129a 100644 --- a/test/xpu/test_conv3d_op_xpu.py +++ b/test/xpu/test_conv3d_op_xpu.py @@ -244,7 +244,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } self.outputs = {'Output': output} @@ -419,7 +419,7 @@ def setUp(self): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } self.outputs = {'Output': output} diff --git a/test/xpu/test_depthwise_conv2d_transpose_op_xpu.py b/test/xpu/test_depthwise_conv2d_transpose_op_xpu.py index 578cd3b9c88d85..7c59ded26f6792 100644 --- a/test/xpu/test_depthwise_conv2d_transpose_op_xpu.py +++ b/test/xpu/test_depthwise_conv2d_transpose_op_xpu.py @@ -168,7 +168,7 @@ def setUp(self): 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'is_test': self.is_test, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, } if self.output_size is not None: diff --git a/test/xpu/test_elementwise_add_op_xpu.py b/test/xpu/test_elementwise_add_op_xpu.py index ee0c70d75b3341..7f8fc159b1d588 100644 --- a/test/xpu/test_elementwise_add_op_xpu.py +++ b/test/xpu/test_elementwise_add_op_xpu.py @@ -49,7 +49,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def test_check_output(self): diff --git a/test/xpu/test_elementwise_add_op_xpu_kp.py b/test/xpu/test_elementwise_add_op_xpu_kp.py index 857e8d72b188cc..d3ef8e332c06e0 100644 --- a/test/xpu/test_elementwise_add_op_xpu_kp.py +++ b/test/xpu/test_elementwise_add_op_xpu_kp.py @@ -39,7 +39,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def test_check_output(self): diff --git a/test/xpu/test_elementwise_floordiv_op_xpu.py b/test/xpu/test_elementwise_floordiv_op_xpu.py index f5e1a0ecc8356a..a4795874874a21 100644 --- a/test/xpu/test_elementwise_floordiv_op_xpu.py +++ b/test/xpu/test_elementwise_floordiv_op_xpu.py @@ -50,7 +50,7 @@ def setUp(self): 'X': OpTest.np_dtype_to_base_dtype(self.x), 'Y': OpTest.np_dtype_to_base_dtype(self.y), } - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} self.outputs = {'Out': self.out} def test_check_output(self): diff --git a/test/xpu/test_elementwise_mod_op_xpu.py b/test/xpu/test_elementwise_mod_op_xpu.py index 035595d2e36e84..b3d212ada318c2 100644 --- a/test/xpu/test_elementwise_mod_op_xpu.py +++ b/test/xpu/test_elementwise_mod_op_xpu.py @@ -48,7 +48,7 @@ def init_input_output(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def init_dtype(self): pass @@ -81,7 +81,7 @@ def init_input_output(self): 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + 
self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} class TestRemainderOp(unittest.TestCase): def test_dygraph(self): diff --git a/test/xpu/test_elementwise_mul_op_xpu.py b/test/xpu/test_elementwise_mul_op_xpu.py index c50de0285d66c1..c7116ea8f42905 100644 --- a/test/xpu/test_elementwise_mul_op_xpu.py +++ b/test/xpu/test_elementwise_mul_op_xpu.py @@ -126,7 +126,7 @@ def init_input_output(self): 'Y': self.y, } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def init_axis(self): pass @@ -281,7 +281,7 @@ def init_input_output(self): 'Y': self.y, } self.outputs = {'Out': self.out} - self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_onednn} + self.attrs = {'axis': self.axis, 'use_onednn': self.use_onednn} def gen_output(self): if self.cal_x is None: diff --git a/test/xpu/test_gaussian_random_op_xpu.py b/test/xpu/test_gaussian_random_op_xpu.py index f457e0056da782..d2bec51113d8fe 100644 --- a/test/xpu/test_gaussian_random_op_xpu.py +++ b/test/xpu/test_gaussian_random_op_xpu.py @@ -66,7 +66,7 @@ def setUp(self): "mean": self.mean, "std": self.std, "seed": 10, - "use_mkldnn": self.use_onednn, + "use_onednn": self.use_onednn, "dtype": typeid_dict[self.in_type_str], } paddle.seed(10) @@ -119,7 +119,7 @@ def setUp(self): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, "dtype": typeid_dict[self.in_type_str], } @@ -185,7 +185,7 @@ def setUp(self): 'mean': self.mean, 'std': self.std, 'seed': self.seed, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, "dtype": typeid_dict[self.in_type_str], } self.outputs = {'Out': np.zeros((123, 92), dtype=self.dtype)} diff --git a/test/xpu/test_pool2d_op_xpu.py b/test/xpu/test_pool2d_op_xpu.py index 1aab84bc6f11b6..a5cc545e7e7d22 100644 --- a/test/xpu/test_pool2d_op_xpu.py +++ b/test/xpu/test_pool2d_op_xpu.py @@ -331,7 +331,7 @@ def setUp(self): 'pooling_type': self.pool_type, 'global_pooling': self.global_pool, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_onednn, + 'use_onednn': self.use_onednn, 'data_format': self.data_format, 'exclusive': self.exclusive, 'adaptive': self.adaptive, diff --git a/test/xpu/test_transpose_op_xpu.py b/test/xpu/test_transpose_op_xpu.py index 8188984165969e..c46b7174b5def1 100644 --- a/test/xpu/test_transpose_op_xpu.py +++ b/test/xpu/test_transpose_op_xpu.py @@ -40,7 +40,7 @@ def setUp(self): self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)} self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': False, + 'use_onednn': False, 'use_xpu': True, } self.outputs = { @@ -156,7 +156,7 @@ def setUp(self): } self.attrs = { 'axis': list(self.axis), - 'use_mkldnn': False, + 'use_onednn': False, 'use_xpu': True, } self.outputs = {