15 changes: 12 additions & 3 deletions paddle/fluid/framework/ir/onednn/activation_onednn_fuse_pass.h

@@ -59,12 +59,21 @@ inline std::unordered_map<std::string, std::string> GetAttributeMap(
 inline void SetActivationAttrs(paddle::framework::OpDesc* fused_op,
                                paddle::framework::OpDesc* act_op,
                                const std::string& act_type) {
-  if (fused_op->HasAttr("use_mkldnn")) {
+  bool use_mkldnn = false;
+  if (fused_op->HasAttr("use_mkldnn") && !fused_op->HasAttr("use_onednn")) {
     PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn")),
                    common::errors::PreconditionNotMet(
-                       "oneDNN activation fuses require use_mkldnn=True"));
+                       "oneDNN activation fuses require use_onednn=True"));
   }
+  if (fused_op->HasAttr("use_mkldnn")) {
+    use_mkldnn = PADDLE_GET_CONST(bool, fused_op->GetAttr("use_mkldnn"));
+  }
+  if (!use_mkldnn && fused_op->HasAttr("use_onednn")) {
+    PADDLE_ENFORCE(PADDLE_GET_CONST(bool, fused_op->GetAttr("use_onednn")),
+                   common::errors::PreconditionNotMet(
+                       "oneDNN activation fuses require use_onednn=True"));
+  }
-  fused_op->SetAttr("use_mkldnn", true);
+  fused_op->SetAttr("use_onednn", true);
 
   auto attr_map = GetAttributeMap(act_type);
   for (const auto& attr : attr_map) {
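The new check keeps legacy graphs working through the rename: if an op still carries only the old `use_mkldnn` attribute, that flag must be true; if `use_mkldnn` is absent or false but the renamed `use_onednn` is present, the new flag must be true; and the fused op is then always tagged `use_onednn`. A minimal Python sketch of the same precedence rule (a plain dict stands in for `paddle::framework::OpDesc`; the helper name is hypothetical):

```python
# Sketch only: mirrors the C++ compatibility check above with a dict in
# place of OpDesc. Attribute names match the diff.
def tag_fused_op_as_onednn(attrs: dict) -> None:
    # Legacy graph: only the old flag is present, and it must be True.
    if "use_mkldnn" in attrs and "use_onednn" not in attrs:
        assert attrs["use_mkldnn"], "oneDNN activation fuses require use_onednn=True"
    use_mkldnn = attrs.get("use_mkldnn", False)
    # Renamed graph: fall back to the new flag, which must be True.
    if not use_mkldnn and "use_onednn" in attrs:
        assert attrs["use_onednn"], "oneDNN activation fuses require use_onednn=True"
    # The fused op always carries the new attribute name.
    attrs["use_onednn"] = True

tag_fused_op_as_onednn({"use_mkldnn": True})  # legacy op passes
tag_fused_op_as_onednn({"use_onednn": True})  # renamed op passes
```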
6 changes: 3 additions & 3 deletions python/paddle/static/quantization/quant_int8_onednn_pass.py

@@ -177,7 +177,7 @@ def _transform_to_conv_onednn(self, graph, op_node):
         conv_op_node.set_attr("Scale_weights", scale_w)
         conv_op_node.set_attr("Scale_in", scale_in)
         conv_op_node.set_attr("Scale_out", 1.0)
-        conv_op_node.set_attr("use_mkldnn", 1)
+        conv_op_node.set_attr("use_onednn", 1)
         conv_op_node.set_attr("force_fp32_output", 1)
         graph.link_to(input_var_node, conv_op_node)
         graph.link_to(weight_var_node, conv_op_node)
@@ -223,7 +223,7 @@ def _transform_to_mul_onednn(self, graph, op_node):
         mul_op_node.set_attr("scale_y", scale_w)
         mul_op_node.set_attr("scale_x", scale_in)
         mul_op_node.set_attr("scale_out", 1.0)
-        mul_op_node.set_attr("use_mkldnn", 1)
+        mul_op_node.set_attr("use_onednn", 1)
         mul_op_node.set_attr("force_fp32_output", 1)
         graph.link_to(input_var_node, mul_op_node)
         graph.link_to(weight_var_node, mul_op_node)
@@ -248,7 +248,7 @@ def _transform_to_quantize_onednn(self, graph, op_node):
             op_type='quantize',
             attrs={
                 'data_format': 'ONEDNNLAYOUT',
-                'use_mkldnn': 1,
+                'use_onednn': 1,
                 'Scale': scale_in,
                 'is_negative_input': 1,
             },
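For context, the last hunk sits inside a `graph.create_op_node(...)` call; a hedged sketch of what the pass now emits for the quantize op (`graph` is an `IrGraph`, and `input_var_node`, `quant_var_node`, and `scale_in` are assumed to be in scope as in the pass above):

```python
# Sketch under the assumptions above: the quantize op node is created with
# the renamed attribute, then wired into the graph.
quant_op_node = graph.create_op_node(
    op_type='quantize',
    attrs={
        'data_format': 'ONEDNNLAYOUT',
        'use_onednn': 1,  # was 'use_mkldnn' before this change
        'Scale': scale_in,
        'is_negative_input': 1,
    },
    inputs={'Input': input_var_node},
    outputs={'Output': quant_var_node},
)
graph.link_to(input_var_node, quant_op_node)
graph.link_to(quant_op_node, quant_var_node)
```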
2 changes: 1 addition & 1 deletion test/deprecated/ir/inference/CMakeLists.txt

@@ -56,7 +56,7 @@ if(WIN32)
 
 endif()
 
-if(NOT WITH_MKLDNN
+if(NOT WITH_ONEDNN
    AND NOT TENSORRT_FOUND
    AND NOT WITH_GPU)
   foreach(target ${TEST_INFERENCE_CPU_UT})
8 changes: 4 additions & 4 deletions test/deprecated/ir/inference/auto_scan_test.py

@@ -226,7 +226,7 @@ def create_inference_config(
         self,
         passes: list[str] | None = None,
         use_gpu: bool = False,
-        use_mkldnn: bool = False,
+        use_onednn: bool = False,
         use_xpu: bool = False,
         ir_optim: bool | None = None,
     ):
@@ -238,7 +238,7 @@ def create_inference_config(
         config.switch_ir_optim(ir_optim)
         if use_gpu:
             config.enable_use_gpu(100, 0)
-        if not use_mkldnn:
+        if not use_onednn:
             config.disable_onednn()
         if use_xpu:
             config.enable_xpu()
@@ -337,7 +337,7 @@ def run_test(self, quant=False, *args, **kwargs):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic["use_gpu"] = enable_gpu
         return str(dic)
@@ -573,7 +573,7 @@ def run_test(self, quant=False, prog_configs=None):
     def inference_config_str(self, config) -> str:
         dic = {}
         enable_onednn = config.onednn_enabled()
-        dic["use_mkldnn"] = enable_onednn
+        dic["use_onednn"] = enable_onednn
         enable_gpu = config.use_gpu()
         dic['use_gpu'] = enable_gpu
         enable_xpu = config.use_xpu()
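One behavioral detail worth noting: the flag works by omission. `create_inference_config` never calls an enable switch for oneDNN; with `use_onednn=True` it simply skips `config.disable_onednn()`, leaving the predictor's CPU default in force. A hedged usage sketch inside an auto-scan test case (`self` is assumed to be an auto-scan test instance):

```python
# Sketch: build one CPU config with oneDNN left enabled and one with it
# disabled, then compare their summaries via inference_config_str.
config_onednn = self.create_inference_config(use_onednn=True)
config_plain = self.create_inference_config(use_onednn=False)
print(self.inference_config_str(config_onednn))  # e.g. {'use_onednn': True, 'use_gpu': False}
print(self.inference_config_str(config_plain))   # e.g. {'use_onednn': False, 'use_gpu': False}
```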
14 changes: 7 additions & 7 deletions test/deprecated/ir/inference/inference_pass_test.py

@@ -129,7 +129,7 @@ def _get_inference_outs(self, config):
         return outs
 
     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
@@ -177,7 +177,7 @@ def _get_analysis_config(
         if self.enable_tensorrt_varseqlen:
             config.enable_tensorrt_varseqlen()
 
-        elif use_mkldnn:
+        elif use_onednn:
             config.enable_onednn()
             if self.enable_onednn_bfloat16:
                 config.enable_onednn_bfloat16()
@@ … @@
     def check_output(self, atol=1e-3):
         '''
         Check whether calculating on CPU and GPU, enable TensorRT
-        or disable TensorRT, enable MKLDNN or disable MKLDNN
+        or disable TensorRT, enable ONEDNN or disable ONEDNN
         are all the same.
         '''
         self.assertFalse(
@@ -201,7 +201,7 @@ def check_output_with_option(
     ):
         '''
         Check whether calculating on CPU and GPU, enable TensorRT
-        or disable TensorRT, enable MKLDNN or disable MKLDNN
+        or disable TensorRT, enable ONEDNN or disable ONEDNN
         are all the same.
         '''
         place = base.CUDAPlace(0) if use_gpu else base.CPUPlace()
@@ -287,13 +287,13 @@ def check_output_with_option(
         if (not use_gpu) and self.enable_mkldnn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
                 )
             )
 
             self.assertTrue(
                 len(paddle_outs) == len(onednn_outputs),
-                "The number of outputs is different between CPU and MKLDNN. ",
+                "The number of outputs is different between CPU and ONEDNN. ",
             )
 
             if self.enable_onednn_bfloat16:
@@ … @@
                     onednn_output,
                     rtol=1e-05,
                     atol=atol,
-                    err_msg='Output has diff between CPU and MKLDNN. ',
+                    err_msg='Output has diff between CPU and ONEDNN. ',
                 )
 
     class TensorRTParam:
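Note that the test-side switch keeps its legacy name (`self.enable_mkldnn`) even though it now feeds the renamed keyword. A minimal sketch of the updated call path, assuming an `InferencePassTest` subclass:

```python
# Sketch: the legacy enable_mkldnn test attribute now drives the renamed
# use_onednn keyword of _get_analysis_config.
self.enable_mkldnn = True
onednn_outputs = self._get_inference_outs(
    self._get_analysis_config(use_gpu=False, use_onednn=self.enable_mkldnn)
)
```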
12 changes: 6 additions & 6 deletions test/deprecated/ir/inference/quant_dequant_test.py

@@ -190,7 +190,7 @@ def _get_inference_outs(self, config):
         return outs
 
     def _get_analysis_config(
-        self, use_gpu=False, use_trt=False, use_mkldnn=False
+        self, use_gpu=False, use_trt=False, use_onednn=False
     ):
         '''
         Return a new object of AnalysisConfig.
@@ -230,7 +230,7 @@ def _get_analysis_config(
         if self.enable_tensorrt_varseqlen:
             config.enable_tensorrt_varseqlen()
 
-        elif use_mkldnn:
+        elif use_onednn:
             config.enable_onednn()
             if self.enable_onednn_bfloat16:
                 config.enable_onednn_bfloat16()
@@ -241,7 +241,7 @@ def check_output_with_option(
     ):
         '''
         Check whether calculating on CPU and GPU, enable TensorRT
-        or disable TensorRT, enable MKLDNN or disable MKLDNN
+        or disable TensorRT, enable ONEDNN or disable ONEDNN
         are all the same.
         '''
         place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
@@ -390,13 +390,13 @@ def check_output_with_option(
         if (not use_gpu) and self.enable_mkldnn:
             onednn_outputs = self._get_inference_outs(
                 self._get_analysis_config(
-                    use_gpu=use_gpu, use_mkldnn=self.enable_mkldnn
+                    use_gpu=use_gpu, use_onednn=self.enable_mkldnn
                )
            )
 
             self.assertTrue(
                 len(paddle_outs) == len(onednn_outputs),
-                "The number of outputs is different between CPU and MKLDNN. ",
+                "The number of outputs is different between CPU and ONEDNN. ",
             )
 
             if self.enable_onednn_bfloat16:
@@ … @@
                     onednn_output,
                     rtol=1e-05,
                     atol=atol,
-                    err_msg='Output has diff between CPU and MKLDNN. ',
+                    err_msg='Output has diff between CPU and ONEDNN. ',
                 )
 
     class TensorRTParam:
@@ -303,7 +303,7 @@ def test_with_place(place, data_layout, shape):
                 "epsilon": epsilon,
                 "is_test": False,
                 "data_layout": data_layout,
-                "use_mkldnn": self.use_onednn,
+                "use_onednn": self.use_onednn,
                 "fuse_with_relu": self.fuse_with_relu,
                 "use_global_stats": self.use_global_stats,
             }
6 changes: 3 additions & 3 deletions test/deprecated/legacy_test/test_layer_norm_op_deprecated.py

@@ -142,10 +142,10 @@ def check_forward_backward(
         has_scale=True,
         has_bias=True,
         y_grad_scale=1.0,
-        use_mkldnn=False,
+        use_onednn=False,
     ):
         def test_with_place(
-            place, shape, begin_norm_axis, use_mkldnn=use_mkldnn
+            place, shape, begin_norm_axis, use_onednn=use_onednn
         ):
             # attr
             epsilon = 0.00001
@@ -221,7 +221,7 @@ def test_with_place(
                 attrs={
                     "epsilon": epsilon,
                     "begin_norm_axis": begin_norm_axis,
-                    "use_mkldnn": use_mkldnn,
+                    "use_onednn": use_onednn,
                 },
             )
             # generate backward op_desc
4 changes: 2 additions & 2 deletions test/deprecated/legacy_test/test_program_deprecated.py

@@ -153,7 +153,7 @@ class TestProgramProto(unittest.TestCase):
     def test_update_op(self):
         program = build_program()
         a = program.desc.serialize_to_string()
-        program.current_block().ops[0]._set_attr('use_mkldnn', True)
+        program.current_block().ops[0]._set_attr('use_onednn', True)
         self.assertTrue(program.desc.need_update())
         b = program.desc.serialize_to_string()
         self.assertFalse(a == b)
@@ -230,7 +230,7 @@ def test_program_update(self):
         hash1 = program.desc.cached_hash_str()
         id1 = id(program)
         # change mul's attr
-        program.current_block().ops[0]._set_attr('use_mkldnn', True)
+        program.current_block().ops[0]._set_attr('use_onednn', True)
         program.current_block().ops[0]._set_attr('scale_x', 2.0)
         hash2 = program.desc.cached_hash_str()
         id2 = id(program)
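Both tests rely on the same invariant: mutating any op attribute marks the program desc as needing an update, which changes its serialized bytes (and, in the second test, its cached hash). A self-contained sketch of that invariant using the static-graph API, with the attribute name as in the tests above:

```python
# Sketch: setting an op attribute dirties the program desc and changes
# its serialization, which is what test_update_op asserts.
import paddle

paddle.enable_static()
prog = paddle.static.Program()
with paddle.static.program_guard(prog):
    x = paddle.static.data('x', shape=[2, 2], dtype='float32')
    y = paddle.matmul(x, x)

before = prog.desc.serialize_to_string()
prog.current_block().ops[0]._set_attr('use_onednn', True)
assert prog.desc.need_update()
assert prog.desc.serialize_to_string() != before
```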
6 changes: 3 additions & 3 deletions test/deprecated/mkldnn/CMakeLists.txt

@@ -1,12 +1,12 @@
 file(
-  GLOB TEST_MKLDNN_LISTS
+  GLOB TEST_ONEDNN_LISTS
   RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
   "test_*.py")
-string(REPLACE ".py" "" TEST_MKLDNN_LISTS "${TEST_MKLDNN_LISTS}")
+string(REPLACE ".py" "" TEST_ONEDNN_LISTS "${TEST_ONEDNN_LISTS}")
 if(WIN32)
   message(STATUS "Skip tests unrelated to onednn/mkldnn")
 elseif(WITH_ONEDNN)
-  foreach(target ${TEST_MKLDNN_LISTS})
+  foreach(target ${TEST_ONEDNN_LISTS})
     py_test_modules(${target} MODULES ${target})
     set_tests_properties(${target} PROPERTIES LABELS "RUN_TYPE=INFER" TIMEOUT
                                               120)