Changes from all commits (37 commits)
07b9be6
[API-Compat] paddle.compat.split is added and tested
Enigmatisms Aug 5, 2025
f5a1fda
[API-Compat] paddle.compat.split is rigorously tested
Enigmatisms Aug 6, 2025
21b8710
[API-Compat] Fixed erroneous func help doc
Enigmatisms Aug 6, 2025
ab94709
[API-Compat] Make the forbid_keywords decorator transparent
Enigmatisms Aug 6, 2025
5f0fd2a
[API-Compat] Fixed decorator str input
Enigmatisms Aug 6, 2025
5315118
[API-Compat] Fixed type annotation and removed legacy graph branch
Enigmatisms Aug 6, 2025
4fa3d1f
add slice manual with pr (#74445)
swgu98 Aug 7, 2025
1a7b798
【stride】fix copy_, set_value when dst is nullptr canot use stridecopy…
xiaoguoguo626807 Aug 7, 2025
8a39ceb
Parameter mapping from torch to paddle(broadcast_to) (#74449)
zhengshengning Aug 7, 2025
3c6512d
paddle/fluid/pybind/inference_api.cc add onednn [fluid_ops] (#74436)
co63oc Aug 7, 2025
daf95cf
build_strategy.cc modify mkldnn_enabled_op_types [fluid_ops] (#74417)
co63oc Aug 7, 2025
ddb38a9
fix argsort (#74434)
Difers Aug 7, 2025
7884e6c
split get_places function, add is_custom_device in op_test (#74363)
datutu-L Aug 7, 2025
1829d08
ADD Check chinese (#74460)
tianshuo78520a Aug 7, 2025
7108a15
op_compat.yaml add use_onednn (#74410)
co63oc Aug 7, 2025
e6b8ad1
[XPU] support python streams api for xpu (#73924)
runzhech Aug 7, 2025
455be69
Fix paddle.dist & paddle.nn.functional.normalize (#74448)
fxyfxy777 Aug 7, 2025
81baec3
fix bug of convert spliting to slice (#74462)
zyfncg Aug 7, 2025
326caed
【stride】revert (#74464)
xiaoguoguo626807 Aug 8, 2025
c64f903
Add graph API support for conv_transpose forward (#74431)
cszdrg Aug 8, 2025
7b4ba28
explanatory note for English (#74457)
zhengshengning Aug 8, 2025
abea8d2
Fix val warning (#74468)
co63oc Aug 8, 2025
cc6178e
[CINN] Align pow int for base=0 and exponent<0 (#74450)
lshpku Aug 8, 2025
7807dce
Fix SliceTensor to fix paddle.functional.diff for big tensor (#74451)
zrr1999 Aug 8, 2025
5a66737
Fix header include path for HIP float16.h (#74461)
feixi21 Aug 8, 2025
388f654
infermeta modify mkldnn_data_type [fluid_ops] (#74473)
co63oc Aug 8, 2025
0305d82
[API compatibility] add dtype conversion method (#74416)
zhiminzhang0830 Aug 8, 2025
d036c9c
[API compatibility] add msort api (#74421)
Starrysea996 Aug 8, 2025
fa91b4d
[API compatibility] add paddle.ravel (#74439)
Difers Aug 8, 2025
a888525
[Typo error] fix typo error conutn to count (#74475)
enkilee Aug 8, 2025
812f027
[Warning fix] fix warning for slice_utils.h (#74474)
enkilee Aug 8, 2025
d3cf775
Run XPU Paddlex (#74426)
tianshuo78520a Aug 8, 2025
f73a7c1
test directory modify mkldnn [fluid_ops] - part (#74458)
co63oc Aug 8, 2025
6d6e59f
add bypass slice (#74482)
swgu98 Aug 8, 2025
97947df
add paddle nn.functional.dropout1d api (#74444)
zhanghonggeng Aug 8, 2025
8f23ffb
Add Chinese documentation (#74453)
kjagsdq Aug 8, 2025
3ec6b4d
[API-Compat] More unittest & static graph check & updated decorator
Enigmatisms Aug 9, 2025
9 changes: 9 additions & 0 deletions .github/workflows/Slice-baseline.yml
@@ -2,6 +2,13 @@ name: Slice-baseline-paddle

on:
workflow_dispatch:
inputs:
PR_ID:
required: false
type: string
COMMIT_ID:
required: false
type: string
schedule:
- cron: '0 20 * * 0'

@@ -43,3 +50,5 @@ jobs:
docker_build_image: ${{ needs.build-docker.outputs.docker_build_image }}
slice-check: 'true'
SLICE_TEST_MODE: insert_baseline
MANUALLY_PR_ID: ${{ inputs.PR_ID }}
MANUALLY_COMMIT_ID: ${{ inputs.COMMIT_ID }}
2 changes: 1 addition & 1 deletion .github/workflows/_Linux-XPU.yml
@@ -206,7 +206,7 @@ jobs:
CCACHE_DIR: /root/.ccache
CCACHE_MAXSIZE: 150G
CCACHE_LIMIT_MULTIPLE: 0.8
IF_KUNLUN3: "OFF"
IF_KUNLUN3: "ON"
GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
home_dir: ${{ github.workspace }}/../../../..
FLAGS_use_stride_kernel: "0"
13 changes: 12 additions & 1 deletion .github/workflows/_Slice.yml
@@ -20,6 +20,12 @@ on:
type: string
required: false
default: 'paddle'
MANUALLY_PR_ID:
type: string
required: false
MANUALLY_COMMIT_ID:
type: string
required: false

env:
PR_ID: ${{ github.event.pull_request.number || '0' }}
@@ -47,6 +53,7 @@ jobs:

slice:
name: Slice test
needs: check-bypass
if: ${{ inputs.can-skip != 'true' && needs.check-bypass.outputs.can-skip != 'true' }}
runs-on:
group: slice
@@ -105,7 +112,11 @@ jobs:
if [[ "${{ inputs.SLICE_BENCHMARK_FRAMEWORKS }}" == "torch" ]];then
python3.10 -m pip install torch==2.6.0 torchvision==0.21.0 torchaudio==2.6.0 --index-url https://download.pytorch.org/whl/cu118
else
python3.10 -m pip install $wheel_link
if [[ "${{ inputs.MANUALLY_PR_ID }}" == "" ]]; then
python3.10 -m pip install $wheel_link
else
python3.10 -m pip install https://paddle-github-action.bj.bcebos.com/PR/build/${{ inputs.MANUALLY_PR_ID }}/${{ inputs.MANUALLY_COMMIT_ID }}/paddlepaddle_gpu-0.0.0-cp310-cp310-linux_x86_64.whl
fi
fi
python3.10 -m pip install -r PaddleTest/framework/e2e/api_benchmark/requirement.txt
cd PaddleTest/framework/slice_benchmark
6 changes: 6 additions & 0 deletions ci/check_approval.sh
@@ -309,6 +309,12 @@ if [ "${ALL_PADDLE_ENFORCE}" != "" ] && [ "${PR_ID}" != "" ]; then
check_approval 1 luotao1 zhangbo9674 phlrain
fi

CHINESE_CHECK=$(git diff -U0 upstream/$BRANCH |grep "^+" |grep -P '[\p{Han}]')
if [ "${CHINESE_CHECK}" != "" ] && [ "${PR_ID}" != "" ]; then
echo_line="Not recommended to use Chinese. You must have one RD (tianshuo78520a or swgu98 or zhangbo9674 or risemeup1) approval."
check_approval 1 tianshuo78520a swgu98 zhangbo9674 risemeup1
fi

ALL_ADDED_LINES=$(git diff -U0 upstream/$BRANCH |grep "^+" || true)
ALL_PADDLE_CHECK=$(echo $ALL_ADDED_LINES |grep -zoE "(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\(.[^,\);]*.[^;]*\);\s" || true)
VALID_PADDLE_CHECK=$(echo "$ALL_PADDLE_CHECK" | grep -zoE '(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\(([^,;]+,)*[^";]*errors::.[^"]*".[^";]{20,}.[^;]*\);\s' || true)
3 changes: 2 additions & 1 deletion ci/kunlun_test.sh
@@ -159,9 +159,10 @@ set +x
git clone --depth 1000 https://gitee.com/paddlepaddle/PaddleX.git
cd PaddleX
pip install -e .
pip install numpy==1.24.4 pypdfium2

#install paddle x dependency
paddlex --install PaddleClas
paddlex --install PaddleClas -y

#download paddle dataset
wget -q https://paddle-model-ecology.bj.bcebos.com/paddlex/data/cls_flowers_examples.tar -P ./dataset
25 changes: 19 additions & 6 deletions cmake/cinn.cmake
@@ -26,12 +26,21 @@ else()
endif()

if(NOT DEFINED ENV{runtime_include_dir})
message(
STATUS
"set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
add_definitions(
-DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
if(WITH_GPU)
message(
STATUS
"set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
add_definitions(
-DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/cuda")
elseif(WITH_ROCM)
message(
STATUS
"set runtime_include_dir: ${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
set(ENV{runtime_include_dir} "${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
add_definitions(
-DRUNTIME_INCLUDE_DIR="${CMAKE_SOURCE_DIR}/paddle/cinn/runtime/hip")
endif()
endif()

if(WITH_TESTING)
@@ -118,6 +127,10 @@ if(WITH_ROCM)
add_definitions(-DCINN_WITH_HIP)
endif()
link_libraries(${ROCM_HIPRTC_LIB})

message(
STATUS "copy paddle/cinn/common/float16.h to $ENV{runtime_include_dir}")
file(COPY paddle/cinn/common/float16.h DESTINATION $ENV{runtime_include_dir})
endif()

set(cinnapi_src CACHE INTERNAL "" FORCE)
6 changes: 6 additions & 0 deletions doc/README_cn.md
@@ -0,0 +1,6 @@
# To Readers and Developers
Thank you for reading the PaddlePaddle documentation.

Since **September 17, 2018**, the documentation source for the **0.15.0 and develop** branches has been migrated to the [FluidDoc Repo](https://github.com/PaddlePaddle/FluidDoc) repository and will be continuously updated there.

Please visit the FluidDoc repository for the latest documentation.
@@ -742,9 +742,7 @@ class SplitOpPattern : public pir::OpRewritePattern<paddle::dialect::SplitOp> {
using pir::OpRewritePattern<paddle::dialect::SplitOp>::OpRewritePattern;

bool Match(paddle::dialect::SplitOp op) const override {
const bool is_denied = CompatibleInfo::IsDeniedForCinn(*op.operation());

return !is_denied && PatternConstraint(op);
return PatternConstraint(op);
}

void Rewrite(paddle::dialect::SplitOp op,
5 changes: 4 additions & 1 deletion paddle/cinn/runtime/cuda/cinn_cuda_runtime_source.cuh
@@ -338,7 +338,7 @@ extern "C" {

__device__ inline int FN_INT32(pow)(int a, int b) {
if (a == 0 && b < 0) {
return -1;
return 0;
}
float res = pow(__int2float_rd(a), __int2float_rd(b));
return __float2int_rn(res);
@@ -418,6 +418,9 @@ __device__ inline long long int FN_INT64(exp)(long long int a) {

__device__ inline long long int FN_INT64(pow)(long long int a,
long long int b) {
if (a == 0 && b < 0) {
return 0;
}
double res = pow(__ll2double_rd(a), __ll2double_rd(b));
return __double2ll_rn(res);
}
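A minimal host-side C++ sketch of the integer-pow semantics intended by the two hunks above (zero base with a negative exponent now yields 0 instead of -1); the helper names and the float round-trip below are illustrative only, not the actual CUDA device symbols:

#include <cmath>
#include <cstdint>

// Illustrative host-side mirror of the patched device behavior.
inline int32_t int32_pow_sketch(int32_t a, int32_t b) {
  if (a == 0 && b < 0) {
    return 0;  // previously -1; 0 ** negative is now defined as 0
  }
  float res = std::pow(static_cast<float>(a), static_cast<float>(b));
  return static_cast<int32_t>(std::lround(res));
}

inline int64_t int64_pow_sketch(int64_t a, int64_t b) {
  if (a == 0 && b < 0) {
    return 0;  // same guard, newly added for the 64-bit variant
  }
  double res = std::pow(static_cast<double>(a), static_cast<double>(b));
  return static_cast<int64_t>(std::llround(res));
}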
2 changes: 2 additions & 0 deletions paddle/common/layout.h
@@ -85,6 +85,8 @@ inline DataLayout StringToDataLayout(const std::string& str) {
return DataLayout::kAnyLayout;
} else if (s == "MKLDNNLAYOUT") {
return DataLayout::kMKLDNN;
} else if (s == "ONEDNNLAYOUT") {
return DataLayout::ONEDNN;
} else if (s == "SPARSE_COO") {
return DataLayout::SPARSE_COO;
} else if (s == "SPARSE_CSR") {
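A self-contained sketch of why the extra branch matters (simplified from the truncated hunk above; names suffixed with Sketch are hypothetical): op_registry.h, further below in this PR, now reports the layout string "ONEDNNLAYOUT" for MKLDNN-library kernels, so the parser has to map the new spelling as well as the legacy one.

#include <cassert>
#include <string>

// Hypothetical enum/function names; the real ones live in paddle/common/layout.h
// and handle many more layouts than shown here.
enum class DataLayoutSketch { kAnyLayout, kMKLDNN, ONEDNN, SPARSE_COO, SPARSE_CSR };

DataLayoutSketch StringToDataLayoutSketch(const std::string& s) {
  if (s == "MKLDNNLAYOUT") return DataLayoutSketch::kMKLDNN;  // legacy spelling
  if (s == "ONEDNNLAYOUT") return DataLayoutSketch::ONEDNN;   // branch added by this PR
  if (s == "SPARSE_COO") return DataLayoutSketch::SPARSE_COO;
  if (s == "SPARSE_CSR") return DataLayoutSketch::SPARSE_CSR;
  return DataLayoutSketch::kAnyLayout;
}

int main() {
  assert(StringToDataLayoutSketch("ONEDNNLAYOUT") == DataLayoutSketch::ONEDNN);
  assert(StringToDataLayoutSketch("MKLDNNLAYOUT") == DataLayoutSketch::kMKLDNN);
  return 0;
}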
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/build_strategy.cc
@@ -321,7 +321,7 @@ ir::Graph *BuildStrategy::Apply(ir::Graph *graph,
continue;
}
} else if (pass->Type() == "onednn_placement_pass") {
pass->Set("mkldnn_enabled_op_types",
pass->Set("onednn_enabled_op_types",
new std::unordered_set<std::string>(onednn_enabled_op_types_));
}
VLOG(1) << "Start Apply Pass " << pass->Type();
6 changes: 3 additions & 3 deletions paddle/fluid/framework/ir/onednn/onednn_placement_pass.cc
@@ -64,7 +64,7 @@ inline bool FoundPhiOneDNNKernelWithCorrectDataType(
return false;
}

bool MKLDNNPlacementPass::IsSupport(const Node* op) const {
bool ONEDNNPlacementPass::IsSupport(const Node* op) const {
if (FoundOneDNNKernelWithCorrectDataType(op) ||
FoundPhiOneDNNKernelWithCorrectDataType(op)) {
// For interpolate ops, there's a little difference between Paddle and
@@ -89,8 +89,8 @@ bool MKLDNNPlacementPass::IsSupport(const Node* op) const {

} // namespace paddle::framework::ir

REGISTER_PASS(onednn_placement_pass, paddle::framework::ir::MKLDNNPlacementPass)
.RequirePassAttr("mkldnn_enabled_op_types");
REGISTER_PASS(onednn_placement_pass, paddle::framework::ir::ONEDNNPlacementPass)
.RequirePassAttr("onednn_enabled_op_types");

REGISTER_PASS_CAPABILITY(onednn_placement_pass)
.AddCombination(
6 changes: 3 additions & 3 deletions paddle/fluid/framework/ir/onednn/onednn_placement_pass.h
@@ -26,17 +26,17 @@ namespace ir {
/*
* Specifies which operators should use MKLDNN.
*/
class MKLDNNPlacementPass : public PlacementPassBase {
class ONEDNNPlacementPass : public PlacementPassBase {
protected:
bool IsSupport(const Node* op) const override;

private:
const std::string GetPlacementName() const override { return "MKLDNN"; }
const std::string GetPlacementName() const override { return "ONEDNN"; }

const std::string GetAttrName() const override { return "use_mkldnn"; }

const std::unordered_set<std::string> GetOpTypesList() const override {
return Get<std::unordered_set<std::string>>("mkldnn_enabled_op_types");
return Get<std::unordered_set<std::string>>("onednn_enabled_op_types");
}
};
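The attribute key renamed across these hunks has to stay consistent in three places: build_strategy.cc above and ir_pass_manager.cc further below write it with pass->Set, REGISTER_PASS declares it with RequirePassAttr, and GetOpTypesList reads it back with Get<>. A rough, self-contained sketch of that string-keyed attribute pattern, using hypothetical names rather than Paddle's actual Pass API:

#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <unordered_set>

// Hypothetical illustration of a string-keyed pass-attribute registry.
class PassSketch {
 public:
  template <typename T>
  void Set(const std::string& key, T* value) {
    attrs_[key] = std::shared_ptr<void>(value);
  }
  template <typename T>
  const T& Get(const std::string& key) const {
    return *static_cast<T*>(attrs_.at(key).get());  // at() throws on a stale key
  }

 private:
  std::map<std::string, std::shared_ptr<void>> attrs_;
};

int main() {
  PassSketch pass;
  pass.Set("onednn_enabled_op_types",
           new std::unordered_set<std::string>({"conv2d", "relu"}));
  // A reader still asking for the old "mkldnn_enabled_op_types" key would throw.
  assert(pass.Get<std::unordered_set<std::string>>("onednn_enabled_op_types")
             .count("conv2d") == 1);
  return 0;
}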

18 changes: 10 additions & 8 deletions paddle/fluid/framework/ir/onednn/onednn_placement_pass_tester.cc
@@ -133,7 +133,7 @@ class PlacementPassTest {

auto pass = PassRegistry::Instance().Get("onednn_placement_pass");

pass->Set("mkldnn_enabled_op_types",
pass->Set("onednn_enabled_op_types",
new std::unordered_set<std::string>(onednn_enabled_op_types));

graph.reset(pass->Apply(graph.release()));
@@ -143,8 +143,10 @@ class PlacementPassTest {
for (auto* node : graph->Nodes()) {
if (node->IsOp()) {
auto* op = node->Op();
if (op->HasAttr("use_mkldnn") &&
PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) {
if ((op->HasAttr("use_mkldnn") &&
PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) ||
(op->HasAttr("use_onednn") &&
PADDLE_GET_CONST(bool, op->GetAttr("use_onednn")))) {
++use_onednn_true_count;
}
}
@@ -156,27 +158,27 @@ class PlacementPassTest {
void PlacementNameTest() {
auto pass = PassRegistry::Instance().Get("onednn_placement_pass");
EXPECT_EQ(static_cast<PlacementPassBase*>(pass.get())->GetPlacementName(),
"MKLDNN");
"ONEDNN");
}
};

TEST(MKLDNNPlacementPass, enable_conv_relu) {
TEST(ONEDNNPlacementPass, enable_conv_relu) {
// 2 conv (1 conv is always true) + 2 relu (1 relu is always true) + 0 pool
PlacementPassTest().MainTest({"conv2d", "relu"}, 4);
}

TEST(MKLDNNPlacementPass, enable_relu_pool) {
TEST(ONEDNNPlacementPass, enable_relu_pool) {
// 1 conv (1 conv is always true) + 2 relu (1 relu is always true) + 1 pool
PlacementPassTest().MainTest({"relu", "pool2d"}, 4);
}

TEST(MKLDNNPlacementPass, enable_all) {
TEST(ONEDNNPlacementPass, enable_all) {
// 2 conv (1 conv is always true) + 2 relu (1 relu is always true) + 1 pool +
// 1 concat
PlacementPassTest().MainTest({}, 6);
}

TEST(MKLDNNPlacementPass, placement_name) {
TEST(ONEDNNPlacementPass, placement_name) {
PlacementPassTest().PlacementNameTest();
}

@@ -119,7 +119,7 @@ IfInstruction::IfInstruction(size_t id,
outputs.emplace(value, GetValueIds(value, *value_exec_info));
}
if (value.use_count() > 0) {
VLOG(6) << "value " << i << " use conutn != 0";
VLOG(6) << "value " << i << " use count != 0";
is_last_op = false;
}
}
@@ -109,7 +109,7 @@ CudaGraphInstruction::CudaGraphInstruction(
outputs.emplace(value, GetValueIds(value, *value_exec_info));
}
if (value.use_count() > 0) {
VLOG(6) << "value " << i << " use conutn != 0";
VLOG(6) << "value " << i << " use count != 0";
is_last_op = false;
}
}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_registry.h
@@ -170,7 +170,7 @@ inline void RegisterKernelClass(const char* op_type,
std::string library(library_type);
std::string data_layout = "ANYLAYOUT";
if (library == "MKLDNN") {
data_layout = "MKLDNNLAYOUT";
data_layout = "ONEDNNLAYOUT";
}
#ifdef PADDLE_WITH_CUSTOM_DEVICE
if (std::is_same<PlaceType, phi::CustomPlace>::value) {
12 changes: 6 additions & 6 deletions paddle/fluid/inference/analysis/argument.h
@@ -193,12 +193,12 @@ struct Argument {
// whether to mute all logs in inference.
DECL_ARGUMENT_FIELD(disable_logs, DisableLogs, bool);

// Pass a set of op types to enable its mkldnn kernel
DECL_ARGUMENT_FIELD(mkldnn_enabled_op_types,
MKLDNNEnabledOpTypes,
// Pass a set of op types to enable its onednn kernel
DECL_ARGUMENT_FIELD(onednn_enabled_op_types,
ONEDNNEnabledOpTypes,
std::unordered_set<std::string>);
// The cache capacity of different input shapes for mkldnn.
DECL_ARGUMENT_FIELD(mkldnn_cache_capacity, MkldnnCacheCapacity, int);
// The cache capacity of different input shapes for onednn.
DECL_ARGUMENT_FIELD(mkldnn_cache_capacity, OnednnCacheCapacity, int);

#ifdef PADDLE_WITH_DNNL
// A set of op types to enable their quantized kernels
@@ -219,7 +219,7 @@ struct Argument {
Bfloat16EnabledOpTypes,
std::unordered_set<std::string>);

DECL_ARGUMENT_FIELD(use_onednn_int8, UseMkldnnInt8, bool);
DECL_ARGUMENT_FIELD(use_onednn_int8, UseOnednnInt8, bool);
#endif

// Passed from config.
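As a reading aid for the field renames above: DECL_ARGUMENT_FIELD-style macros typically stamp out a member plus accessors named after their arguments, which is why renaming MKLDNNEnabledOpTypes to ONEDNNEnabledOpTypes also renames the call sites later in this PR (argument->onednn_enabled_op_types() in ir_pass_manager.cc, argument_->SetONEDNNEnabledOpTypes(...) in analysis_predictor.cc). The following is a hypothetical sketch of that pattern, not Paddle's actual macro definition:

#include <string>
#include <unordered_set>

// Hypothetical illustration only; Paddle's real DECL_ARGUMENT_FIELD differs.
#define DECL_FIELD_SKETCH(field__, Accessor__, type__)        \
 public:                                                       \
  const type__& field__() const { return field__##_; }         \
  void Set##Accessor__(const type__& v) { field__##_ = v; }    \
                                                                \
 private:                                                       \
  type__ field__##_;

struct ArgumentSketch {
  DECL_FIELD_SKETCH(onednn_enabled_op_types,
                    ONEDNNEnabledOpTypes,
                    std::unordered_set<std::string>)
};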
4 changes: 2 additions & 2 deletions paddle/fluid/inference/analysis/ir_pass_manager.cc
@@ -131,9 +131,9 @@ void IRPassManager::CreatePasses(Argument *argument,
pass->Set("optim_cache_dir", new std::string(std::move(optim_cache_dir)));
pass_num++;
} else if (pass_name == "onednn_placement_pass") {
pass->Set("mkldnn_enabled_op_types",
pass->Set("onednn_enabled_op_types",
new std::unordered_set<std::string>(
argument->mkldnn_enabled_op_types()));
argument->onednn_enabled_op_types()));
} else if (pass_name == "cudnn_placement_pass") {
pass->Set("cudnn_enabled_op_types",
new std::unordered_set<std::string>());
14 changes: 7 additions & 7 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -1031,8 +1031,8 @@ void AnalysisPredictor::OptimizeInferencePirProgram() {
}
#endif
#ifdef PADDLE_WITH_DNNL
} else if (config_.mkldnn_enabled()) {
// mkldnn
} else if (config_.onednn_enabled()) {
// onednn
pir::IrContext *ctx = pir::IrContext::Instance();
ctx->GetOrRegisterDialect<paddle::dialect::OneDNNOperatorDialect>();
if (!config_.custom_pass_only_) {
@@ -2100,9 +2100,9 @@ void AnalysisPredictor::PrepareArgument() {
argument_->SetIpuCustomPatterns(config_.ipu_custom_patterns_);
#endif

if (config_.mkldnn_enabled() && !config_.use_gpu()) {
LOG(INFO) << "MKLDNN is enabled";
argument_->SetMKLDNNEnabledOpTypes(config_.onednn_enabled_op_types_);
if (config_.onednn_enabled() && !config_.use_gpu()) {
LOG(INFO) << "ONEDNN is enabled";
argument_->SetONEDNNEnabledOpTypes(config_.onednn_enabled_op_types_);
}

if (config_.cinn_enabled()) {
@@ -2115,7 +2115,7 @@ void AnalysisPredictor::PrepareArgument() {
argument_->SetBfloat16EnabledOpTypes(config_.bfloat16_enabled_op_types_);
}

if (config_.mkldnn_int8_enabled()) {
if (config_.onednn_int8_enabled()) {
LOG(INFO) << "Int8 is enabled";
argument_->SetQuantizeEnabledOpTypes(config_.quantize_enabled_op_types_);
argument_->SetQuantizeExcludedOpIds(config_.quantize_excluded_op_ids_);
@@ -2296,7 +2296,7 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
#if defined(_WIN32)
argument_->PartiallyRelease();
#else
if (config_.mkldnn_enabled() ||
if (config_.onednn_enabled() ||
(config_.tensorrt_engine_enabled() &&
config_.tensorrt_precision_mode_ ==
AnalysisConfig::Precision::kInt8)) { // NOLINT
2 changes: 1 addition & 1 deletion paddle/fluid/inference/capi/pd_config.cc
@@ -311,7 +311,7 @@ bool PD_OnednnEnabled(const PD_AnalysisConfig* config) {
config,
common::errors::InvalidArgument(
"The pointer of analysis configuration shouldn't be nullptr"));
return config->config.mkldnn_enabled();
return config->config.onednn_enabled();
}

void PD_SetCpuMathLibraryNumThreads(PD_AnalysisConfig* config,