diff --git a/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc b/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
index bc69cb5e9bf7..86acac880cb2 100644
--- a/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
+++ b/src/operator/quantization/mkldnn/mkldnn_quantized_act.cc
@@ -40,7 +40,7 @@ static void MKLDNNQuantizedActForward(const nnvm::NodeAttrs& attrs,
       << "_contrib_quantized_act op only supports uint8 and int8 as input "
          "type";
 
-  MKLDNNActivationForward(attrs, ctx, in_data[0], req[0], out_data[0]);
+  MKLDNNRun(MKLDNNActivationForward, attrs, ctx, in_data[0], req[0], out_data[0]);
   out_data[1].data().dptr<float>()[0] = in_data[1].data().dptr<float>()[0];
   out_data[2].data().dptr<float>()[0] = in_data[2].data().dptr<float>()[0];
 }
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index f00caf32332d..9e63730ec001 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -289,7 +289,7 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(param, inputs[0]) && req[0] == kWriteTo) {
-    MKLDNNTransposeForward(attrs, ctx, inputs[0], req[0], outputs[0]);
+    MKLDNNRun(MKLDNNTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }
   FallBackCompute(Transpose, attrs, ctx, inputs, req, outputs);