diff --git a/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/CMakeLists.txt b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/CMakeLists.txt
new file mode 100755
index 0000000000..4f148869f8
--- /dev/null
+++ b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/CMakeLists.txt
@@ -0,0 +1,215 @@
+project(clas_system CXX C)
+
+option(WITH_MKL "Compile demo with MKL/OpenBLAS support. Default: MKL." ON)
+option(WITH_GPU "Compile demo with GPU/CPU. Default: CPU." OFF)
+option(WITH_STATIC_LIB "Compile demo with static/shared library. Default: static." ON)
+option(WITH_TENSORRT "Compile demo with TensorRT." OFF)
+
+SET(PADDLE_LIB "" CACHE PATH "Location of libraries")
+SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(CUDA_LIB "" CACHE PATH "Location of libraries")
+SET(CUDNN_LIB "" CACHE PATH "Location of libraries")
+SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
+
+set(DEMO_NAME "clas_system")
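+
+# Example configure step (paths are illustrative; replace them with the ones on your machine):
+#   cmake .. -DPADDLE_LIB=/path/to/paddle_inference \
+#            -DOPENCV_DIR=/path/to/opencv3 \
+#            -DWITH_MKL=ON -DWITH_GPU=OFF -DWITH_STATIC_LIB=OFF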
+
+
+macro(safe_set_static_flag)
+ foreach(flag_var
+ CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+ if(${flag_var} MATCHES "/MD")
+ string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
+ endif(${flag_var} MATCHES "/MD")
+ endforeach(flag_var)
+endmacro()
+
+if (WITH_MKL)
+ ADD_DEFINITIONS(-DUSE_MKL)
+endif()
+
+if(NOT DEFINED PADDLE_LIB)
+ message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
+endif()
+
+if(NOT DEFINED OPENCV_DIR)
+ message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
+endif()
+
+
+if (WIN32)
+ include_directories("${PADDLE_LIB}/paddle/fluid/inference")
+ include_directories("${PADDLE_LIB}/paddle/include")
+ link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+ find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
+
+else ()
+ find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
+ include_directories("${PADDLE_LIB}/paddle/include")
+ link_directories("${PADDLE_LIB}/paddle/lib")
+endif ()
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+if (WIN32)
+ add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
+ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
+ set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
+ if (WITH_STATIC_LIB)
+ safe_set_static_flag()
+ add_definitions(-DSTATIC_LIB)
+ endif()
+else()
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O3 -std=c++11")
+ set(CMAKE_STATIC_LIBRARY_PREFIX "")
+endif()
+message(STATUS "flags: ${CMAKE_CXX_FLAGS}")
+
+
+if (WITH_GPU)
+ if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
+ message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda-8.0/lib64")
+ endif()
+ if (NOT WIN32)
+ if (NOT DEFINED CUDNN_LIB)
+ message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn_v7.4/cuda/lib64")
+ endif()
+ endif(NOT WIN32)
+endif()
+
+include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
+include_directories("${PADDLE_LIB}/third_party/install/glog/include")
+include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
+include_directories("${PADDLE_LIB}/third_party/install/xxhash/include")
+include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
+include_directories("${PADDLE_LIB}/third_party/boost")
+include_directories("${PADDLE_LIB}/third_party/eigen3")
+
+include_directories("${CMAKE_SOURCE_DIR}/")
+
+if (NOT WIN32)
+ if (WITH_TENSORRT AND WITH_GPU)
+ include_directories("${TENSORRT_DIR}/include")
+ link_directories("${TENSORRT_DIR}/lib")
+ endif()
+endif(NOT WIN32)
+
+link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
+
+link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
+link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
+link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
+link_directories("${PADDLE_LIB}/third_party/install/xxhash/lib")
+link_directories("${PADDLE_LIB}/paddle/lib")
+
+
+if(WITH_MKL)
+ include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
+ if (WIN32)
+ set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.lib
+ ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.lib)
+ else ()
+ set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
+ ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
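+    # Copy the MKLML runtime into /usr/lib at configure time so the dynamic
+    # loader can resolve it at run time without extra LD_LIBRARY_PATH setup.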
+ execute_process(COMMAND cp -r ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
+ endif ()
+ set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
+ if(EXISTS ${MKLDNN_PATH})
+ include_directories("${MKLDNN_PATH}/include")
+ if (WIN32)
+ set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
+ else ()
+ set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+ endif ()
+ endif()
+else()
+ if (WIN32)
+ set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else ()
+ set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif ()
+endif()
+
+# Note: libpaddle_inference_api.so/a must be put before libpaddle_fluid.so/a
+if(WITH_STATIC_LIB)
+ if(WIN32)
+ set(DEPS
+ ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+ else()
+ set(DEPS
+ ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif()
+else()
+ if(WIN32)
+ set(DEPS
+ ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
+ else()
+ set(DEPS
+ ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+endif(WITH_STATIC_LIB)
+
+if (NOT WIN32)
+ set(DEPS ${DEPS}
+ ${MATH_LIB} ${MKLDNN_LIB}
+ glog gflags protobuf z xxhash
+ )
+ if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
+ set(DEPS ${DEPS} snappystream)
+ endif()
+ if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
+ set(DEPS ${DEPS} snappy)
+ endif()
+else()
+ set(DEPS ${DEPS}
+ ${MATH_LIB} ${MKLDNN_LIB}
+ glog gflags_static libprotobuf xxhash)
+ set(DEPS ${DEPS} libcmt shlwapi)
+ if (EXISTS "${PADDLE_LIB}/third_party/install/snappy/lib")
+ set(DEPS ${DEPS} snappy)
+ endif()
+ if(EXISTS "${PADDLE_LIB}/third_party/install/snappystream/lib")
+ set(DEPS ${DEPS} snappystream)
+ endif()
+endif(NOT WIN32)
+
+
+if(WITH_GPU)
+ if(NOT WIN32)
+ if (WITH_TENSORRT)
+ set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
+ endif()
+ set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
+ set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
+ else()
+ set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
+ set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
+ set(DEPS ${DEPS} ${CUDNN_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
+ endif()
+endif()
+
+
+if (NOT WIN32)
+ set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
+ set(DEPS ${DEPS} ${EXTERNAL_LIB})
+endif()
+
+set(DEPS ${DEPS} ${OpenCV_LIBS})
+
+AUX_SOURCE_DIRECTORY(./src SRCS)
+add_executable(${DEMO_NAME} ${SRCS})
+
+target_link_libraries(${DEMO_NAME} ${DEPS})
+
+if (WIN32 AND WITH_MKL)
+ add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+ )
+endif()
diff --git a/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/README.md b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/README.md
index e69de29bb2..8ccb26d808 100644
--- a/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/README.md
+++ b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/README.md
@@ -0,0 +1,243 @@
+# Server-Side C++ Inference
+
+This tutorial describes the detailed steps for deploying the mobilenet_v3_small model on a server.
+
+
+## 1. Prepare the Environment
+
+### Prerequisites
+- A Linux environment; Docker is recommended ([installation guide](https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/docker/linux-docker.html)).
+
+### 1.1 Build the OpenCV Library
+
+* First, download the Linux source package from the OpenCV releases. Taking version 3.4.7 as an example, download and extract it with:
+
+```shell
+wget https://github.com/opencv/opencv/archive/3.4.7.tar.gz
+tar -xvf 3.4.7.tar.gz
+```
+
+After extraction, an `opencv-3.4.7/` folder appears in the current directory.
+
+* To build OpenCV, first set the source path (`root_path`) and the install path (`install_path`). `root_path` is the downloaded OpenCV source directory and `install_path` is where OpenCV will be installed; in this example the source path is `opencv-3.4.7/` under the current directory.
+
+```shell
+cd ./opencv-3.4.7
+export root_path=$PWD
+export install_path=${root_path}/opencv3
+```
+
+* Then, from the OpenCV source directory, build it with the following commands.
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+ -DCMAKE_INSTALL_PREFIX=${install_path} \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DBUILD_SHARED_LIBS=OFF \
+ -DWITH_IPP=OFF \
+ -DBUILD_IPP_IW=OFF \
+ -DWITH_LAPACK=OFF \
+ -DWITH_EIGEN=OFF \
+ -DCMAKE_INSTALL_LIBDIR=lib64 \
+ -DWITH_ZLIB=ON \
+ -DBUILD_ZLIB=ON \
+ -DWITH_JPEG=ON \
+ -DBUILD_JPEG=ON \
+ -DWITH_PNG=ON \
+ -DBUILD_PNG=ON \
+ -DWITH_TIFF=ON \
+ -DBUILD_TIFF=ON
+
+make -j
+make install
+```
+
+* After `make install` finishes, the OpenCV headers and library files are generated in that folder; they are used for compiling the demo later.
+
+Taking OpenCV 3.4.7 as an example, the layout under the install path is shown below. **Note**: the layout may differ for other OpenCV versions.
+
+```
+opencv3/
+|-- bin     : executables
+|-- include : header files
+|-- lib64   : library files
+|-- share   : some third-party dependencies
+```
+
+### 1.2 Download or Build the Paddle Inference Library
+
+* There are two ways to obtain the Paddle inference library; both are described below.
+
+#### 1.2.1 Build the Inference Library from Source
+* To pick up the latest inference library features, clone the latest Paddle code from GitHub and build the library from source.
+* Follow the instructions on the [Paddle inference library site](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16) to fetch the Paddle code from GitHub and build the latest inference library. Fetch the code with git as follows.
+
+```shell
+git clone https://github.com/PaddlePaddle/Paddle.git
+```
+
+* Enter the Paddle directory and build with the following commands.
+
+```shell
+rm -rf build
+mkdir build
+cd build
+
+cmake .. \
+ -DWITH_CONTRIB=OFF \
+ -DWITH_MKL=ON \
+ -DWITH_MKLDNN=ON \
+ -DWITH_TESTING=OFF \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DWITH_INFERENCE_API_TEST=OFF \
+ -DON_INFER=ON \
+ -DWITH_PYTHON=ON
+make -j
+make inference_lib_dist
+```
+
+More build options are documented on the Paddle C++ inference library site: [https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/build_and_install_lib_cn.html#id16).
+
+
+* After the build finishes, the following files and folders are generated under `build/paddle_inference_install_dir/`.
+
+```
+build/paddle_inference_install_dir/
+|-- CMakeCache.txt
+|-- paddle
+|-- third_party
+|-- version.txt
+```
+
+Here `paddle` is the library needed later for C++ inference, and `version.txt` records the version of the built inference library.
+
+#### 1.2.2 Download a Prebuilt Library
+
+* The [Paddle inference library site](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html) provides prebuilt Linux inference libraries for different CUDA versions; browse the site and pick a suitable version.
+
+  Taking the `manylinux_cuda11.1_cudnn8.1_avx_mkl_trt7_gcc8.2` build as an example, download and extract it with:
+
+
+```shell
+wget https://paddle-inference-lib.bj.bcebos.com/2.2.2/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda11.1_cudnn8.1.1_trt7.2.3.4/paddle_inference.tgz
+
+tar -xvf paddle_inference.tgz
+```
+
+
+This produces a `paddle_inference/` subfolder in the current directory, with the same contents as the `paddle_inference_install_dir` described above.
+
+
+## 2. Compile and Run the Demo
+
+### 2.1 Export the Model as an Inference Model
+
+* Refer to [export_model.py](../../tools/export_model.py) to export an `inference model` for prediction. Assuming the exported model is saved under the `mobilenet_v3_small_infer` directory, the layout is as follows.
+
+```
+mobilenet_v3_small_infer/
+|--inference.pdmodel
+|--inference.pdiparams
+|--inference.pdiparams.info
+```
+**Note**: of the files above, `inference.pdmodel` stores the model structure and `inference.pdiparams` stores the model parameters. The paths of these two files must match the `cls_model_path` and `cls_params_path` entries in the configuration file `tools/config.txt`.
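+
+A minimal sketch of the export step, assuming `tools/export_model.py` takes a pretrained-weights path and an output directory (the flag names below are assumptions; check the script's argument definitions):
+
+```shell
+# Run from the Step6 root; flag names are illustrative, not authoritative.
+python tools/export_model.py \
+    --pretrained=./mobilenet_v3_small_pretrained.pdparams \
+    --save-inference-dir=./mobilenet_v3_small_infer
+```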
+
+### 2.2 Build the C++ Inference Demo
+
+* Build with the command below; replace the paths of the Paddle C++ inference library, OpenCV, and the other dependencies with the actual paths on your machine.
+
+
+```shell
+sh tools/build.sh
+```
+
+Specifically, the contents of `tools/build.sh` are as follows.
+
+```shell
+OPENCV_DIR=your_opencv_dir
+LIB_DIR=your_paddle_inference_dir
+CUDA_LIB_DIR=your_cuda_lib_dir
+CUDNN_LIB_DIR=your_cudnn_lib_dir
+TENSORRT_DIR=your_tensorrt_lib_dir
+
+BUILD_DIR=build
+rm -rf ${BUILD_DIR}
+mkdir ${BUILD_DIR}
+cd ${BUILD_DIR}
+cmake .. \
+ -DPADDLE_LIB=${LIB_DIR} \
+ -DWITH_MKL=ON \
+ -DDEMO_NAME=clas_system \
+ -DWITH_GPU=OFF \
+ -DWITH_STATIC_LIB=OFF \
+ -DWITH_TENSORRT=OFF \
+ -DTENSORRT_DIR=${TENSORRT_DIR} \
+ -DOPENCV_DIR=${OPENCV_DIR} \
+ -DCUDNN_LIB=${CUDNN_LIB_DIR} \
+    -DCUDA_LIB=${CUDA_LIB_DIR}
+
+make -j
+```
+
+In the script above,
+
+* `OPENCV_DIR` is the OpenCV install path (in this example, the path of the `opencv-3.4.7/opencv3` folder);
+
+* `LIB_DIR` is the path of the downloaded Paddle inference library (the `paddle_inference` folder) or of the one built from source (the `build/paddle_inference_install_dir` folder);
+
+* `CUDA_LIB_DIR` is the CUDA library path, `/usr/local/cuda/lib64` in Docker;
+
+* `CUDNN_LIB_DIR` is the cuDNN library path, `/usr/lib64` in Docker;
+
+* `TENSORRT_DIR` is the TensorRT library path, `/usr/local/TensorRT-7.2.3.4/` in Docker; TensorRT is only used together with the GPU. A filled-in example follows this list.
+
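+A filled-in example of these variables (the paths are illustrative and depend on where you placed the dependencies):
+
+```shell
+OPENCV_DIR=/work/opencv-3.4.7/opencv3
+LIB_DIR=/work/paddle_inference
+CUDA_LIB_DIR=/usr/local/cuda/lib64
+CUDNN_LIB_DIR=/usr/lib64
+TENSORRT_DIR=/usr/local/TensorRT-7.2.3.4
+```
+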
+After running the commands above, the build produces a `build` folder in the current directory containing an executable named `clas_system`.
+
+
+### 2.3 Run the demo
+* First, edit the corresponding fields in `tools/config.txt` (a sample file follows this list):
+  * use_gpu: whether to use the GPU;
+  * gpu_id: the id of the GPU card to use;
+  * gpu_mem: the GPU memory to allocate;
+  * cpu_math_library_num_threads: the number of threads used by the underlying math library;
+  * use_mkldnn: whether to enable MKL-DNN acceleration;
+  * use_tensorrt: whether to enable TensorRT acceleration;
+  * use_fp16: whether to compute in half precision; only effective when use_tensorrt is true;
+  * cls_model_path: path of the inference model structure file;
+  * cls_params_path: path of the inference model parameters file;
+  * resize_short_size: the size images are resized to during preprocessing;
+  * crop_size: the size images are cropped to during preprocessing.
+
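+A sample `tools/config.txt` under these assumptions (space-separated key/value pairs matching the field list above; the values are illustrative and must fit your setup):
+
+```
+use_gpu 0
+gpu_id 0
+gpu_mem 4000
+cpu_math_library_num_threads 10
+use_mkldnn 1
+use_tensorrt 0
+use_fp16 0
+cls_model_path ../mobilenet_v3_small_infer/inference.pdmodel
+cls_params_path ../mobilenet_v3_small_infer/inference.pdiparams
+resize_short_size 256
+crop_size 224
+```
+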
+* Then check `tools/run.sh`:
+  * `./build/clas_system ./tools/config.txt ../../images/demo.jpg`
+  * The arguments are, in order: the compiled executable `clas_system`, the runtime configuration file `config.txt`, and the image to predict.
+
+* Finally, run the following command to classify an image.
+
+```shell
+sh tools/run.sh
+```
+The prediction is run on the image below.
+
+
+

+
+
+* The result is printed to the screen, as shown below.
+```
+class id: 8
+
+score: 0.9014717937
+
+Current image path: ../../images/demo.jpg
+
+Current time cost: 0.0473620000 s, average time cost in all: 0.0473620000 s.
+
+```
+
+This means the predicted class id is `8` with a confidence of `0.901`, which matches the result from the training engine exactly.
+Here `class id` is the id of the highest-confidence class, and `score` is the probability that the image belongs to that class.
diff --git a/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls.h b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls.h
new file mode 100644
index 0000000000..f7a8711e7d
--- /dev/null
+++ b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "opencv2/core.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/imgproc.hpp"
+#include "paddle_inference_api.h"
+#include <chrono>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <vector>
+
+#include <cstring>
+#include <fstream>
+#include <numeric>
+
+#include <include/preprocess_op.h>
+
+using namespace paddle_infer;
+
+namespace MobileNetV3 {
+
+class Classifier {
+public:
+ explicit Classifier(const std::string &model_path,
+                      const std::string &params_path, const bool &use_gpu,
+ const int &gpu_id, const int &gpu_mem,
+ const int &cpu_math_library_num_threads,
+ const bool &use_mkldnn, const bool &use_tensorrt,
+ const bool &use_fp16, const int &resize_short_size,
+ const int &crop_size) {
+ this->use_gpu_ = use_gpu;
+ this->gpu_id_ = gpu_id;
+ this->gpu_mem_ = gpu_mem;
+ this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
+ this->use_mkldnn_ = use_mkldnn;
+ this->use_tensorrt_ = use_tensorrt;
+ this->use_fp16_ = use_fp16;
+
+ this->resize_short_size_ = resize_short_size;
+ this->crop_size_ = crop_size;
+
+ LoadModel(model_path, params_path);
+ }
+
+ // Load Paddle inference model
+  void LoadModel(const std::string &model_path, const std::string &params_path);
+
+ // Run predictor
+ double Run(cv::Mat &img);
+
+private:
+  std::shared_ptr<Predictor> predictor_;
+
+ bool use_gpu_ = false;
+ int gpu_id_ = 0;
+ int gpu_mem_ = 4000;
+ int cpu_math_library_num_threads_ = 4;
+ bool use_mkldnn_ = false;
+ bool use_tensorrt_ = false;
+ bool use_fp16_ = false;
+
+  std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
+  std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
+ bool is_scale_ = true;
+
+ int resize_short_size_ = 256;
+ int crop_size_ = 224;
+
+ // pre-process
+ ResizeImg resize_op_;
+ Normalize normalize_op_;
+ Permute permute_op_;
+ CenterCropImg crop_op_;
+};
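+
+// Usage sketch (illustrative; the demo's main program in ./src drives the real flow):
+//   MobileNetV3::Classifier clas(model_path, params_path, use_gpu, gpu_id,
+//                                gpu_mem, cpu_threads, use_mkldnn, use_tensorrt,
+//                                use_fp16, resize_short_size, crop_size);
+//   cv::Mat img = cv::imread(img_path, cv::IMREAD_COLOR);
+//   double cost = clas.Run(img);  // preprocesses and predicts; returns a timing value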
+
+} // namespace MobileNetV3
diff --git a/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls_config.h b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls_config.h
new file mode 100644
index 0000000000..231738b4b5
--- /dev/null
+++ b/tutorials/mobilenetv3_prod/Step6/deploy/inference_cpp/include/cls_config.h
@@ -0,0 +1,88 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <iostream>
+#include <map>
+#include <ostream>