diff --git a/3rdparty/onnx-tensorrt b/3rdparty/onnx-tensorrt
index f4745fcaff86..2eb74d933f89 160000
--- a/3rdparty/onnx-tensorrt
+++ b/3rdparty/onnx-tensorrt
@@ -1 +1 @@
-Subproject commit f4745fcaff868a519834917c657f105a8eef2f53
+Subproject commit 2eb74d933f89e1590fdbfc64971a36e5f72df720
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 162eeeade385..2e9c44b78189 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -228,6 +228,7 @@ if(USE_TENSORRT)
   include_directories(3rdparty/onnx-tensorrt/third_party/onnx/)
   add_definitions(-DMXNET_USE_TENSORRT=1)
   add_definitions(-DONNX_NAMESPACE=onnx)
+  add_definitions(-DONNX_ML=1)
 
   find_package(Protobuf REQUIRED)
 
@@ -237,14 +238,11 @@ if(USE_TENSORRT)
   find_library(ONNX_PROTO_LIBRARY NAMES libonnx_proto.so REQUIRED
           PATHS ${ONNX_PATH}
           DOC "Path to onnx_proto library.")
-  find_library(ONNX_TRT_RUNTIME_LIBRARY NAMES libnvonnxparser_runtime.so REQUIRED
-          PATHS ${ONNX_TRT_PATH}
-          DOC "Path to onnx_proto library.")
   find_library(ONNX_TRT_PARSER_LIBRARY NAMES libnvonnxparser.so REQUIRED
           PATHS ${ONNX_TRT_PATH}
-          DOC "Path to onnx_proto library.")
+          DOC "Path to onnx_proto parser library.")
 
-  list(APPEND mxnet_LINKER_LIBS libnvinfer.so ${ONNX_TRT_PARSER_LIBRARY} ${ONNX_TRT_RUNTIME_LIBRARY}
+  list(APPEND mxnet_LINKER_LIBS libnvinfer.so ${ONNX_TRT_PARSER_LIBRARY}
               ${ONNX_PROTO_LIBRARY} ${ONNX_LIBRARY} ${PROTOBUF_LIBRARY})
 endif()
diff --git a/ci/docker/Dockerfile.build.ubuntu b/ci/docker/Dockerfile.build.ubuntu
index c9ec3f5a04fc..4ac3f9149ea7 100644
--- a/ci/docker/Dockerfile.build.ubuntu
+++ b/ci/docker/Dockerfile.build.ubuntu
@@ -139,15 +139,22 @@ ARG BASE_IMAGE
 RUN export SHORT_CUDA_VERSION=${CUDA_VERSION%.*} && \
     apt-get update && \
     if [ ${SHORT_CUDA_VERSION} = 10.0 ]; then \
-        apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.0"; \
+        TRT_VERSION="7.0.0-1+cuda10.0"; \
+        TRT_MAJOR_VERSION=7; \
     elif [ ${SHORT_CUDA_VERSION} = 10.1 ]; then \
-        apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.1"; \
+        TRT_VERSION="6.0.1-1+cuda10.1"; \
+        TRT_MAJOR_VERSION=6; \
     elif [ ${SHORT_CUDA_VERSION} = 10.2 ]; then \
-        apt-get install -y "libnvinfer-dev=6.0.1-1+cuda10.2"; \
+        TRT_VERSION="7.0.0-1+cuda10.2"; \
+        TRT_MAJOR_VERSION=7; \
     else \
         echo "ERROR: Cuda ${SHORT_CUDA_VERSION} not yet supported in Dockerfile.build.ubuntu"; \
         exit 1; \
     fi && \
+    apt-get install -y libnvinfer${TRT_MAJOR_VERSION}=${TRT_VERSION} \
+                       libnvinfer-dev=${TRT_VERSION} \
+                       libnvinfer-plugin${TRT_MAJOR_VERSION}=${TRT_VERSION} \
+                       libnvinfer-plugin-dev=${TRT_VERSION} && \
     rm -rf /var/lib/apt/lists/*
 
 FROM gpu as gpuwithcudaruntimelibs
diff --git a/ci/docker/docker-compose.yml b/ci/docker/docker-compose.yml
index cced098d7f11..865abc128167 100644
--- a/ci/docker/docker-compose.yml
+++ b/ci/docker/docker-compose.yml
@@ -108,6 +108,16 @@ services:
         BASE_IMAGE: nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
       cache_from:
         - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu101:latest
+  ubuntu_gpu_cu102:
+    image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu102:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.ubuntu
+      target: gpu
+      args:
+        BASE_IMAGE: nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu102:latest
   ubuntu_build_cuda:
     image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_build_cuda:latest
     build:
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index 8752856177ea..5fb30279ea8a 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -544,6 +544,7 @@ build_ubuntu_gpu_tensorrt() {
 
     export CC=gcc-7
     export CXX=g++-7
+    export ONNX_NAMESPACE=onnx
 
     # Build ONNX
     pushd .
@@ -552,29 +553,29 @@ build_ubuntu_gpu_tensorrt() {
     rm -rf build
     mkdir -p build
     cd build
-    cmake -DBUILD_SHARED_LIBS=ON -GNinja ..
-    ninja onnx/onnx.proto
-    ninja
+    cmake -DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER} -DBUILD_SHARED_LIBS=ON ..
+    make -j$(nproc)
     export LIBRARY_PATH=`pwd`:`pwd`/onnx/:$LIBRARY_PATH
     export CPLUS_INCLUDE_PATH=`pwd`:$CPLUS_INCLUDE_PATH
+    export CXXFLAGS=-I`pwd`
+
     popd
 
     # Build ONNX-TensorRT
     export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
-    export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.1/targets/x86_64-linux/include/
+    export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.2/targets/x86_64-linux/include/
     pushd .
     cd 3rdparty/onnx-tensorrt/
     mkdir -p build
     cd build
-    cmake ..
+    cmake -DONNX_NAMESPACE=$ONNX_NAMESPACE ..
     make -j$(nproc)
     export LIBRARY_PATH=`pwd`:$LIBRARY_PATH
     popd
 
     mkdir -p /work/mxnet/lib/
     cp 3rdparty/onnx-tensorrt/third_party/onnx/build/*.so /work/mxnet/lib/
-    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser_runtime.so.0 /work/mxnet/lib/
-    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so.0 /work/mxnet/lib/
+    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so /work/mxnet/lib/
 
     cd /work/build
     cmake -DUSE_CUDA=1 \
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index 8079420dd794..e2b0b04dea41 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -278,7 +278,7 @@ def compile_unix_tensorrt_gpu(lib_name) {
     ws('workspace/build-tensorrt') {
       timeout(time: max_time, unit: 'MINUTES') {
         utils.init_git()
-        utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_tensorrt', false)
+        utils.docker_run('ubuntu_gpu_cu102', 'build_ubuntu_gpu_tensorrt', false)
         utils.pack_lib(lib_name, mx_tensorrt_lib)
       }
     }
diff --git a/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc b/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
index b02d1094183f..d82f7544a091 100644
--- a/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
+++ b/src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
@@ -35,13 +35,9 @@
 #include
 #include
 #include
-#include
 #include
 #include
-#include
-#include
-
 
 using std::cout;
 using std::cerr;
 using std::endl;
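
Note on the dropped runtime library: with the onnx-tensorrt revision this patch pins (the TensorRT 7 branch), upstream folded the separate parser-runtime shared object away, so libnvonnxparser.so alone provides the whole ONNX import path. That is why the libnvonnxparser_runtime.so link entry, copy step, and includes all disappear above. Below is a minimal, hypothetical sketch of driving that single library through the TensorRT 7 C++ API; it is not taken from MXNet's sources (SimpleLogger, the file handling, and the workspace size are illustrative), just an example of the parser-only flow the build changes assume:

// trt_onnx_sketch.cc -- standalone sketch, not part of the patch above.
#include <NvInfer.h>
#include <NvOnnxParser.h>

#include <cstdint>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Minimal ILogger implementation; TensorRT needs one for builder and parser.
class SimpleLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char* msg) override {
    if (severity <= Severity::kWARNING)
      std::cerr << "[TRT] " << msg << std::endl;
  }
};

int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " model.onnx" << std::endl;
    return 1;
  }

  SimpleLogger logger;
  nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);
  // The TensorRT 7 ONNX parser requires an explicit-batch network.
  const auto flags = 1U << static_cast<std::uint32_t>(
      nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
  nvinfer1::INetworkDefinition* network = builder->createNetworkV2(flags);
  // createParser() comes from libnvonnxparser.so; no *_runtime library involved.
  nvonnxparser::IParser* parser = nvonnxparser::createParser(*network, logger);

  // Read the serialized ONNX model and hand it to the parser.
  std::ifstream file(argv[1], std::ios::binary);
  std::stringstream buffer;
  buffer << file.rdbuf();
  const std::string model = buffer.str();
  if (!parser->parse(model.data(), model.size())) {
    std::cerr << "failed to parse " << argv[1] << std::endl;
    return 1;
  }

  nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
  config->setMaxWorkspaceSize(1ULL << 28);  // 256 MiB of build scratch space
  nvinfer1::ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
  std::cout << (engine != nullptr ? "engine built" : "engine build failed") << std::endl;

  // TensorRT 7 objects are released with destroy(), not delete.
  if (engine != nullptr) engine->destroy();
  config->destroy();
  parser->destroy();
  network->destroy();
  builder->destroy();
  return 0;
}

A sketch like this should build with something along the lines of "g++ -std=c++11 trt_onnx_sketch.cc -lnvinfer -lnvonnxparser", assuming the TensorRT 7 headers and libraries are on the default search paths; MXNet's real integration lives in src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc as touched by the last hunk.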