diff --git a/Makefile b/Makefile index fcef885bee0a..19aab2885e68 100644 --- a/Makefile +++ b/Makefile @@ -453,7 +453,7 @@ ifeq ($(USE_DIST_KVSTORE), 1) LDFLAGS += $(PS_LDFLAGS_A) endif -.PHONY: clean all extra-packages test lint docs clean_all rcpplint rcppexport roxygen\ +.PHONY: clean all extra-packages test lint clean_all rcpplint rcppexport roxygen\ cython2 cython3 cython cyclean all: lib/libmxnet.a lib/libmxnet.so $(BIN) extra-packages sample_lib @@ -664,20 +664,6 @@ cpplint: pylint: python3 -m pylint --rcfile=$(ROOTDIR)/ci/other/pylintrc --ignore-patterns=".*\.so$$,.*\.dll$$,.*\.dylib$$" python/mxnet tools/caffe_converter/*.py -sample_lib: - $(CXX) -shared -fPIC example/lib_api/mylib.cc -o libsample_lib.so -I include/mxnet - -doc: docs - -docs: - make -C docs html - -clean_docs: - make -C docs clean - -doxygen: - doxygen docs/Doxyfile - # Cython build cython: cd python; $(PYTHON) setup.py build_ext --inplace --with-cython @@ -727,6 +713,10 @@ rpkgtest: Rscript -e 'require(testthat);res<-test_dir("R-package/tests/testthat");if(!testthat:::all_passed(res)){stop("Test failures", call. 
= FALSE)}' Rscript -e 'res<-covr:::package_coverage("R-package");fileConn<-file(paste("r-package_coverage_",toString(runif(1)),".json"));writeLines(covr:::to_codecov(res), fileConn);close(fileConn)' + +sample_lib: + $(CXX) -shared -fPIC example/lib_api/mylib.cc -o libsample_lib.so -I include/mxnet + scalaclean: (cd $(ROOTDIR)/scala-package && mvn clean) diff --git a/R-package/DESCRIPTION b/R-package/DESCRIPTION index b124877c8d3c..06a9bb03b6ae 100644 --- a/R-package/DESCRIPTION +++ b/R-package/DESCRIPTION @@ -33,3 +33,4 @@ Depends: LinkingTo: Rcpp VignetteBuilder: knitr RoxygenNote: 6.1.1 +Encoding: UTF-8 diff --git a/R-package/src/export.cc b/R-package/src/export.cc index e1a8be5536a1..ae64a1761966 100644 --- a/R-package/src/export.cc +++ b/R-package/src/export.cc @@ -76,6 +76,15 @@ std::string ExportDocString(const std::string& docstring) { return os.str(); } +std::string ReplaceAll(std::string str, const std::string& from, const std::string& to) { + size_t start_pos = 0; + while ((start_pos = str.find(from, start_pos)) != std::string::npos) { + str.replace(start_pos, from.length(), to); + start_pos += to.length(); // Handles case where 'to' is a substring of 'from' + } + return str; +} + void ExportVArgFunction(std::ostream& os, // NOLINT(*) const std::string& func_name, const std::string& docstr) { @@ -118,6 +127,10 @@ void Exporter::Export(const std::string& path) { || fname == "mx.varg.symbol.min") continue; Rcpp::List func_info(scope->get_function(fname)); std::string docstr = Rcpp::as(func_info[2]); + + docstr = ReplaceAll(docstr, std::string("\a"), std::string("\\a")); + docstr = ReplaceAll(docstr, std::string("\b"), std::string("\\b")); + if (docstr.find("@export") == std::string::npos) continue; if (fname.find("mx.varg.") == 0) { ExportVArgFunction(script, fname, docstr); diff --git a/ci/Jenkinsfile_utils.groovy b/ci/Jenkinsfile_utils.groovy index a5fe0c8a04b8..7fbf3692f6bc 100644 --- a/ci/Jenkinsfile_utils.groovy +++ b/ci/Jenkinsfile_utils.groovy @@ 
-67,7 +67,7 @@ def pack_lib(name, libs, include_gcov_data = false) { sh returnStatus: true, script: """ set +e echo "Packing ${libs} into ${name}" -for i in \$(echo ${libs} | sed -e 's/,/ /g'); do md5sum \$i; done +for i in \$(echo ${libs} | sed -e 's/,/ /g'); do md5sum \$i; ls -lh \$i; done return 0 """ stash includes: libs, name: name @@ -114,7 +114,7 @@ def get_git_commit_hash() { def publish_test_coverage() { // CodeCovs auto detection has trouble with our CIs PR validation due the merging strategy git_commit_hash = get_git_commit_hash() - + if (env.CHANGE_ID) { // PR execution codecovArgs = "-B ${env.CHANGE_TARGET} -C ${git_commit_hash} -P ${env.CHANGE_ID}" @@ -168,9 +168,9 @@ def get_repo_url() { def update_github_commit_status(state, message) { node(NODE_UTILITY) { // NOTE: https://issues.jenkins-ci.org/browse/JENKINS-39482 - //The GitHubCommitStatusSetter requires that the Git Server is defined under - //*Manage Jenkins > Configure System > GitHub > GitHub Servers*. - //Otherwise the GitHubCommitStatusSetter is not able to resolve the repository name + //The GitHubCommitStatusSetter requires that the Git Server is defined under + //*Manage Jenkins > Configure System > GitHub > GitHub Servers*. 
+ //Otherwise the GitHubCommitStatusSetter is not able to resolve the repository name //properly and you would see an empty list of repos: //[Set GitHub commit status (universal)] PENDING on repos [] (sha:xxxxxxx) with context:test/mycontext //See https://cwiki.apache.org/confluence/display/MXNET/Troubleshooting#Troubleshooting-GitHubcommit/PRstatusdoesnotgetpublished @@ -182,7 +182,7 @@ def update_github_commit_status(state, message) { commitSha = get_git_commit_hash() echo "commitSha=${commitSha}" - + context = get_github_context() echo "context=${context}" @@ -216,29 +216,29 @@ def update_github_commit_status(state, message) { def get_github_context() { // Since we use multi-branch pipelines, Jenkins appends the branch name to the job name if (env.BRANCH_NAME) { - short_job_name = JOB_NAME.substring(0, JOB_NAME.lastIndexOf('/')) + short_job_name = JOB_NAME.substring(0, JOB_NAME.lastIndexOf('/')) } else { short_job_name = JOB_NAME } - + return "ci/jenkins/${short_job_name}" } def parallel_stage(stage_name, steps) { // Allow to pass an array of steps that will be executed in parallel in a stage new_map = [:] - + for (def step in steps) { new_map = new_map << step } - + stage(stage_name) { parallel new_map } } def assign_node_labels(args) { - // This function allows to assign instance labels to the generalized placeholders. + // This function allows to assign instance labels to the generalized placeholders. // This serves two purposes: // 1. Allow generalized placeholders (e.g. NODE_WINDOWS_CPU) in the job definition // in order to abstract away the underlying node label. 
This allows to schedule a job @@ -263,7 +263,7 @@ def main_wrapper(args) { // args: // - core_logic: Jenkins pipeline containing core execution logic // - failure_handler: Failure handler - + // assign any caught errors here err = null try { diff --git a/ci/docker/Dockerfile.build.test.arm_qemu b/ci/docker/Dockerfile.build.test.arm_qemu index 68891a72c284..5dc610a524b0 100644 --- a/ci/docker/Dockerfile.build.test.arm_qemu +++ b/ci/docker/Dockerfile.build.test.arm_qemu @@ -24,6 +24,7 @@ WORKDIR /work RUN apt-get update COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_arm_qemu.sh /work diff --git a/ci/docker/Dockerfile.build.ubuntu_blc b/ci/docker/Dockerfile.build.ubuntu_blc index 294740ce1392..a413648366b2 100644 --- a/ci/docker/Dockerfile.build.ubuntu_blc +++ b/ci/docker/Dockerfile.build.ubuntu_blc @@ -25,6 +25,7 @@ WORKDIR /work/deps COPY install/ubuntu_core.sh /work/ RUN /work/ubuntu_core.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_npm_blc.sh /work/ RUN /work/ubuntu_npm_blc.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_build_cuda b/ci/docker/Dockerfile.build.ubuntu_build_cuda index 47f1d1f9ca58..e085c2dc09a0 100644 --- a/ci/docker/Dockerfile.build.ubuntu_build_cuda +++ b/ci/docker/Dockerfile.build.ubuntu_build_cuda @@ -30,6 +30,7 @@ RUN /work/ubuntu_core.sh COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ COPY install/sbt.gpg /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu b/ci/docker/Dockerfile.build.ubuntu_cpu index 35dcf3ed7410..f41d629289a7 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_cpu @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY 
install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -67,7 +68,6 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ RUN /work/ubuntu_docs.sh # Always last diff --git a/docs/api/r/Makefile b/ci/docker/Dockerfile.build.ubuntu_cpu_c similarity index 56% rename from docs/api/r/Makefile rename to ci/docker/Dockerfile.build.ubuntu_cpu_c index 435345daa3d3..c7969da1bb1d 100644 --- a/docs/api/r/Makefile +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_c @@ -1,3 +1,4 @@ +# -*- mode: dockerfile -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -14,21 +15,21 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh -# This is the makefile for compiling Rmarkdown files into the md file with results. 
-PKGROOT=../../R-package +RUN apt-get update && apt-get install -y doxygen graphviz -# ADD The Markdown to be built here, with suffix md -classifyRealImageWithPretrainedModel.md: -mnistCompetition.md: -ndarrayAndSymbolTutorial.md: -fiveMinutesNeuralNetwork.md: +COPY runtime_functions.sh /work/ -# General Rules for build rmarkdowns, need knitr -%.md: $(PKGROOT)/vignettes/%.Rmd - rm -rf "../../web-data/mxnet/knitr/$(basename $@)-"*; - Rscript -e \ - "require(knitr);"\ - "knitr::opts_knit\$$set(root.dir=\".\");"\ - "knitr::opts_chunk\$$set(fig.path=\"../../web-data/mxnet/knitr/$(basename $@)-\");"\ - "knitr::knit(\"$+\")" +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll new file mode 100644 index 000000000000..62db71e2429e --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_jekyll @@ -0,0 +1,68 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +SHELL ["/bin/bash", "-l", "-c" ] + +RUN apt-get update && apt-get install -y \ + build-essential \ + git \ + zlib1g-dev \ + gnupg2 \ + curl + +# Always last, except here to prevent conflicts with rvm +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +RUN curl -sSL https://rvm.io/mpapis.asc | gpg2 --import - && \ + curl -sSL https://rvm.io/pkuczynski.asc | gpg2 --import - && \ + curl -sSL https://get.rvm.io | bash -s stable + +RUN source /etc/profile.d/rvm.sh && \ + rvm requirements && \ + rvm install 2.6 && \ + rvm use 2.6.3 --default + +ENV BUNDLE_HOME=/work/deps/bundle +ENV BUNDLE_APP_CONFIG=/work/deps/bundle +ENV BUNDLE_BIN=/work/deps/bundle/bin +ENV GEM_BIN=/work/deps/gem/bin +ENV GEM_HOME=/work/deps/gem + +RUN echo "gem: --no-ri --no-rdoc" > ~/.gemrc +RUN yes | gem update --system +RUN yes | gem install --force bundler +RUN gem install jekyll + +ENV PATH=$BUNDLE_BIN:$GEM_BIN:$PATH + +COPY runtime_functions.sh /work/ + +RUN chown -R jenkins_slave /work/ && \ + chown -R jenkins_slave /usr/local/bin && \ + chown -R jenkins_slave /usr/local/rvm + +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_julia b/ci/docker/Dockerfile.build.ubuntu_cpu_julia new file mode 100644 index 000000000000..108869b680cd --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_julia @@ -0,0 +1,81 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ +RUN /work/ubuntu_python.sh + +COPY install/ubuntu_scala.sh /work/ +COPY install/sbt.gpg /work/ +RUN /work/ubuntu_scala.sh + +COPY install/ubuntu_clojure.sh /work/ +RUN /work/ubuntu_clojure.sh + +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + +COPY install/ubuntu_perl.sh /work/ +RUN /work/ubuntu_perl.sh + +COPY install/ubuntu_julia.sh /work/ +RUN /work/ubuntu_julia.sh + +COPY install/ubuntu_clang.sh /work/ +RUN /work/ubuntu_clang.sh + +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + +COPY install/ubuntu_mkl.sh /work/ +RUN /work/ubuntu_mkl.sh + +COPY install/ubuntu_mklml.sh /work/ +RUN /work/ubuntu_mklml.sh + +COPY install/ubuntu_caffe.sh /work/ +RUN /work/ubuntu_caffe.sh + +COPY install/ubuntu_onnx.sh /work/ +RUN /work/ubuntu_onnx.sh + +COPY install/ubuntu_docs.sh /work/ +RUN /work/ubuntu_docs.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_lite b/ci/docker/Dockerfile.build.ubuntu_cpu_lite new file mode 100644 index 000000000000..ca5618ac1cd7 --- /dev/null +++ 
b/ci/docker/Dockerfile.build.ubuntu_cpu_lite @@ -0,0 +1,45 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_clang.sh /work/ +RUN /work/ubuntu_clang.sh + +COPY install/ubuntu_gcc8.sh /work/ +RUN /work/ubuntu_gcc8.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_python b/ci/docker/Dockerfile.build.ubuntu_cpu_python new file mode 100644 index 000000000000..6b217d4d341d --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_python @@ -0,0 +1,49 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ +RUN /work/ubuntu_python.sh + +COPY install/ubuntu_onnx.sh /work/ +RUN /work/ubuntu_onnx.sh + +COPY install/ubuntu_docs.sh /work/ +RUN /work/ubuntu_docs.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_r b/ci/docker/Dockerfile.build.ubuntu_cpu_r new file mode 100644 index 000000000000..264d34cd6422 --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_r @@ -0,0 +1,43 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_r.sh /work/ +COPY install/r.gpg /work/ +RUN /work/ubuntu_r.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_scala b/ci/docker/Dockerfile.build.ubuntu_cpu_scala new file mode 100644 index 000000000000..38874d290e1d --- /dev/null +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_scala @@ -0,0 +1,50 @@ +# -*- mode: dockerfile -*- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# Dockerfile to build and run MXNet on Ubuntu 16.04 for CPU + +FROM ubuntu:16.04 + +WORKDIR /work/deps + +COPY install/ubuntu_core.sh /work/ +RUN /work/ubuntu_core.sh + +COPY install/deb_ubuntu_ccache.sh /work/ +RUN /work/deb_ubuntu_ccache.sh + +COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ +RUN /work/ubuntu_python.sh + +COPY install/ubuntu_scala.sh /work/ +COPY install/sbt.gpg /work/ +RUN /work/ubuntu_scala.sh + +COPY install/ubuntu_clojure.sh /work/ +RUN /work/ubuntu_clojure.sh + +# Always last +ARG USER_ID=0 +ARG GROUP_ID=0 +COPY install/ubuntu_adduser.sh /work/ +RUN /work/ubuntu_adduser.sh + +COPY runtime_functions.sh /work/ + +WORKDIR /work/mxnet \ No newline at end of file diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 index 46d27e35022b..894930bc6303 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -61,7 +62,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index 21a938f8f84a..9699b37aa45f 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -61,7 +62,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY 
install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 index 9c7a8084b093..a1031af811cf 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -61,7 +62,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 index 19530a212424..56ebd55c94e0 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -61,7 +62,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 index f239eec4af27..f6008a5c09ca 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN 
/work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -61,7 +62,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt b/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt index 09591ee0b9c9..90bd772ecb17 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt @@ -27,6 +27,7 @@ RUN /work/ubuntu_core.sh COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/tensorrt.sh /work RUN /work/tensorrt.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu index 1b126c18be47..8e36a74cbde7 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu @@ -29,6 +29,7 @@ COPY install/deb_ubuntu_ccache.sh /work/ RUN /work/deb_ubuntu_ccache.sh COPY install/ubuntu_python.sh /work/ +COPY install/requirements /work/ RUN /work/ubuntu_python.sh COPY install/ubuntu_scala.sh /work/ @@ -55,7 +56,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ RUN /work/ubuntu_docs.sh COPY install/ubuntu_nightly_tests.sh /work/ diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu index 275a5a54fc66..ec811e5202ab 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu @@ -61,7 +61,7 @@ COPY install/ubuntu_onnx.sh /work/ RUN /work/ubuntu_onnx.sh COPY install/ubuntu_docs.sh /work/ -COPY install/docs_requirements /work/ +COPY install/requirements /work/ 
RUN /work/ubuntu_docs.sh COPY install/ubuntu_tutorials.sh /work/ diff --git a/ci/docker/install/docs_requirements b/ci/docker/install/requirements similarity index 91% rename from ci/docker/install/docs_requirements rename to ci/docker/install/requirements index f78dca2bc655..cbfc521e2c08 100644 --- a/ci/docker/install/docs_requirements +++ b/ci/docker/install/requirements @@ -18,19 +18,16 @@ # build and install are separated so changes to build don't invalidate # the whole docker cache for the image -beautifulsoup4==4.6.3 -breathe==4.10.0 +boto3==1.9.229 cpplint==1.3.0 -CommonMark==0.5.4 +Cython==0.29.7 +decorator==4.4.0 h5py==2.8.0rc1 mock==2.0.0 nose==1.3.7 nose-timer==0.7.3 numpy>1.16.0,<2.0.0 pylint==2.3.1; python_version >= '3.0' -pypandoc==1.4 -recommonmark==0.4.0 requests<2.19.0,>=2.18.4 scipy==1.0.1 six==1.11.0 -sphinx==1.5.6 diff --git a/ci/docker/install/ubuntu_clojure.sh b/ci/docker/install/ubuntu_clojure.sh index b20e35898552..dcaae6ba048d 100755 --- a/ci/docker/install/ubuntu_clojure.sh +++ b/ci/docker/install/ubuntu_clojure.sh @@ -24,7 +24,7 @@ set -ex # install libraries for mxnet's clojure package on ubuntu echo 'Installing Clojure...' 
-wget https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein +wget -q https://raw.githubusercontent.com/technomancy/leiningen/stable/bin/lein chmod 775 lein sudo cp lein /usr/local/bin echo "Y" | sudo lein downgrade 2.8.3 diff --git a/ci/docker/install/ubuntu_docs.sh b/ci/docker/install/ubuntu_docs.sh index 5dc201c15b52..10a0016a4125 100755 --- a/ci/docker/install/ubuntu_docs.sh +++ b/ci/docker/install/ubuntu_docs.sh @@ -28,7 +28,9 @@ apt-get install -y \ doxygen \ pandoc -pip3 install -r /work/docs_requirements -pip2 install -r /work/docs_requirements +# Can probably delete these and docs_requirements +wget -q https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh +chmod +x Miniconda2-latest-Linux-x86_64.sh +./Miniconda2-latest-Linux-x86_64.sh -b -p /work/miniconda echo 'Dependency installation complete.' diff --git a/ci/docker/install/ubuntu_python.sh b/ci/docker/install/ubuntu_python.sh index b8626d3037d5..12149121ba0b 100755 --- a/ci/docker/install/ubuntu_python.sh +++ b/ci/docker/install/ubuntu_python.sh @@ -30,5 +30,5 @@ wget -nv https://bootstrap.pypa.io/get-pip.py python3 get-pip.py python2 get-pip.py -pip2 install nose cpplint==1.3.0 'numpy>1.16.0,<2.0.0' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 Cython==0.29.7 -pip3 install nose cpplint==1.3.0 pylint==2.3.1 'numpy>1.16.0,<2.0.0' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 Cython==0.29.7 decorator +pip3 install -r /work/requirements +pip2 install -r /work/requirements diff --git a/ci/docker/install/ubuntu_r.sh b/ci/docker/install/ubuntu_r.sh index cefc4172f245..548b6c00b0eb 100755 --- a/ci/docker/install/ubuntu_r.sh +++ b/ci/docker/install/ubuntu_r.sh @@ -41,4 +41,7 @@ apt-get install -y --allow-unauthenticated \ libxml2-dev \ libxt-dev \ r-base \ - r-base-dev + r-base-dev \ + texinfo \ + texlive \ + texlive-fonts-extra diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index 
de753a56be7f..b2a50f30af4e 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -1350,34 +1350,6 @@ test_ubuntu_cpu_python3() { popd } -build_docs() { - set -ex - pushd . - - # Setup environment for Julia docs - export PATH="/work/julia10/bin:$PATH" - export MXNET_HOME='/work/mxnet' - export JULIA_DEPOT_PATH='/work/julia-depot' - - julia -e 'using InteractiveUtils; versioninfo()' - export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so' - export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH - - cd /work/mxnet/docs/build_version_doc - # Parameters are set in the Jenkins pipeline: restricted-website-build - # $1: the list of branches/tags to build - # $2: the list of tags to display - # So you can build from the 1.2.0 branch, but display 1.2.1 on the site - # $3: the fork URL - ./build_all_version.sh $1 $2 $3 - # $4: the default version tag for the website - # $5: the base URL - ./update_all_version.sh $2 $4 $5 - cd VersionedWeb - tar -zcvf ../artifacts.tgz . - popd -} - # Functions that run the nightly Tests: #Runs Apache RAT Check on MXNet Source for License Headers @@ -1560,8 +1532,7 @@ nightly_estimator() { nosetests test_sentiment_rnn.py } -# Deploy - +# For testing PRs deploy_docs() { set -ex pushd . @@ -1580,9 +1551,331 @@ deploy_docs() { export CC="ccache gcc" export CXX="ccache g++" - make docs SPHINXOPTS=-W USE_MKLDNN=0 + build_python_docs + + popd +} + + +build_docs_setup() { + build_folder="docs/_build" + mxnetlib_folder="/work/mxnet/lib" + + mkdir -p $build_folder + mkdir -p $mxnetlib_folder +} + +build_ubuntu_cpu_docs() { + set -ex + export CC="gcc" + export CXX="g++" + build_ccache_wrappers + make \ + DEV=1 \ + USE_CPP_PACKAGE=1 \ + USE_BLAS=openblas \ + USE_MKLDNN=0 \ + USE_DIST_KVSTORE=1 \ + USE_LIBJPEG_TURBO=1 \ + USE_SIGNAL_HANDLER=1 \ + -j$(nproc) +} + + +build_jekyll_docs() { + set -ex + source /etc/profile.d/rvm.sh + + pushd . 
+ build_docs_setup + pushd docs/static_site + make clean + make html popd + + GZIP=-9 tar zcvf jekyll-artifacts.tgz -C docs/static_site/build html + mv jekyll-artifacts.tgz docs/_build/ + popd +} + + +build_python_docs() { + set -ex + pushd . + + build_docs_setup + + pushd docs/python_docs + eval "$(/work/miniconda/bin/conda shell.bash hook)" + conda env create -f environment.yml -p /work/conda_env + conda activate /work/conda_env + pip install themes/mx-theme + pip install -e /work/mxnet/python --user + + pushd python + make clean + make html EVAL=0 + + GZIP=-9 tar zcvf python-artifacts.tgz -C build/_build/html . + popd + + mv python/python-artifacts.tgz /work/mxnet/docs/_build/ + popd + + popd +} + + +build_c_docs() { + set -ex + pushd . + + build_docs_setup + doc_path="docs/cpp_docs" + pushd $doc_path + + make clean + make html + + doc_artifact="c-artifacts.tgz" + GZIP=-9 tar zcvf $doc_artifact -C build/html/html . + popd + + mv $doc_path/$doc_artifact docs/_build/ + + popd +} + + +build_r_docs() { + set -ex + pushd . + + build_docs_setup + r_root='R-package' + r_pdf='mxnet-r-reference-manual.pdf' + r_build='build' + docs_build_path="$r_root/$r_build/$r_pdf" + artifacts_path='docs/_build/r-artifacts.tgz' + + mkdir -p $r_root/$r_build + + unittest_ubuntu_minimal_R + + pushd $r_root + + R_LIBS=/tmp/r-site-library R CMD Rd2pdf . --no-preview --encoding=utf8 -o $r_build/$r_pdf + + popd + + GZIP=-9 tar zcvf $artifacts_path $docs_build_path + + popd +} + + +build_scala() { + set -ex + pushd . + + cd scala-package + mvn -B install -DskipTests + + popd +} + + +build_scala_docs() { + set -ex + pushd . + build_docs_setup + build_scala + + scala_path='scala-package' + docs_build_path='scala-package/docs/build/docs/scala' + artifacts_path='docs/_build/scala-artifacts.tgz' + + pushd $scala_path + + scala_doc_sources=`find . 
-type f -name "*.scala" | egrep "./core|./infer" | egrep -v "/javaapi" | egrep -v "Suite" | egrep -v "/mxnetexamples"` + jar_native=`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" ` + jar_macros=`find macros -name "*.jar" | tr "\\n" ":" ` + jar_core=`find core -name "*.jar" | tr "\\n" ":" ` + jar_infer=`find infer -name "*.jar" | tr "\\n" ":" ` + scala_doc_classpath=$jar_native:$jar_macros:$jar_core:$jar_infer + + scala_ignore_errors='' + legacy_ver=".*1.2|1.3.*" + # BUILD_VER needs to be pulled from environment vars + if [[ $_BUILD_VER =~ $legacy_ver ]] + then + # There are unresolvable errors on mxnet 1.2.x. We are ignoring those + # errors while aborting the ci on newer versions + echo "We will ignoring unresolvable errors on MXNet 1.2/1.3." + scala_ignore_errors='; exit 0' + fi + + scaladoc $scala_doc_sources -classpath $scala_doc_classpath $scala_ignore_errors -doc-title MXNet + popd + + # Clean-up old artifacts + rm -rf $docs_build_path + mkdir -p $docs_build_path + + for doc_file in index index.html org lib index.js package.html; do + mv $scala_path/$doc_file $docs_build_path + done + + GZIP=-9 tar -zcvf $artifacts_path -C $docs_build_path . + + popd +} + + +build_julia_docs() { + set -ex + pushd . + + build_docs_setup + # Setup environment for Julia docs + export PATH="/work/julia10/bin:$PATH" + export MXNET_HOME='/work/mxnet' + export JULIA_DEPOT_PATH='/work/julia-depot' + export LD_PRELOAD='/usr/lib/x86_64-linux-gnu/libjemalloc.so' + export LD_LIBRARY_PATH=/work/mxnet/lib:$LD_LIBRARY_PATH + + julia_doc_path='julia/docs/site/' + julia_doc_artifact='docs/_build/julia-artifacts.tgz' + + echo "Julia will check for MXNet in $MXNET_HOME/lib" + + make -C julia/docs + + GZIP=-9 tar -zcvf $julia_doc_artifact -C $julia_doc_path . + + popd +} + + +build_java_docs() { + set -ex + pushd . + + build_docs_setup + build_scala + + # Re-use scala-package build artifacts. 
+ java_path='scala-package' + docs_build_path='docs/scala-package/build/docs/java' + artifacts_path='docs/_build/java-artifacts.tgz' + + pushd $java_path + + java_doc_sources=`find . -type f -name "*.scala" | egrep "./core|./infer" | egrep "/javaapi" | egrep -v "Suite" | egrep -v "/mxnetexamples"` + jar_native=`find native -name "*.jar" | grep "target/lib/" | tr "\\n" ":" ` + jar_macros=`find macros -name "*.jar" | tr "\\n" ":" ` + jar_core=`find core -name "*.jar" | tr "\\n" ":" ` + jar_infer=`find infer -name "*.jar" | tr "\\n" ":" ` + java_doc_classpath=$jar_native:$jar_macros:$jar_core:$jar_infer + + scaladoc $java_doc_sources -classpath $java_doc_classpath -feature -deprecation -doc-title MXNet + popd + + # Clean-up old artifacts + rm -rf $docs_build_path + mkdir -p $docs_build_path + + for doc_file in index index.html org lib index.js package.html; do + mv $java_path/$doc_file $docs_build_path + done + + GZIP=-9 tar -zcvf $artifacts_path -C $docs_build_path . + + popd +} + + +build_clojure_docs() { + set -ex + pushd . + + build_docs_setup + build_scala + + clojure_path='contrib/clojure-package' + clojure_doc_path='contrib/clojure-package/target/doc' + clojure_doc_artifact='docs/_build/clojure-artifacts.tgz' + + pushd $clojure_path + lein codox + popd + + GZIP=-9 tar -zcvf $clojure_doc_artifact -C $clojure_doc_path . 
+ + popd +} + +build_docs() { + pushd docs/_build + tar -xzf jekyll-artifacts.tgz + api_folder='html/api' + # Python has its own landing page/site so we don't put it in /docs/api + mkdir -p $api_folder/python/docs && tar -xzf python-artifacts.tgz --directory $api_folder/python/docs + mkdir -p $api_folder/cpp/docs/api && tar -xzf c-artifacts.tgz --directory $api_folder/cpp/docs/api + mkdir -p $api_folder/r/docs/api && tar -xzf r-artifacts.tgz --directory $api_folder/r/docs/api + mkdir -p $api_folder/julia/docs/api && tar -xzf julia-artifacts.tgz --directory $api_folder/julia/docs/api + mkdir -p $api_folder/scala/docs/api && tar -xzf scala-artifacts.tgz --directory $api_folder/scala/docs/api + mkdir -p $api_folder/java/docs/api && tar -xzf java-artifacts.tgz --directory $api_folder/java/docs/api + mkdir -p $api_folder/clojure/docs/api && tar -xzf clojure-artifacts.tgz --directory $api_folder/clojure/docs/api + GZIP=-9 tar -zcvf full_website.tgz -C html . + popd +} + +build_docs_small() { + pushd docs/_build + tar -xzf jekyll-artifacts.tgz + api_folder='html/api' + mkdir -p $api_folder/python/docs && tar -xzf python-artifacts.tgz --directory $api_folder/python/docs + # The folder to be published is now in /docs/_build/html + popd +} + +create_repo() { + repo_folder=$1 + mxnet_url=$2 + git clone $mxnet_url $repo_folder --recursive + echo "Adding MXNet upstream repo..." + cd $repo_folder + git remote add upstream https://github.com/apache/incubator-mxnet + cd .. +} + + +refresh_branches() { + repo_folder=$1 + cd $repo_folder + git fetch + git fetch upstream + cd .. +} + +checkout() { + repo_folder=$1 + cd $repo_folder + # Overriding configs later will cause a conflict here, so stashing... 
+ git stash + # Fails to checkout if not available locally, so try upstream + git checkout "$repo_folder" || git branch $repo_folder "upstream/$repo_folder" && git checkout "$repo_folder" || exit 1 + if [ $tag == 'master' ]; then + git pull + # master gets warnings as errors for Sphinx builds + OPTS="-W" + else + OPTS= + fi + git submodule update --init --recursive + cd .. } build_static_libmxnet() { diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index eda1d349c9e1..30db32252e66 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -1369,16 +1369,20 @@ def test_qemu_armv7_cpu() { }] } +// This is for running on PRs def docs_website() { return ['Docs': { node(NODE_LINUX_CPU) { ws('workspace/docs') { timeout(time: max_time, unit: 'MINUTES') { - utils.init_git() - utils.docker_run('ubuntu_cpu', 'deploy_docs', false) + + unstash 'jekyll-artifacts' + unstash 'python-artifacts' + utils.docker_run('ubuntu_cpu_jekyll', 'build_docs_small', false) master_url = utils.get_jenkins_master_url() if ( master_url == 'jenkins.mxnet-ci.amazon-ml.com') { + // TODO: Make sure this script publishes the website from the right folder sh "ci/other/ci_deploy_doc.sh ${env.BRANCH_NAME} ${env.BUILD_NUMBER}" } else { print "Skipping staging documentation publishing since we are not running in prod. 
Host: {$master_url}" @@ -1389,6 +1393,253 @@ def docs_website() { }] } + +// This creates the MXNet binary needed for generating different docs sets +def compile_unix_lite() { + return ['MXNet lib': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run('ubuntu_cpu_lite', 'build_ubuntu_cpu_docs', false) + utils.pack_lib('libmxnet', 'lib/libmxnet.so', false) + } + } + } + }] +} + + +def should_pack_website() { + if (env.BRANCH_NAME) { + if (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("new_")) { + return true + } + } else { + return true + } + return false +} + +// Each of the docs_{lang} functions will build the docs... +// Stashing is only needed for master for website publishing or for testing "new_" + +// Call this function from Jenkins to generate just the Python API microsite artifacts. +def docs_python() { + return ['Python Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_python', 'build_python_docs', false) + if (should_pack_website()) { + utils.pack_lib('python-artifacts', 'docs/_build/python-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the C and C++ API microsite artifacts. +def docs_c() { + return ['C Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', 'lib/libmxnet.so', false) + utils.docker_run('ubuntu_cpu_c', 'build_c_docs', false) + if (should_pack_website()) { + utils.pack_lib('c-artifacts', 'docs/_build/c-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the Julia API microsite artifacts. 
+def docs_julia() { + return ['Julia Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_julia', 'build_julia_docs', false) + if (should_pack_website()) { + utils.pack_lib('julia-artifacts', 'docs/_build/julia-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the R API PDF artifact. +def docs_r() { + return ['R Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_r', 'build_r_docs', false) + if (should_pack_website()) { + utils.pack_lib('r-artifacts', 'docs/_build/r-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the Scala API microsite artifacts. +// It will also generate the Scala package. +def docs_scala() { + return ['Scala Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_scala', 'build_scala_docs', false) + if (should_pack_website()) { + utils.pack_lib('scala-artifacts', 'docs/_build/scala-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the Java API microsite artifacts. +// It will also generate the Scala package. +def docs_java() { + return ['Java Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_scala', 'build_java_docs', false) + if (should_pack_website()) { + utils.pack_lib('java-artifacts', 'docs/_build/java-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the Clojure API microsite artifacts. 
+// It will also generate the Scala package. +def docs_clojure() { + return ['Clojure Docs': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.unpack_and_init('libmxnet', mx_lib, false) + utils.docker_run('ubuntu_cpu_scala', 'build_clojure_docs', false) + if (should_pack_website()) { + utils.pack_lib('clojure-artifacts', 'docs/_build/clojure-artifacts.tgz', false) + } + } + } + } + }] +} + + +// Call this function from Jenkins to generate just the main website artifacts. +def docs_jekyll() { + return ['Main Jekyll Website': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + utils.docker_run('ubuntu_cpu_jekyll', 'build_jekyll_docs', false) + if (should_pack_website()) { + utils.pack_lib('jekyll-artifacts', 'docs/_build/jekyll-artifacts.tgz', false) + } + } + } + } + }] +} + + +// This is for publishing the full website +// Assumes you have run all of the docs generation functions +// Called from Jenkins_website_full and Jenkins_website_full_pr +def docs_prepare() { + return ['Prepare for publication of the full website': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + utils.init_git() + + unstash 'jekyll-artifacts' + unstash 'c-artifacts' + unstash 'python-artifacts' + unstash 'r-artifacts' + unstash 'julia-artifacts' + unstash 'scala-artifacts' + unstash 'java-artifacts' + unstash 'clojure-artifacts' + + utils.docker_run('ubuntu_cpu_jekyll', 'build_docs', false) + + // only stash if we're going to unstash later + // utils.pack_lib('full_website', 'docs/_build/full_website.tgz', false) + + // archive so the publish pipeline can access the artifact + archiveArtifacts 'docs/_build/full_website.tgz' + } + } + } + }] +} + + +def docs_archive() { + return ['Archive the full website': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + archiveArtifacts 
'docs/_build/full_website.tgz' + } + } + } + }] +} + + +// This is for the full website +def docs_publish() { + return ['Publish the full website': { + node(NODE_LINUX_CPU) { + ws('workspace/docs') { + timeout(time: max_time, unit: 'MINUTES') { + // If used stashed files, you can retrieve them here + //unstash 'full_website' + //sh 'tar -xzf docs/_build/full_website.tgz --directory .' + try { + build 'restricted-website-publish-master' + } + catch (Exception e) { + println(e.getMessage()) + } + } + } + } + }] +} + + + def misc_asan_cpu() { return ['CPU ASAN': { node(NODE_LINUX_CPU) { diff --git a/docs/Jenkinsfile-dev b/ci/jenkins/Jenkinsfile_website_c_docs similarity index 66% rename from docs/Jenkinsfile-dev rename to ci/jenkins/Jenkinsfile_website_c_docs index 760a2f953061..e678920e3a4f 100644 --- a/docs/Jenkinsfile-dev +++ b/ci/jenkins/Jenkinsfile_website_c_docs @@ -16,39 +16,37 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
- +// // Jenkins pipeline // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ // timeout in minutes -max_time = 120 +max_time = 20 node('utility') { // Loading the utilities requires a node context unfortunately checkout scm utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') } utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') utils.main_wrapper( core_logic: { - stage('Build Docs') { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - utils.init_git() - timeout(time: max_time, unit: 'MINUTES') { - sh "ci/build.py -p ubuntu_cpu --docker-registry ${env.DOCKER_CACHE_REGISTRY} --docker-build-retries 3 /work/runtime_functions.sh build_docs ${params.tags_to_build} ${params.tag_list} ${params.fork} ${params.tag_default} ${params.domain}" - archiveArtifacts 'docs/build_version_doc/artifacts.tgz' - build 'test-website-publish' - } - } - } - } + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('C Docs', [ + custom_steps.docs_c() + ]) + } , failure_handler: { - if (currentBuild.result == "FAILURE") { - // Do nothing. + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' } } ) diff --git a/ci/jenkins/Jenkinsfile_website_clojure_docs b/ci/jenkins/Jenkinsfile_website_clojure_docs new file mode 100644 index 000000000000..697dca48e58b --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_clojure_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 20 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Clojure Docs', [ + custom_steps.docs_clojure() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. 
Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_full b/ci/jenkins/Jenkinsfile_website_full new file mode 100644 index 000000000000..39cc6f4e2dc9 --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_full @@ -0,0 +1,67 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 180 + +node('restricted-utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} + +utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-mxnetlinux-cpu', linux_gpu: 'restricted-mxnetlinux-gpu', linux_gpu_p3: 'restricted-mxnetlinux-gpu-p3', windows_cpu: 'restricted-mxnetwindows-cpu', windows_gpu: 'restricted-mxnetwindows-gpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Build Docs', [ + custom_steps.docs_jekyll(), + custom_steps.docs_c(), + custom_steps.docs_python(), + custom_steps.docs_julia(), + custom_steps.docs_r(), + custom_steps.docs_scala(), + custom_steps.docs_java(), + custom_steps.docs_clojure() + ]) + + utils.parallel_stage('Prepare', [ + custom_steps.docs_prepare() + ]) + + utils.parallel_stage('Publish', [ + custom_steps.docs_publish() + ]) +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_full_pr b/ci/jenkins/Jenkinsfile_website_full_pr new file mode 100644 index 000000000000..133c6c204964 --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_full_pr @@ -0,0 +1,62 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 180 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Build Docs', [ + // Optimization would be to flag these not to stash if not previewing them + custom_steps.docs_jekyll(), + custom_steps.docs_c(), + custom_steps.docs_python(), + custom_steps.docs_julia(), + custom_steps.docs_r(), + custom_steps.docs_scala(), + custom_steps.docs_java(), + custom_steps.docs_clojure() + ]) + + // TODO: add a website preview function + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. 
Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_java_docs b/ci/jenkins/Jenkinsfile_website_java_docs new file mode 100644 index 000000000000..4453b444039b --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_java_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 20 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Java Docs', [ + custom_steps.docs_java() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_jekyll_docs b/ci/jenkins/Jenkinsfile_website_jekyll_docs new file mode 100644 index 000000000000..c4c5ff89dd86 --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_jekyll_docs @@ -0,0 +1,49 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 20 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Jekyll Website Docs', [ + custom_steps.docs_jekyll() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_julia_docs b/ci/jenkins/Jenkinsfile_website_julia_docs new file mode 100644 index 000000000000..e1e9aaa983fb --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_julia_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 60 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Julia Docs', [ + custom_steps.docs_julia() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/docs/Jenkinsfile b/ci/jenkins/Jenkinsfile_website_mxnet_build similarity index 56% rename from docs/Jenkinsfile rename to ci/jenkins/Jenkinsfile_website_mxnet_build index 676204291893..a3c3330d0349 100644 --- a/docs/Jenkinsfile +++ b/ci/jenkins/Jenkinsfile_website_mxnet_build @@ -16,39 +16,33 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
- +// // Jenkins pipeline // See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ // timeout in minutes max_time = 180 -node('restricted-utility') { +node('utility') { // Loading the utilities requires a node context unfortunately checkout scm utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') } -utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-mxnetlinux-cpu') +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') utils.main_wrapper( core_logic: { - stage('Build Docs') { - node(NODE_LINUX_CPU) { - ws('workspace/docs') { - utils.init_git() - timeout(time: max_time, unit: 'MINUTES') { - sh "ci/build.py -p ubuntu_cpu --docker-registry ${env.DOCKER_CACHE_REGISTRY} --docker-build-retries 3 /work/runtime_functions.sh build_docs ${params.tags_to_build} ${params.tag_list} ${params.fork} ${params.tag_default} ${params.domain}" - archiveArtifacts 'docs/build_version_doc/artifacts.tgz' - build 'restricted-website-publish' - } - } - } - } + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + } , failure_handler: { - if (currentBuild.result == "FAILURE") { - emailext body: 'Generating the website has failed. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[WEBSITE FAILED] Build ${BUILD_NUMBER}', to: '${EMAIL}' + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. 
Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' } } ) diff --git a/ci/jenkins/Jenkinsfile_website_python_docs b/ci/jenkins/Jenkinsfile_website_python_docs new file mode 100644 index 000000000000..baaf4519541f --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_python_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 60 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Python Docs', [ + custom_steps.docs_python() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_r_docs b/ci/jenkins/Jenkinsfile_website_r_docs new file mode 100644 index 000000000000..5b9f6630563f --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_r_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 60 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('R Docs', [ + custom_steps.docs_r() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/jenkins/Jenkinsfile_website_scala_docs b/ci/jenkins/Jenkinsfile_website_scala_docs new file mode 100644 index 000000000000..6a083dae7957 --- /dev/null +++ b/ci/jenkins/Jenkinsfile_website_scala_docs @@ -0,0 +1,53 @@ +// -*- mode: groovy -*- + +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +// Jenkins pipeline +// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/ + +// timeout in minutes +max_time = 20 + +node('utility') { + // Loading the utilities requires a node context unfortunately + checkout scm + utils = load('ci/Jenkinsfile_utils.groovy') + custom_steps = load('ci/jenkins/Jenkins_steps.groovy') +} +utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu') + +utils.main_wrapper( +core_logic: { + utils.parallel_stage('Build', [ + custom_steps.compile_unix_lite() + ]) + + utils.parallel_stage('Scala Docs', [ + custom_steps.docs_scala() + + ]) + +} +, +failure_handler: { + // Only send email if master or release branches failed + if (currentBuild.result == "FAILURE" && (env.BRANCH_NAME == "master" || env.BRANCH_NAME.startsWith("v"))) { + emailext body: 'Build for MXNet branch ${BRANCH_NAME} has broken. Please view the build at ${BUILD_URL}', replyTo: '${EMAIL}', subject: '[BUILD FAILED] Branch ${BRANCH_NAME} build ${BUILD_NUMBER}', to: '${EMAIL}' + } +} +) diff --git a/ci/other/ci_deploy_doc.sh b/ci/other/ci_deploy_doc.sh index a300794b55da..3d5c85fb794e 100755 --- a/ci/other/ci_deploy_doc.sh +++ b/ci/other/ci_deploy_doc.sh @@ -28,5 +28,5 @@ # set -ex -aws s3 sync --delete docs/_build/html/ s3://mxnet-ci-doc/$1/$2 \ +aws s3 sync --delete . 
s3://mxnet-ci-doc/$1/$2 \ && echo "Doc is hosted at http://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/$1/$2/index.html" diff --git a/ci/publish/website/README.md b/ci/publish/website/README.md new file mode 100644 index 000000000000..967dbe67ea16 --- /dev/null +++ b/ci/publish/website/README.md @@ -0,0 +1,20 @@ + + + + + + + + + + + + + + + + + +# Website Deployment + +Refer to the [MXNet Developer Wiki](https://cwiki.apache.org/confluence/display/MXNET/Building+the+New+Website). diff --git a/ci/publish/website/beta-deploy.sh b/ci/publish/website/beta-deploy.sh new file mode 100755 index 000000000000..88b19927480f --- /dev/null +++ b/ci/publish/website/beta-deploy.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# A yaml file is written to trigger a staging deployment. +# This file must be placed in the root of the site repo. +# profile: sets custom the url using the pattern 'mxnet-PROFILE' +# Example using 'beta': https://mxnet-beta.staged.apache.org/ +# Documentation: https://www.staged.apache.org/ + +set -ex + +if [ ! -f ./.asf.yaml ]; then + echo -e "\nGenerating .asf.yaml file" + cat > ./.asf.yaml < date.txt +git add date.txt +git commit -m "Bump the publish timestamp." 
+git push origin asf-site diff --git a/ci/publish/website/publish_artifacts.sh b/ci/publish/website/publish_artifacts.sh new file mode 100755 index 000000000000..015a1bcfb843 --- /dev/null +++ b/ci/publish/website/publish_artifacts.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# build and install are separated so changes to build don't invalidate +# the whole docker cache for the image + +# This script requires that APACHE_PASSWORD and APACHE_USERNAME are set +# environment variables. Also, artifacts must be previously uploaded to S3 +# in the MXNet public bucket (mxnet-public.s3.us-east-2.amazonaws.com). + +set -ex + +api_list=("cpp" "clojure" "java" "julia" "python" "r" "scala") +version=v1.5.0 +for i in "${api_list[@]}" +do + tar cvf $i-artifacts.tgz $i && aws s3 cp $i-artifacts.tgz s3://mxnet-public/docs/$version/$i-artifacts.tgz +done diff --git a/docs/README.md b/docs/README.md index b98d15eecb49..7993d07f026f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -30,6 +30,8 @@ Each language documentation is built in a modular way, so that if you are a cont You can also use the project's CI tools to emulate any changes with Docker. 
You can use these tools to install dependencies and run the parts of the build you want to test. +Refer to the [MXNet Developer Wiki](https://cwiki.apache.org/confluence/display/MXNET/Building+the+New+Website) for instructions on building the docs locally. + If you plan to contribute changes to the documentation or website, please submit a pull request. Contributions are welcome! ## Python Docs diff --git a/docs/python_docs/_static/mxnet.css b/docs/python_docs/_static/mxnet.css index 7f6380bf6733..3909aedc06fa 100644 --- a/docs/python_docs/_static/mxnet.css +++ b/docs/python_docs/_static/mxnet.css @@ -15,7 +15,7 @@ } } -.mdl-layout__container { +.mdl-layout { visibility: hidden; } diff --git a/docs/python_docs/python/tutorials/deploy/run-on-aws/cloud.rst b/docs/python_docs/python/tutorials/deploy/run-on-aws/cloud.rst index 9562cea2b95a..20f69a80e118 100644 --- a/docs/python_docs/python/tutorials/deploy/run-on-aws/cloud.rst +++ b/docs/python_docs/python/tutorials/deploy/run-on-aws/cloud.rst @@ -101,6 +101,5 @@ credentials: http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSGettingStartedGuide/AWSCredentials.html .. \_s3cmd: http://s3tools.org/s3cmd .. *Deep Learning AMI: https://aws.amazon.com/marketplace/pp/B01M0AXXQB?qid=1475211685369&sr=0-1&ref*\ =srh\_res\_product\_title -.. \_MXNet-v0.9.3 tag: https://github.com/dmlc/mxnet .. \_Jupyter: -http://jupyter.org .. \_good tutorial: -https://github.com/dmlc/mxnet-notebooks +.. \_MXNet-v0.9.3 tag: https://github.com/apache/incubator-mxnet .. 
\_Jupyter: +http://jupyter.org diff --git a/docs/python_docs/python/tutorials/deploy/run-on-aws/index.rst b/docs/python_docs/python/tutorials/deploy/run-on-aws/index.rst index d59c1a1fca16..c22b7b68d189 100644 --- a/docs/python_docs/python/tutorials/deploy/run-on-aws/index.rst +++ b/docs/python_docs/python/tutorials/deploy/run-on-aws/index.rst @@ -34,7 +34,13 @@ The following tutorials will help you learn how to deploy MXNet on various AWS p How to run MXNet using Amazon SageMaker. - .. card:: + .. card:: + :title: MXNet on the Cloud + :link: cloud.html + + How to run MXNet in the cloud. + + .. card:: :title: MXNet on the Cloud :link: cloud.html diff --git a/docs/python_docs/python/tutorials/extend/custom_layer.md b/docs/python_docs/python/tutorials/extend/custom_layer.md index 402752d067dc..875961acc226 100644 --- a/docs/python_docs/python/tutorials/extend/custom_layer.md +++ b/docs/python_docs/python/tutorials/extend/custom_layer.md @@ -26,6 +26,7 @@ In this article, I will cover how to create a new layer from scratch, how to use To create a new layer in Gluon API, one must create a class that inherits from [Block](https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/block.py#L123) class. This class provides the most basic functionality, and all pre-defined layers inherit from it directly or via other subclasses. Because each layer in Apache MxNet inherits from `Block`, words "layer" and "block" are used interchangeable inside of the Apache MxNet community. +- MXNet [7b24137](https://github.com/apache/incubator-mxnet/commit/7b24137ed45df605defa4ce72ec91554f6e445f0). See Instructions in [Setup and Installation]({{'/get_started'|relative_url}}). The only instance method needed to be implemented is [forward(self, x)](https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/block.py#L415), which defines what exactly your layer is going to do during forward propagation. 
Notice, that it doesn't require to provide what the block should do during back propogation. Back propogation pass for blocks is done by Apache MxNet for you. In the example below, we define a new layer and implement `forward()` method to normalize input data by fitting it into a range of [0, 1]. @@ -86,12 +87,11 @@ layer = NormalizationHybridLayer() layer(nd.array([1, 2, 3], ctx=mx.cpu())) ``` +Output: - - -```text - [0. 0.5 1. ] - +```bash +[0. 0.5 1. ] + ``` @@ -125,15 +125,15 @@ net(input) ``` +Output: - -```text - [[-0.13601446] - [ 0.26103732] - [-0.05046433] - [-1.2375476 ] - [-0.15506986]] - +```bash +[[-0.13601446] + [ 0.26103732] + [-0.05046433] + [-1.2375476 ] + [-0.15506986]] + ``` @@ -223,37 +223,38 @@ loss.backward() # Backward compute trainer.step(input.shape[0]) # Trainer updates parameters of every block, using .grad field using oprimization method (sgd in this example) # We provide batch size that is used as a divider in cost function formula print_params("=========== Parameters after backward pass ===========\n", net) - ``` -```text - =========== Parameters after forward pass =========== - - hybridsequential94_normalizationhybridlayer0_weights = - [[-0.3983642 -0.505708 -0.02425683 -0.3133553 -0.35161012] - [ 0.6467543 0.3918715 -0.6154656 -0.20702496 -0.4243446 ] - [ 0.6077331 0.03922009 0.13425875 0.5729856 -0.14446527] - [-0.3572498 0.18545026 -0.09098256 0.5106366 -0.35151464] - [-0.39846328 0.22245121 0.13075739 0.33387476 -0.10088372]] - - - hybridsequential94_normalizationhybridlayer0_scales = - [2.] 
- - - =========== Parameters after backward pass =========== - - hybridsequential94_normalizationhybridlayer0_weights = - [[-0.29839832 -0.47213346 0.08348035 -0.2324698 -0.27368504] - [ 0.76268613 0.43080837 -0.49052125 -0.11322092 -0.3339738 ] - [ 0.48665082 -0.00144657 0.00376363 0.47501418 -0.23885089] - [-0.22626656 0.22944227 0.05018325 0.6166192 -0.24941102] - [-0.44946212 0.20532274 0.07579394 0.29261002 -0.14063817]] - - - hybridsequential94_normalizationhybridlayer0_scales = - [2.] - +Output: + +```bash +=========== Parameters after forward pass =========== + +hybridsequential94_normalizationhybridlayer0_weights = +[[-0.3983642 -0.505708 -0.02425683 -0.3133553 -0.35161012] + [ 0.6467543 0.3918715 -0.6154656 -0.20702496 -0.4243446 ] + [ 0.6077331 0.03922009 0.13425875 0.5729856 -0.14446527] + [-0.3572498 0.18545026 -0.09098256 0.5106366 -0.35151464] + [-0.39846328 0.22245121 0.13075739 0.33387476 -0.10088372]] + + +hybridsequential94_normalizationhybridlayer0_scales = +[2.] + + +=========== Parameters after backward pass =========== + +hybridsequential94_normalizationhybridlayer0_weights = +[[-0.29839832 -0.47213346 0.08348035 -0.2324698 -0.27368504] + [ 0.76268613 0.43080837 -0.49052125 -0.11322092 -0.3339738 ] + [ 0.48665082 -0.00144657 0.00376363 0.47501418 -0.23885089] + [-0.22626656 0.22944227 0.05018325 0.6166192 -0.24941102] + [-0.44946212 0.20532274 0.07579394 0.29261002 -0.14063817]] + + +hybridsequential94_normalizationhybridlayer0_scales = +[2.] + ``` @@ -262,5 +263,3 @@ As it is seen from the output above, `weights` parameter has been changed by the ## Conclusion One important quality of a Deep learning framework is extensibility. Empowered by flexible abstractions, like `Block` and `HybridBlock`, one can easily extend Apache MxNet functionality to match its needs. 
- - \ No newline at end of file diff --git a/docs/python_docs/python/tutorials/packages/gluon/custom_layer_beginners.md b/docs/python_docs/python/tutorials/packages/gluon/custom_layer_beginners.md index 958736b99090..cbc35c07ad7b 100644 --- a/docs/python_docs/python/tutorials/packages/gluon/custom_layer_beginners.md +++ b/docs/python_docs/python/tutorials/packages/gluon/custom_layer_beginners.md @@ -141,7 +141,7 @@ class NormalizationHybridLayer(gluon.HybridBlock): self.scales = self.params.get('scales', shape=scales.shape, - init=mx.init.Constant(scales.asnumpy().tolist()), # Convert to regular list to make this object serializable + init=mx.init.Constant(scales.asnumpy()), differentiable=False) def hybrid_forward(self, F, x, weights, scales): diff --git a/docs/python_docs/python/tutorials/packages/gluon/gluon_from_experiment_to_deployment.md b/docs/python_docs/python/tutorials/packages/gluon/gluon_from_experiment_to_deployment.md new file mode 100644 index 000000000000..f0111240188d --- /dev/null +++ b/docs/python_docs/python/tutorials/packages/gluon/gluon_from_experiment_to_deployment.md @@ -0,0 +1,327 @@ + + + + + + + + + + + + + + + + + + +# Gluon: from experiment to deployment, an end to end tutorial + +## Overview +MXNet Gluon API comes with a lot of great features, and it can provide you everything you need: from experimentation to deploying the model. In this tutorial, we will walk you through a common use case on how to build a model using gluon, train it on your data, and deploy it for inference. +This tutorial covers training and inference in Python, please continue to [C++ inference part](https://mxnet.incubator.apache.org/versions/master/tutorials/c++/mxnet_cpp_inference_tutorial.html) after you finish. + +Let's say you need to build a service that provides flower species recognition. A common problem is that you don't have enough data to train a good model. In such cases, a technique called Transfer Learning can be used to make a more robust model. 
+In Transfer Learning we make use of a pre-trained model that solves a related task, and was trained on a very large standard dataset, such as ImageNet. ImageNet is from a different domain, but we can utilize the knowledge in this pre-trained model to perform the new task at hand. + +Gluon provides State of the Art models for many of the standard tasks such as Classification, Object Detection, Segmentation, etc. In this tutorial we will use the pre-trained model [ResNet50 V2](https://arxiv.org/abs/1603.05027) trained on ImageNet dataset. This model achieves 77.11% top-1 accuracy on ImageNet. We seek to transfer as much knowledge as possible for our task of recognizing different species of flowers. + + + + +## Prerequisites + +To complete this tutorial, you need: + +- [Build MXNet from source](https://mxnet.incubator.apache.org/install/ubuntu_setup.html#build-mxnet-from-source) with Python(Gluon) and C++ Packages +- Learn the basics about Gluon with [A 60-minute Gluon Crash Course](https://gluon-crash-course.mxnet.io/) + + +## The Data + +We will use the [Oxford 102 Category Flower Dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/) as an example to show you the steps. +We have prepared a utility file to help you download and organize your data into train, test, and validation sets. Run the following Python code to download and prepare the data: + + +```python +import mxnet as mx +data_util_file = "oxford_102_flower_dataset.py" +base_url = "https://raw.githubusercontent.com/apache/incubator-mxnet/master/docs/tutorial_utils/data/{}?raw=true" +mx.test_utils.download(base_url.format(data_util_file), fname=data_util_file) +import oxford_102_flower_dataset + +# download and move data to train, test, valid folders +path = './data' +oxford_102_flower_dataset.get_data(path) +``` + +Now your data will be organized into train, test, and validation sets, images belong to the same class are moved to the same folder. 
+ +## Training using Gluon + +### Define Hyper-parameters + +Now let's first import necessary packages: + + +```python +import math +import os +import time + +from mxnet import autograd +from mxnet import gluon, init +from mxnet.gluon import nn +from mxnet.gluon.data.vision import transforms +from mxnet.gluon.model_zoo.vision import resnet50_v2 +``` + +Next, we define the hyper-parameters that we will use for fine-tuning. We will use the [MXNet learning rate scheduler](https://mxnet.incubator.apache.org/tutorials/gluon/learning_rate_schedules.html) to adjust learning rates during training. +Here we set the `epochs` to 1 for quick demonstration, please change to 40 for actual training. + +```python +classes = 102 +epochs = 1 +lr = 0.001 +per_device_batch_size = 32 +momentum = 0.9 +wd = 0.0001 + +lr_factor = 0.75 +# learning rate change at following epochs +lr_epochs = [10, 20, 30] + +num_gpus = mx.context.num_gpus() +# you can replace num_workers with the number of cores on your device +num_workers = 8 +ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu()] +batch_size = per_device_batch_size * max(num_gpus, 1) +``` + +Now we will apply data augmentations on training images. This makes minor alterations on the training images, and our model will consider them as distinct images. This can be very useful for fine-tuning on a relatively small dataset, and it will help improve the model. We can use the Gluon [DataSet API](https://mxnet.incubator.apache.org/tutorials/gluon/datasets.html), [DataLoader API](https://mxnet.incubator.apache.org/tutorials/gluon/datasets.html), and [Transform API](https://mxnet.incubator.apache.org/tutorials/gluon/data_augmentation.html) to load the images and apply the following data augmentations: +1. Randomly crop the image and resize it to 224x224 +2. Randomly flip the image horizontally +3. Randomly jitter color and add noise +4.
Transpose the data from `[height, width, num_channels]` to `[num_channels, height, width]`, and map values from [0, 255] to [0, 1] +5. Normalize with the mean and standard deviation from the ImageNet dataset. + +For validation and inference, we only need to apply step 1, 4, and 5. We also need to save the mean and standard deviation values for [inference using C++](https://mxnet.incubator.apache.org/versions/master/tutorials/c++/mxnet_cpp_inference_tutorial.html). + +```python +jitter_param = 0.4 +lighting_param = 0.1 + +# mean and std for normalizing image value in range (0,1) +mean = [0.485, 0.456, 0.406] +std = [0.229, 0.224, 0.225] + +training_transformer = transforms.Compose([ + transforms.RandomResizedCrop(224), + transforms.RandomFlipLeftRight(), + transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param, + saturation=jitter_param), + transforms.RandomLighting(lighting_param), + transforms.ToTensor(), + transforms.Normalize(mean, std) +]) + +validation_transformer = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + transforms.Normalize(mean, std) +]) + +# save mean and std NDArray values for inference +mean_img = mx.nd.stack(*[mx.nd.full((224, 224), m) for m in mean]) +std_img = mx.nd.stack(*[mx.nd.full((224, 224), s) for s in std]) +mx.nd.save('mean_std_224.nd', {"mean_img": mean_img, "std_img": std_img}) + +train_path = os.path.join(path, 'train') +val_path = os.path.join(path, 'valid') +test_path = os.path.join(path, 'test') + +# loading the data and apply pre-processing(transforms) on images +train_data = gluon.data.DataLoader( + gluon.data.vision.ImageFolderDataset(train_path).transform_first(training_transformer), + batch_size=batch_size, shuffle=True, num_workers=num_workers) + +val_data = gluon.data.DataLoader( + gluon.data.vision.ImageFolderDataset(val_path).transform_first(validation_transformer), + batch_size=batch_size, shuffle=False, num_workers=num_workers) + +test_data = 
gluon.data.DataLoader( + gluon.data.vision.ImageFolderDataset(test_path).transform_first(validation_transformer), + batch_size=batch_size, shuffle=False, num_workers=num_workers) +``` + +### Loading pre-trained model + + +We will use pre-trained ResNet50_v2 model which was pre-trained on the [ImageNet Dataset](http://www.image-net.org/) with 1000 classes. To match the classes in the Flower dataset, we must redefine the last softmax (output) layer to be 102, then initialize the parameters. + +Before we go to training, one unique Gluon feature you should be aware of is hybridization. It allows you to convert your imperative code to a static symbolic graph, which is much more efficient to execute. There are two main benefits of hybridizing your model: better performance and easier serialization for deployment. The best part is that it's as simple as just calling `net.hybridize()`. To know more about Gluon hybridization, please follow the [hybridization tutorial](https://mxnet.incubator.apache.org/tutorials/gluon/hybrid.html). 
+ + + +```python +# load pre-trained resnet50_v2 from model zoo +finetune_net = resnet50_v2(pretrained=True, ctx=ctx) + +# change last softmax layer since number of classes are different +with finetune_net.name_scope(): + finetune_net.output = nn.Dense(classes) +finetune_net.output.initialize(init.Xavier(), ctx=ctx) +# hybridize for better performance +finetune_net.hybridize() + +num_batch = len(train_data) + +# setup learning rate scheduler +iterations_per_epoch = math.ceil(num_batch) +# learning rate change at following steps +lr_steps = [epoch * iterations_per_epoch for epoch in lr_epochs] +schedule = mx.lr_scheduler.MultiFactorScheduler(step=lr_steps, factor=lr_factor, base_lr=lr) + +# setup optimizer with learning rate scheduler, metric, and loss function +sgd_optimizer = mx.optimizer.SGD(learning_rate=lr, lr_scheduler=schedule, momentum=momentum, wd=wd) +metric = mx.metric.Accuracy() +softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss() +``` + +### Fine-tuning model on your custom dataset + +Now let's define the test metrics and start fine-tuning. 
+ + + +```python +def test(net, val_data, ctx): + metric = mx.metric.Accuracy() + for i, (data, label) in enumerate(val_data): + data = gluon.utils.split_and_load(data, ctx_list=ctx, even_split=False) + label = gluon.utils.split_and_load(label, ctx_list=ctx, even_split=False) + outputs = [net(x) for x in data] + metric.update(label, outputs) + return metric.get() + +trainer = gluon.Trainer(finetune_net.collect_params(), optimizer=sgd_optimizer) + +# start with epoch 1 for easier learning rate calculation +for epoch in range(1, epochs + 1): + + tic = time.time() + train_loss = 0 + metric.reset() + + for i, (data, label) in enumerate(train_data): + # get the images and labels + data = gluon.utils.split_and_load(data, ctx_list=ctx, even_split=False) + label = gluon.utils.split_and_load(label, ctx_list=ctx, even_split=False) + with autograd.record(): + outputs = [finetune_net(x) for x in data] + loss = [softmax_cross_entropy(yhat, y) for yhat, y in zip(outputs, label)] + for l in loss: + l.backward() + + trainer.step(batch_size) + train_loss += sum([l.mean().asscalar() for l in loss]) / len(loss) + metric.update(label, outputs) + + _, train_acc = metric.get() + train_loss /= num_batch + _, val_acc = test(finetune_net, val_data, ctx) + + print('[Epoch %d] Train-acc: %.3f, loss: %.3f | Val-acc: %.3f | learning-rate: %.3E | time: %.1f' % + (epoch, train_acc, train_loss, val_acc, trainer.learning_rate, time.time() - tic)) + +_, test_acc = test(finetune_net, test_data, ctx) +print('[Finished] Test-acc: %.3f' % (test_acc)) +``` + +Following is the training result: +```text +[Epoch 40] Train-acc: 0.945, loss: 0.354 | Val-acc: 0.955 | learning-rate: 4.219E-04 | time: 17.8 +[Finished] Test-acc: 0.952 +``` +In the previous example output, we trained the model using an [AWS p3.8xlarge instance](https://aws.amazon.com/ec2/instance-types/p3/) with 4 Tesla V100 GPUs. We were able to reach a test accuracy of 95.5% with 40 epochs in around 12 minutes. 
This was really fast because our model was pre-trained on a much larger dataset, ImageNet, with around 1.3 million images. It worked really well to capture features on our small dataset. + + +### Save the fine-tuned model + + +We have now trained our custom model. This can be serialized into model files using the export function. The export function will export the model architecture into a `.json` file and model parameters into a `.params` file. + + + +```python +finetune_net.export("flower-recognition", epoch=epochs) + +``` + +`export` creates `flower-recognition-symbol.json` and `flower-recognition-0040.params` (`0040` is for 40 epochs we ran) in the current directory. These files can be used for model deployment in the next section. + +## Load the model and run inference using the MXNet Module API + +MXNet provides various useful tools and interfaces for deploying your model for inference. For example, you can use [MXNet Model Server](https://github.com/awslabs/mxnet-model-server) to start a service and host your trained model easily. +Besides that, you can also use MXNet's different language APIs to integrate your model with your existing service. We provide [Python](https://mxnet.incubator.apache.org/api/python/module/module.html), [Java](https://mxnet.incubator.apache.org/api/java/index.html), [Scala](https://mxnet.incubator.apache.org/api/scala/index.html), and [C++](https://mxnet.incubator.apache.org/api/c++/index.html) APIs. + +Here we will briefly introduce how to run inference using Module API in Python. There is more detailed explanation available in the [Predict Image Tutorial](https://mxnet.incubator.apache.org/tutorials/python/predict_image.html). +In general, prediction consists of the following steps: +1. Load the model architecture (symbol file) and trained parameter values (params file) +2. Load the synset file for label names +3. Load the image and apply the same transformation we did on validation dataset during training +4.
Run a forward pass on the image data +5. Convert output probabilities to predicted label name + +```python +import numpy as np +from collections import namedtuple + +ctx = mx.cpu() +# load model symbol and params +sym, arg_params, aux_params = mx.model.load_checkpoint('flower-recognition', epochs) +mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None) +mod.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))], label_shapes=mod._label_shapes) +mod.set_params(arg_params, aux_params, allow_missing=True) + +# load synset for label names +with open('synset.txt', 'r') as f: + labels = [l.rstrip() for l in f] + +# load an image for prediction +img = mx.image.imread('./data/test/lotus/image_01832.jpg') +# apply transform we did during training +img = validation_transformer(img) +# batchify +img = img.expand_dims(axis=0) +Batch = namedtuple('Batch', ['data']) +mod.forward(Batch([img])) +prob = mod.get_outputs()[0].asnumpy() +prob = np.squeeze(prob) +idx = np.argmax(prob) +print('probability=%f, class=%s' % (prob[idx], labels[idx])) +``` + +Following is the output, you can see the image has been classified as lotus correctly. +```text +probability=9.798435, class=lotus +``` + +## What's next + +You can continue to the [next tutorial](https://mxnet.incubator.apache.org/versions/master/tutorials/c++/mxnet_cpp_inference_tutorial.html) on how to load the model we just trained and run inference using MXNet C++ API. + +You can also find more ways to run inference and deploy your models here: +1. [Java Inference examples](https://github.com/apache/incubator-mxnet/tree/master/scala-package/examples/src/main/java/org/apache/mxnetexamples/javaapi/infer) +2. [Scala Inference examples](https://mxnet.incubator.apache.org/tutorials/scala/) +3. [MXNet Model Server Examples](https://github.com/awslabs/mxnet-model-server/tree/master/examples) + +## References + +1.
[Transfer Learning for Oxford102 Flower Dataset](https://github.com/Arsey/keras-transfer-learning-for-oxford102) +2. [Gluon book on fine-tuning](https://www.d2l.ai/chapter_computer-vision/fine-tuning.html) +3. [Gluon CV transfer learning tutorial](https://gluon-cv.mxnet.io/build/examples_classification/transfer_learning_minc.html) +4. [Gluon crash course](https://gluon-crash-course.mxnet.io/) +5. [Gluon CPP inference example](https://github.com/apache/incubator-mxnet/blob/master/cpp-package/example/inference/) \ No newline at end of file diff --git a/docs/python_docs/python/tutorials/packages/gluon/index.rst b/docs/python_docs/python/tutorials/packages/gluon/index.rst index 85bb04faad42..01f2d7c398d5 100644 --- a/docs/python_docs/python/tutorials/packages/gluon/index.rst +++ b/docs/python_docs/python/tutorials/packages/gluon/index.rst @@ -37,7 +37,7 @@ Getting started .. card:: :title: Gluon: from experiment to deployment - :link: https://mxnet.incubator.apache.org/versions/master/tutorials/gluon/gluon_from_experiment_to_deployment.html + :link: gluon_from_experiment_to_deployment.html An end to end tutorial on working with the MXNet Gluon API. diff --git a/docs/python_docs/python/tutorials/performance/backend/profiler.md b/docs/python_docs/python/tutorials/performance/backend/profiler.md index 91a74e4f49cf..d7abd1b4eb1c 100644 --- a/docs/python_docs/python/tutorials/performance/backend/profiler.md +++ b/docs/python_docs/python/tutorials/performance/backend/profiler.md @@ -57,6 +57,7 @@ from mxnet import profiler profiler.set_config(profile_all=True, aggregate_stats=True, + continuous_dump=True, filename='profile_output.json') ``` @@ -185,6 +186,8 @@ There are a few ways to view the information collected by the profiler. You can You can use the `profiler.dumps()` method to view the information collected by the profiler in the console. The collected information contains time taken by each operator, time taken by each C API and memory consumed in both CPU and GPU. 
```python +profiler.set_state('run') +profiler.set_state('stop') print(profiler.dumps()) ``` diff --git a/docs/python_docs/themes/mx-theme/mxtheme/drawer.html b/docs/python_docs/themes/mx-theme/mxtheme/drawer.html index f5f4a016c18c..ea0280159820 100644 --- a/docs/python_docs/themes/mx-theme/mxtheme/drawer.html +++ b/docs/python_docs/themes/mx-theme/mxtheme/drawer.html @@ -2,7 +2,7 @@ {% block menu %}
{{ _('Table Of Contents') }} - {% set toctree = toctree(maxdepth=3, collapse=False, includehidden=True, titles_only=True) %} + {% set toctree = toctree(maxdepth=6, collapse=False, includehidden=True, titles_only=True) %} {% if toctree %} {% set lines = toctree.split('\n') %}