Merge remote-tracking branch 'origin/master' into mkldnn-v1.0
Conflicts:
	3rdparty/mkldnn
	ci/docker/install/ubuntu_mkl.sh
	ci/docker/install/ubuntu_mklml.sh
	cmake/DownloadMKLML.cmake
	src/operator/nn/mkldnn/mkldnn_act-inl.h
	src/operator/nn/mkldnn/mkldnn_act.cc
TaoLv committed Sep 29, 2019
2 parents ce5ec06 + 512d25a commit 4fba4c3
Showing 719 changed files with 16,040 additions and 53,459 deletions.
.github/PULL_REQUEST_TEMPLATE.md (2 changes: 1 addition & 1 deletion)
@@ -14,7 +14,7 @@ Please feel free to remove inapplicable items for your PR.
 - For user-facing API changes, API doc string has been updated.
 - For new C++ functions in header files, their functionalities and arguments are documented.
 - For new examples, README.md is added to explain the what the example does, the source of the dataset, expected performance on test set and reference to the original paper if applicable
-- Check the API doc at http://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
+- Check the API doc at https://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
 - [ ] To the my best knowledge, examples are either not affected by this change, or have been fixed to be compatible with this change
 
 ### Changes ###
3rdparty/mshadow/mshadow/base.h (43 changes: 41 additions & 2 deletions)
@@ -645,6 +645,25 @@ MSHADOW_XINLINE int64_t MinValue<int64_t>(void) {
   return LLONG_MIN;
 }
 
+/*!
+ * \brief negative infinity of certain types
+ * \tparam DType data type
+ */
+template<typename DType>
+MSHADOW_XINLINE DType NegInfValue(void) {
+  return MinValue<DType>();
+}
+/*! \brief negative infinity value of float */
+template<>
+MSHADOW_XINLINE float NegInfValue<float>(void) {
+  return -HUGE_VALF;
+}
+/*! \brief negative infinity value of double */
+template<>
+MSHADOW_XINLINE double NegInfValue<double>(void) {
+  return -HUGE_VAL;
+}
+
 /*!
  * \brief maximum value of certain types
  * \tparam DType data type
@@ -686,6 +705,26 @@ template<>
 MSHADOW_XINLINE int64_t MaxValue<int64_t>(void) {
   return LLONG_MAX;
 }
+
+/*!
+ * \brief positive infinity of certain types
+ * \tparam DType data type
+ */
+template<typename DType>
+MSHADOW_XINLINE DType PosInfValue(void) {
+  return MaxValue<DType>();
+}
+/*! \brief positive infinity value of float */
+template<>
+MSHADOW_XINLINE float PosInfValue<float>(void) {
+  return HUGE_VALF;
+}
+/*! \brief positive infinity value of double */
+template<>
+MSHADOW_XINLINE double PosInfValue<double>(void) {
+  return HUGE_VAL;
+}
+
 } // namespace limits
 
 /*! \brief sum reducer */
@@ -793,7 +832,7 @@ struct maximum {
    */
   template<typename DType>
   MSHADOW_XINLINE static void SetInitValue(DType &initv) { // NOLINT(*)
-    initv = limits::MinValue<DType>();
+    initv = limits::NegInfValue<DType>();
   }
   /*!
    *\brief set the initial value during reduction
@@ -849,7 +888,7 @@ struct minimum {
    */
   template<typename DType>
   MSHADOW_XINLINE static void SetInitValue(DType &initv) { // NOLINT(*)
-    initv = limits::MaxValue<DType>();
+    initv = limits::PosInfValue<DType>();
   }
   /*!
    *\brief set the initial value during reduction
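The change above reseeds the max/min reducers with true infinities instead of the largest finite values (for float, -HUGE_VALF rather than -FLT_MAX). A minimal Python sketch, not MXNet code, of why the seed matters when every input is itself -inf:

```
values = [float("-inf")]              # a reduction input that is all -inf
old = max([-3.4028235e38] + values)   # seeded like the old MinValue<float>()
new = max([float("-inf")] + values)   # seeded like the new NegInfValue<float>()
print(old)                            # -3.4028235e+38 -- wrong, overstates the max
print(new)                            # -inf           -- correct
```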
MKLDNN_README.md (2 changes: 1 addition & 1 deletion)
@@ -15,4 +15,4 @@
 <!--- specific language governing permissions and limitations -->
 <!--- under the License. -->
 
-File is moved to [docs/tutorials/mkldnn/MKLDNN_README.md](docs/tutorials/mkldnn/MKLDNN_README.md).
+File is moved to [docs/tutorials/mkldnn/MKLDNN_README.md](docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md).
Makefile (28 changes: 11 additions & 17 deletions)
@@ -213,10 +213,12 @@ ifeq ($(USE_LAPACK), 1)
 ifeq ($(USE_BLAS),$(filter $(USE_BLAS),blas openblas atlas mkl))
 ifeq (,$(wildcard $(USE_LAPACK_PATH)/liblapack.a))
 ifeq (,$(wildcard $(USE_LAPACK_PATH)/liblapack.so))
+ifeq (,$(wildcard $(USE_LAPACK_PATH)/liblapack.dylib))
 ifeq (,$(wildcard /lib/liblapack.a))
 ifeq (,$(wildcard /lib/liblapack.so))
 ifeq (,$(wildcard /usr/lib/liblapack.a))
 ifeq (,$(wildcard /usr/lib/liblapack.so))
+ifeq (,$(wildcard /usr/lib/liblapack.dylib))
 ifeq (,$(wildcard /usr/lib64/liblapack.a))
 ifeq (,$(wildcard /usr/lib64/liblapack.so))
 USE_LAPACK = 0
@@ -231,6 +233,8 @@ endif
 endif
 endif
 endif
+endif
+endif
 
 # lapack settings.
 ifeq ($(USE_LAPACK), 1)
@@ -446,7 +450,7 @@ ifeq ($(USE_DIST_KVSTORE), 1)
 	LDFLAGS += $(PS_LDFLAGS_A)
 endif
 
-.PHONY: clean all extra-packages test lint docs clean_all rcpplint rcppexport roxygen\
+.PHONY: clean all extra-packages test lint clean_all rcpplint rcppexport roxygen\
 	cython2 cython3 cython cyclean
 
 all: lib/libmxnet.a lib/libmxnet.so $(BIN) extra-packages sample_lib
@@ -655,20 +659,6 @@ cpplint:
 pylint:
 	python3 -m pylint --rcfile=$(ROOTDIR)/ci/other/pylintrc --ignore-patterns=".*\.so$$,.*\.dll$$,.*\.dylib$$" python/mxnet tools/caffe_converter/*.py
 
-sample_lib:
-	$(CXX) -shared -fPIC example/lib_api/mylib.cc -o libsample_lib.so -I include/mxnet
-
-doc: docs
-
-docs:
-	make -C docs html
-
-clean_docs:
-	make -C docs clean
-
-doxygen:
-	doxygen docs/Doxyfile
-
 # Cython build
 cython:
 	cd python; $(PYTHON) setup.py build_ext --inplace --with-cython
@@ -716,6 +706,10 @@ rpkgtest:
 	Rscript -e 'require(testthat);res<-test_dir("R-package/tests/testthat");if(!testthat:::all_passed(res)){stop("Test failures", call. = FALSE)}'
 	Rscript -e 'res<-covr:::package_coverage("R-package");fileConn<-file(paste("r-package_coverage_",toString(runif(1)),".json"));writeLines(covr:::to_codecov(res), fileConn);close(fileConn)'
 
+
+sample_lib:
+	$(CXX) -shared -fPIC example/lib_api/mylib.cc -o libsample_lib.so -I include/mxnet
+
 scalaclean:
 	(cd $(ROOTDIR)/scala-package && mvn clean)
 
@@ -760,7 +754,7 @@ ratcheck: build/rat/apache-rat/target/apache-rat-0.13.jar
 
 ifneq ($(EXTRA_OPERATORS),)
 clean: rclean cyclean $(EXTRA_PACKAGES_CLEAN)
-	$(RM) -r build lib bin deps *~ */*~ */*/*~ */*/*/*~
+	$(RM) -r build lib bin deps *~ */*~ */*/*~ */*/*/*~
 	(cd scala-package && mvn clean) || true
 	cd $(DMLC_CORE); $(MAKE) clean; cd -
 	cd $(PS_PATH); $(MAKE) clean; cd -
@@ -771,7 +765,7 @@ clean: rclean cyclean $(EXTRA_PACKAGES_CLEAN)
 	$(RM) -r $(patsubst %, %/*.o, $(EXTRA_OPERATORS)) $(patsubst %, %/*/*.o, $(EXTRA_OPERATORS))
 else
 clean: rclean mkldnn_clean cyclean testclean $(EXTRA_PACKAGES_CLEAN)
-	$(RM) -r build lib bin *~ */*~ */*/*~ */*/*/*~
+	$(RM) -r build lib bin *~ */*~ */*/*~ */*/*/*~
 	(cd scala-package && mvn clean) || true
 	cd $(DMLC_CORE); $(MAKE) clean; cd -
 	cd $(PS_PATH); $(MAKE) clean; cd -
NEWS.md (14 changes: 7 additions & 7 deletions)
@@ -216,7 +216,7 @@ MXNet now supports Dynamic Shape in both imperative and symbolic mode. MXNet use
 * while_loop: its output size depends on the number of iterations in the loop.
 * boolean indexing: its output size depends on the value of the input data.
 * many operators can be extended to take a shape symbol as input and the shape symbol can determine the output shape of these operators (with this extension, the symbol interface of MXNet can fully support shape).
-To support dynamic shape and such operators, we have modified MXNet backend. Now MXNet supports operators with dynamic shape such as [`contrib.while_loop`](https://mxnet.incubator.apache.org/api/python/ndarray/contrib.html#mxnet.ndarray.contrib.while_loop), [`contrib.cond`](https://mxnet.incubator.apache.org/api/python/ndarray/contrib.html#mxnet.ndarray.contrib.cond), and [`mxnet.ndarray.contrib.boolean_mask`](https://mxnet.incubator.apache.org/api/python/ndarray/contrib.html#contrib)
+To support dynamic shape and such operators, we have modified MXNet backend. Now MXNet supports operators with dynamic shape such as [`contrib.while_loop`](https://mxnet.apache.org/api/python/ndarray/contrib.html#mxnet.ndarray.contrib.while_loop), [`contrib.cond`](https://mxnet.apache.org/api/python/ndarray/contrib.html#mxnet.ndarray.contrib.cond), and [`mxnet.ndarray.contrib.boolean_mask`](https://mxnet.apache.org/api/python/ndarray/contrib.html#contrib)
 Note: Currently dynamic shape does not work with Gluon deferred initialization.
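As a concrete illustration of the dynamic-shape operators this passage mentions, here is a minimal `boolean_mask` sketch; the tensor values are arbitrary, and the point is that the output shape is only known at runtime:

```
import mxnet as mx

data = mx.nd.array([[1, 2], [3, 4], [5, 6]])
index = mx.nd.array([0, 1, 1])                 # keep rows where the mask is non-zero
out = mx.nd.contrib.boolean_mask(data, index)
print(out.shape)                               # (2, 2) -- depends on the mask values
```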

 #### Large Tensor Support
@@ -233,7 +233,7 @@ For more details please refer to the [design document](https://cwiki.apache.org/
 
 #### Dependency Update
 MXNet has added support for CUDA 10, CUDA 10.1, cudnn7.5, NCCL 2.4.2, and numpy 1.16.0.
-These updates are available through PyPI packages and build from source, refer to [installation guid](https://mxnet.incubator.apache.org/versions/master/install/index.html) for more details.
+These updates are available through PyPI packages and build from source, refer to [installation guid](https://mxnet.apache.org/versions/master/install/index.html) for more details.
 
 #### Gluon Fit API(experimental)
 Training a model in Gluon requires users to write the training loop. This is useful because of its imperative nature, however repeating the same code across multiple models can become tedious and repetitive with boilerplate code.
@@ -1213,7 +1213,7 @@ MKLDNN backend takes advantage of MXNet subgraph to implement the most of possib
 ##### Quantization
 Performance of reduced-precision (INT8) computation is also dramatically improved after the graph optimization feature is applied on CPU Platforms. Various models are supported and can benefit from reduced-precision computation, including symbolic models, Gluon models and even custom models. Users can run most of the pre-trained models with only a few lines of commands and a new quantization script imagenet_gen_qsym_mkldnn.py. The observed accuracy loss is less than 0.5% for popular CNN networks, like ResNet-50, Inception-BN, MobileNet, etc.
 
-Please find detailed information and performance/accuracy numbers here: [MKLDNN README](https://github.com/apache/incubator-mxnet/blob/master/docs/tutorials/mkldnn/MKLDNN_README.md), [quantization README](https://github.com/apache/incubator-mxnet/tree/master/example/quantization#1) and [design proposal](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN)
+Please find detailed information and performance/accuracy numbers here: [MKLDNN README](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html), [quantization README](https://github.com/apache/incubator-mxnet/tree/master/example/quantization#1) and [design proposal](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN)
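A hedged sketch of the quantization flow described above, using the `mxnet.contrib.quantization.quantize_model` API that the imagenet_gen_qsym_mkldnn.py script builds on; the checkpoint name and keyword choices here are illustrative assumptions, not the script's exact invocation:

```
import mxnet as mx
from mxnet.contrib.quantization import quantize_model

# Assumed checkpoint files "resnet50_v1-symbol.json" / "resnet50_v1-0000.params".
sym, arg_params, aux_params = mx.model.load_checkpoint("resnet50_v1", 0)
qsym, qarg_params, aux_params = quantize_model(
    sym=sym, arg_params=arg_params, aux_params=aux_params,
    ctx=mx.cpu(), calib_mode="none")  # 'naive'/'entropy' modes need calibration data
```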

 ### New Operators

@@ -1624,7 +1624,7 @@ Please find detailed information and performance/accuracy numbers here: [MKLDNN
 * Updated CONTRIBUTORS.md to include mxnet-label-bot (#13048)
 
 ### How to build MXNet
-Please follow the instructions at https://mxnet.incubator.apache.org/install/index.html
+Please follow the instructions at https://mxnet.apache.org/install/index.html
 
 ### List of submodules used by Apache MXNet (Incubating) and when they were updated last
 Submodule@commit ID::Last updated by MXNet:: Last update in submodule
@@ -1756,7 +1756,7 @@ For more information and examples, see [full release notes](https://cwiki.apache
 
 ### New Features - Clojure package (experimental)
 - MXNet now supports the Clojure programming language. The MXNet Clojure package brings flexible and efficient GPU computing and state-of-art deep learning to Clojure. It enables you to write seamless tensor/matrix computation with multiple GPUs in Clojure. It also lets you construct and customize the state-of-art deep learning models in Clojure, and apply them to tasks, such as image classification and data science challenges.([#11205](https://github.com/apache/incubator-mxnet/pull/11205))
-- Checkout examples and API documentation [here](http://mxnet.incubator.apache.org/api/clojure/index.html).
+- Checkout examples and API documentation [here](https://mxnet.apache.org/api/clojure/index.html).
 
 ### New Features - Synchronized Cross-GPU Batch Norm (experimental)
 - Gluon now supports Synchronized Batch Normalization (#11502).
@@ -1786,8 +1786,8 @@ For more information and examples, see [full release notes](https://cwiki.apache
 - Set environment variable `MXNET_KVSTORE_USETREE=1` to enable.
 
 ### New Features - Export MXNet models to ONNX format (experimental)
-- With this feature, now MXNet models can be exported to ONNX format([#11213](https://github.com/apache/incubator-mxnet/pull/11213)). Currently, MXNet supports ONNX v1.2.1. [API documentation](http://mxnet.incubator.apache.org/api/python/contrib/onnx.html).
+- With this feature, now MXNet models can be exported to ONNX format([#11213](https://github.com/apache/incubator-mxnet/pull/11213)). Currently, MXNet supports ONNX v1.2.1. [API documentation](https://mxnet.apache.org/api/python/contrib/onnx.html).
-- Checkout this [tutorial](http://mxnet.incubator.apache.org/tutorials/onnx/export_mxnet_to_onnx.html) which shows how to use MXNet to ONNX exporter APIs. ONNX protobuf so that those models can be imported in other frameworks for inference.
+- Checkout this [tutorial](https://mxnet.apache.org/tutorials/onnx/export_mxnet_to_onnx.html) which shows how to use MXNet to ONNX exporter APIs. ONNX protobuf so that those models can be imported in other frameworks for inference.
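A minimal sketch of the exporter API referenced above; the file names are placeholders, and the input shape assumes a standard 224x224 image model:

```
import numpy as np
from mxnet.contrib import onnx as onnx_mxnet

# Accepts a saved symbol/params pair (or in-memory objects) and writes ONNX.
onnx_file = onnx_mxnet.export_model(
    sym="resnet-symbol.json", params="resnet-0000.params",  # placeholder paths
    input_shape=[(1, 3, 224, 224)], input_type=np.float32,
    onnx_file_path="resnet.onnx")
```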

 ### New Features - TensorRT Runtime Integration (experimental)
 - [TensorRT](https://developer.nvidia.com/tensorrt) provides significant acceleration of model inference on NVIDIA GPUs compared to running the full graph in MxNet using unfused GPU operators. In addition to faster fp32 inference, TensorRT optimizes fp16 inference, and is capable of int8 inference (provided the quantization steps are performed). Besides increasing throughput, TensorRT significantly reduces inference latency, especially for small batches.
R-package/DESCRIPTION (1 change: 1 addition & 0 deletions)
@@ -33,3 +33,4 @@ Depends:
 LinkingTo: Rcpp
 VignetteBuilder: knitr
 RoxygenNote: 6.1.1
+Encoding: UTF-8
R-package/R/zzz.R (2 changes: 1 addition & 1 deletion)
@@ -54,7 +54,7 @@ NULL
 
 tips <- c(
   "Need help? Feel free to open an issue on https://github.com/dmlc/mxnet/issues",
-  "For more documents, please visit http://mxnet.io",
+  "For more documents, please visit https://mxnet.io",
   "Use suppressPackageStartupMessages() to eliminate package startup messages."
 )
 
R-package/README.md (2 changes: 1 addition & 1 deletion)
@@ -24,7 +24,7 @@ options(repos = cran)
 install.packages("mxnet")
 ```
 
-To use the GPU version or to use it on Linux, please follow [Installation Guide](http://mxnet.io/install/index.html)
+To use the GPU version or to use it on Linux, please follow [Installation Guide](https://mxnet.io/install/index.html)
 
 License
 -------
R-package/src/export.cc (13 changes: 13 additions & 0 deletions)
@@ -76,6 +76,15 @@ std::string ExportDocString(const std::string& docstring) {
   return os.str();
 }
 
+std::string ReplaceAll(std::string str, const std::string& from, const std::string& to) {
+  size_t start_pos = 0;
+  while ((start_pos = str.find(from, start_pos)) != std::string::npos) {
+    str.replace(start_pos, from.length(), to);
+    start_pos += to.length();  // Handles case where 'to' is a substring of 'from'
+  }
+  return str;
+}
+
 void ExportVArgFunction(std::ostream& os,  // NOLINT(*)
                         const std::string& func_name,
                         const std::string& docstr) {
@@ -118,6 +127,10 @@ void Exporter::Export(const std::string& path) {
         || fname == "mx.varg.symbol.min") continue;
     Rcpp::List func_info(scope->get_function(fname));
     std::string docstr = Rcpp::as<std::string>(func_info[2]);
+
+    docstr = ReplaceAll(docstr, std::string("\a"), std::string("\\a"));
+    docstr = ReplaceAll(docstr, std::string("\b"), std::string("\\b"));
+
     if (docstr.find("@export") == std::string::npos) continue;
     if (fname.find("mx.varg.") == 0) {
       ExportVArgFunction(script, fname, docstr);
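The ReplaceAll calls above rewrite the BEL (`\a`) and backspace (`\b`) control characters, which `\a`/`\b` escape sequences in C++ doc comments expand into, as literal backslash escapes so the generated R docstrings stay printable. The same idea, sketched in Python for clarity:

```
doc = "alarm:\a backspace:\b"                        # control chars from a raw docstring
doc = doc.replace("\a", "\\a").replace("\b", "\\b")  # turn them into printable escapes
print(doc)                                           # alarm:\a backspace:\b
```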
R-package/vignettes/MultidimLstm.Rmd (2 changes: 1 addition & 1 deletion)
@@ -299,4 +299,4 @@ We also repeated the above experiments to generate the next 100 samples to 301st
 
 The above tutorial is just for demonstration purposes and has not been tuned extensively for accuracy.
 
-For more tutorials on MXNet-R, head on to [MXNet-R tutorials](https://mxnet.incubator.apache.org/tutorials/r/index.html)
+For more tutorials on MXNet-R, head on to [MXNet-R tutorials](https://mxnet.apache.org/tutorials/r/index.html)