
Commit: Merge remote-tracking branch 'upstream/master'

yzhliu committed Jan 25, 2016
2 parents 10f1d5a + 9844ebe · commit f9c1f09
Showing 86 changed files with 2,823 additions and 585 deletions.
4 changes: 1 addition & 3 deletions .travis.yml
@@ -27,8 +27,6 @@ matrix:
exclude:
- os: osx
env: TASK=lint
- os: osx
env: TASK=doc
- os: linux
env: TASK=r_test
- os: osx
@@ -73,7 +71,7 @@ cache:

before_cache:
- dmlc-core/scripts/travis/travis_before_cache.sh

after_failure:
- tests/travis/travis_after_failure.sh

2 changes: 1 addition & 1 deletion LICENSE
@@ -1,4 +1,4 @@
Copyright (c) 2015 by Contributors
Copyright (c) 2015-2016 by Contributors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
38 changes: 29 additions & 9 deletions Makefile
@@ -94,10 +94,11 @@ endif
all: lib/libmxnet.a lib/libmxnet.so $(BIN)

SRC = $(wildcard src/*.cc src/*/*.cc)
OBJ = $(patsubst src/%.cc, build/%.o, $(SRC))
OBJ = $(patsubst %.cc, build/%.o, $(SRC))
CUSRC = $(wildcard src/*/*.cu)
CUOBJ = $(patsubst src/%.cu, build/%_gpu.o, $(CUSRC))
CUOBJ = $(patsubst %.cu, build/%_gpu.o, $(CUSRC))

# extra operators
ifneq ($(EXTRA_OPERATORS),)
EXTRA_SRC = $(wildcard $(EXTRA_OPERATORS)/*.cc $(EXTRA_OPERATORS)/*/*.cc)
EXTRA_OBJ = $(patsubst $(EXTRA_OPERATORS)/%.cc, $(EXTRA_OPERATORS)/build/%.o, $(EXTRA_SRC))
@@ -110,10 +111,16 @@ else
EXTRA_CUOBJ =
endif

# plugin
PLUGIN_OBJ =
PLUGIN_CUOBJ =
include $(MXNET_PLUGINS)

# all dep
LIB_DEP += $(DMLC_CORE)/libdmlc.a
ALL_DEP = $(OBJ) $(EXTRA_OBJ) $(LIB_DEP)
ALL_DEP = $(OBJ) $(EXTRA_OBJ) $(PLUGIN_OBJ) $(LIB_DEP)
ifeq ($(USE_CUDA), 1)
ALL_DEP += $(CUOBJ) $(EXTRA_CUOBJ)
ALL_DEP += $(CUOBJ) $(EXTRA_CUOBJ) $(PLUGIN_CUOBJ)
LDFLAGS += -lcuda
endif

@@ -125,14 +132,26 @@ else
endif


build/%.o: src/%.cc
build/src/%.o: src/%.cc
@mkdir -p $(@D)
$(CXX) -std=c++0x $(CFLAGS) -MM -MT build/src/$*.o $< >build/src/$*.d
$(CXX) -std=c++0x -c $(CFLAGS) -c $< -o $@

build/src/%_gpu.o: src/%.cu
@mkdir -p $(@D)
$(NVCC) $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" -M -MT build/src/$*_gpu.o $< >build/src/$*_gpu.d
$(NVCC) -c -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" $<

build/plugin/%.o: plugin/%.cc
@mkdir -p $(@D)
$(CXX) -std=c++0x $(CFLAGS) -MM -MT build/$*.o $< >build/$*.d
$(CXX) -std=c++0x $(CFLAGS) -MM -MT build/plugin/$*.o $< >build/plugin/$*.d
$(CXX) -std=c++0x -c $(CFLAGS) -c $< -o $@

build/%_gpu.o: src/%.cu
# An nvcc bug causes it to generate "generic/xxx.h" dependencies from torch headers.
# Use CXX to generate dependency instead.
build/plugin/%_gpu.o: plugin/%.cu
@mkdir -p $(@D)
$(NVCC) $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" -M -MT build/$*_gpu.o $< >build/$*_gpu.d
$(CXX) -std=c++0x $(CFLAGS) -MM -MT build/plugin/$*_gpu.o $< >build/plugin/$*_gpu.d
$(NVCC) -c -o $@ $(NVCCFLAGS) -Xcompiler "$(CFLAGS)" $<

$(EXTRA_OPERATORS)/build/%.o: $(EXTRA_OPERATORS)/%.cc
@@ -173,7 +192,7 @@ include tests/cpp/unittest.mk
test: $(TEST)

lint: rcpplint
python2 dmlc-core/scripts/lint.py mxnet ${LINT_LANG} include src scripts python predict/python
python2 dmlc-core/scripts/lint.py mxnet ${LINT_LANG} include src plugin scripts python predict/python

doc: doxygen

@@ -216,6 +235,7 @@ clean_all: clean

-include build/*.d
-include build/*/*.d
-include build/*/*/*.d
ifneq ($(EXTRA_OPERATORS),)
-include $(EXTRA_OPERATORS)/build/*.d
endif
3 changes: 2 additions & 1 deletion R-package/DESCRIPTION
@@ -24,7 +24,8 @@ Suggests:
mlbench,
knitr,
rmarkdown,
imager
imager,
roxygen2
LinkingTo: Rcpp
RoxygenNote: 5.0.1
VignetteBuilder: knitr
10 changes: 6 additions & 4 deletions R-package/R/executor.R
@@ -2,7 +2,7 @@
#' with information from input shapes.
#'
#' @export
mx.simple.bind <- function(symbol, ctx, grad.req=FALSE, ...) {
mx.simple.bind <- function(symbol, ctx, grad.req = "null", ...) {
if (!is.MXSymbol(symbol)) stop("symbol need to be MXSymbol")
slist <- symbol$infer.shape(list(...))

@@ -16,9 +16,11 @@ mx.simple.bind <- function(symbol, ctx, grad.req=FALSE, ...) {
mx.nd.zeros(shape, ctx)
}, simplify = FALSE, USE.NAMES = TRUE)
grad.reqs <- lapply(names(slist$arg.shapes), function(nm) {
grad.req &&
!mx.util.str.endswith(nm, "label") &&
!mx.util.str.endswith(nm, "data")
if (!mx.util.str.endswith(nm, "label") && !mx.util.str.endswith(nm, "data")) {
grad.req
} else {
"null"
}
})
mx.symbol.bind(symbol, ctx,
arg.arrays=arg.arrays,
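For reference, `grad.req` now takes one of the strings `"null"`, `"write"`, or `"add"` in place of the old boolean. A minimal sketch of binding with the new interface, assuming a toy two-variable graph (the symbols and shapes below are illustrative, not part of this commit):

```r
library(mxnet)

A <- mx.symbol.Variable("A")
B <- mx.symbol.Variable("B")
C <- A + B

# "write" overwrites gradient buffers on each backward pass;
# "add" would accumulate instead, and "null" allocates no buffers.
exec <- mx.simple.bind(C, ctx = mx.cpu(),
                       A = c(2, 3), B = c(2, 3), grad.req = "write")
```

Arguments whose names end in `data` or `label` are still forced to `"null"`, per the rewritten `grad.reqs` lapply above.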
4 changes: 2 additions & 2 deletions R-package/R/model.R
@@ -104,7 +104,7 @@ mx.model.train <- function(symbol, ctx, input.shape,
# create the executors
sliceinfo <- mx.model.slice.shape(input.shape, ndevice)
train.execs <- lapply(1:ndevice, function(i) {
mx.simple.bind(symbol, ctx=ctx[[i]], data=sliceinfo[[i]]$shape, grad.req=TRUE)
mx.simple.bind(symbol, ctx=ctx[[i]], data=sliceinfo[[i]]$shape, grad.req="write")
})
# set the parameters into executors
for (texec in train.execs) {
@@ -469,7 +469,7 @@ predict.MXFeedForwardModel <- function(model, X, ctx=NULL, array.batch.size=128,
X$reset()
if (!X$iter.next()) stop("Cannot predict on empty iterator")
dlist = X$value()
pexec <- mx.simple.bind(model$symbol, ctx=ctx, data=dim(dlist$data), grad.req=FALSE)
pexec <- mx.simple.bind(model$symbol, ctx=ctx, data=dim(dlist$data), grad.req="null")
mx.exec.update.arg.arrays(pexec, model$arg.params, match.name=TRUE)
mx.exec.update.aux.arrays(pexec, model$aux.params, match.name=TRUE)
packer <- mx.nd.arraypacker()
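The two call sites above follow the same pattern: training executors bind with `"write"`, prediction executors with `"null"`. A hedged side-by-side sketch, reusing the toy `C <- A + B` symbol from the previous example:

```r
# training: gradient buffers allocated and overwritten every backward pass
train.exec <- mx.simple.bind(C, ctx = mx.cpu(),
                             A = c(2, 3), B = c(2, 3), grad.req = "write")

# prediction: no gradient buffers at all, saving memory
pred.exec <- mx.simple.bind(C, ctx = mx.cpu(),
                            A = c(2, 3), B = c(2, 3), grad.req = "null")
```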
6 changes: 6 additions & 0 deletions R-package/R/mxnet_generated.R
@@ -413,6 +413,8 @@ mx.symbol.Cast <- function(...) {

#' Perform a feature concat on the channel dim (dim 1) over all the inputs.
#'
#' @param data Symbol[]
#' List of tensors to concatenate
#' @param num.args int, required
#' Number of inputs to be concatenated.
#' @param dim int, optional, default='1'
@@ -792,6 +794,8 @@ mx.symbol.SoftmaxActivation <- function(...) {
#'
#' @param data Symbol
#' Input data to softmax.
#' @param label Symbol
#' Label data.
#' @param grad.scale float, optional, default=1
#' Scale the gradient by a float factor
#' @param ignore.label float, optional, default=-1
@@ -828,6 +832,8 @@ mx.symbol.SwapAxis <- function(...) {

#' Perform nearest-neighbour/bilinear upsampling on the inputs
#'
#' @param data Symbol[]
#' Array of tensors to upsample
#' @param scale int (non-negative), required
#' Up sampling scale
#' @param num.filter int (non-negative), optional, default=0
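The new `@param data Symbol[]` entries document that `mx.symbol.Concat` and `mx.symbol.UpSampling` operate on a list of input symbols. A sketch of a matching `Concat` call, assuming the generated wrapper accepts the inputs variadically (that calling convention is an assumption, not shown in this diff):

```r
x <- mx.symbol.Variable("x")
y <- mx.symbol.Variable("y")

# Concatenate the two inputs along the channel dimension (dim 1);
# num.args must match the number of input symbols supplied.
z <- mx.symbol.Concat(x, y, num.args = 2, dim = 1)
```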
2 changes: 1 addition & 1 deletion R-package/demo/basic_executor.R
@@ -19,7 +19,7 @@ exec = mxnet:::mx.symbol.bind(
ctx=mx.cpu(),
arg.arrays = list(A=a, B=b),
aux.arrays = list(),
grad.reqs = list(FALSE, FALSE))
grad.reqs = list("null", "null"))

# calculate outputs
mx.exec.forward(exec)
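With strings instead of booleans, `grad.reqs` can now differ per argument. A sketch extending the demo, assuming `mx.exec.backward` takes the head gradient for the output as an extra argument (an assumption about an API this diff does not show):

```r
A <- mx.symbol.Variable("A")
B <- mx.symbol.Variable("B")
C <- A + B

a <- mx.nd.ones(c(2, 3))
b <- mx.nd.ones(c(2, 3)) * 2

# Request a gradient buffer for A only; B's gradient is skipped ("null").
exec <- mxnet:::mx.symbol.bind(
  C, ctx = mx.cpu(),
  arg.arrays = list(A = a, B = b),
  aux.arrays = list(),
  grad.reqs = list("write", "null"))

mx.exec.forward(exec)
mx.exec.backward(exec, mx.nd.ones(c(2, 3)))  # head gradient for output C
```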
2 changes: 1 addition & 1 deletion R-package/man/mx.io.ImageRecordIter.Rd

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion R-package/man/mx.io.MNISTIter.Rd

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion R-package/man/mx.simple.bind.Rd

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions R-package/man/mx.symbol.Concat.Rd

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions R-package/man/mx.symbol.SoftmaxOutput.Rd

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions R-package/man/mx.symbol.UpSampling.Rd

Some generated files are not rendered by default.

16 changes: 12 additions & 4 deletions R-package/src/executor.cc
@@ -141,14 +141,22 @@ inline Rcpp::List* CreateGradList(const Rcpp::List& source_array,
ret->names() = names;
handles->resize(grad_reqs.size(), nullptr);
grad_req_type->resize(grad_reqs.size(), 0);
std::map<std::string, int> req_map;
req_map["null"] = 0;
req_map["write"] = 1;
req_map["add"] = 3;

for (size_t i = 0; i < grad_reqs.size(); ++i) {
RCHECK(Rcpp::is<bool>(grad_reqs[i]))
<< "Expect input grad_reqs to be list of booleans";
if (Rcpp::as<bool>(grad_reqs[i])) {
if (Rcpp::as<std::string>(grad_reqs[i]) != "null"
&& Rcpp::as<std::string>(grad_reqs[i]) != "write"
&& Rcpp::as<std::string>(grad_reqs[i]) != "add") {
RLOG_FATAL << "grad_req must be one of 'null', 'write' or 'add'";
}

if (Rcpp::as<std::string>(grad_reqs[i]) != "null") {
ret->at(i) = NDArray::Empty(NDArray::FromRObject(source_array[i]).dim(), ctx);
handles->at(i) = NDArray::FromRObject(ret->at(i))->handle;
grad_req_type->at(i) = 1;
grad_req_type->at(i) = req_map[Rcpp::as<std::string>(grad_reqs[i])];
}
}
} catch(const Rcpp::exception& ex) {
2 changes: 2 additions & 0 deletions README.md
@@ -20,6 +20,7 @@ deep learning system, and interesting insights of DL systems for hackers.

What's New
----------
* [Embedding Torch layers and functions in MXNet](https://mxnet.readthedocs.org/en/latest/tutorial/torch_howto.html)
* [MXNet.js: Javascript Package for Deep Learning in Browser (without server)
](https://github.com/dmlc/mxnet.js/)
* [Design Note: Design Efficient Deep Learning Data Loading Module](http://mxnet.readthedocs.org/en/latest/developer-guide/note_data_loading.html)
@@ -34,6 +35,7 @@ Contents
* [Documentation and Tutorials](http://mxnet.readthedocs.org/en/latest/)
* [Open Source Design Notes](http://mxnet.readthedocs.org/en/latest/#open-source-design-notes)
* [Code Examples](example)
* [Installation](http://mxnet.readthedocs.org/en/latest/build.html)
* [Pretrained Models](https://github.com/dmlc/mxnet-model-gallery)
* [Contribute to MXNet](http://mxnet.readthedocs.org/en/latest/contribute.html)
* [Frequently Asked Questions](http://mxnet.readthedocs.org/en/latest/faq.html)
18 changes: 10 additions & 8 deletions doc/build.md
@@ -24,7 +24,7 @@ even better to send pull request if you can fix the problem.
- [Python Package Installation](#python-package-installation)
- [R Package Installation](#r-package-installation)
- [Docker Images](#docker-images)
- [Frequently asked questions](#frequently-asked-questions)
- [Trouble Shooting](#trouble-shooting)

## Build the Shared Library

@@ -240,10 +240,10 @@ R CMD INSTALL mxnet_0.5.tar.gz
```
To install the package using GPU on Windows without building the package from scratch, note that you need a couple of programs installed already:
- You'll need the [CUDA Toolkit](https://developer.nvidia.com/cuda-toolkit). This depends on Visual Studio, and a free compatible version would be [Visual Studio Community 2013](https://www.visualstudio.com/en-us/news/vs2013-community-vs.aspx). For instructions and compatibility checks, read http://docs.nvidia.com/cuda/cuda-getting-started-guide-for-microsoft-windows/ .

- You will also need to register as a developer at nvidia and download CUDNN V3, https://developer.nvidia.com/cudnn .


1. Download the mxnet package as a ZIP from the Github repository https://github.com/dmlc/mxnet and unpack it. You will be editing the `/mxnet/R-package` folder.
@@ -256,7 +256,7 @@ To install the package using GPU on Windows without building the package from scratch

4. Create the folder `R-package/inst/libs/x64`. We only support 64-bit operating system now, so you need the x64 folder;

5. Put dll files in `R-package/inst/libs/x64`.

The first dll file you need is `nocudnn/lib/libmxnet.dll`. The other dll files you need are the ones in all 4 subfolders of `nocudnn/3rdparty/`, for the `cudnn` and `openblas` you'll need to look in the `/bin` folders. There should be 11 dll files now in `R-package/inst/libs/x64`.

@@ -284,17 +284,17 @@ sudo docker run -it --device /dev/nvidiactl --device /dev/nvidia-uvm --device /d
For a guide to Docker, see the [official docs](https://docs.docker.com/userguide/). For more details on how to use the
MXNet Docker images, including requirements for CUDA support, consult the [source project](https://github.com/Kaixhin/dockerfiles).

## Frequently Asked Questions
## Trouble Shooting

1. **Compile failed after `git pull`**
### Compile failed after git pull

Please first update the submodules, clean all and recompile:

```bash
git submodule update && make clean_all && make -j4
```

2. **Compile failed after `config.mk` is modified**
### Compile failed after config.mk is modified

This often happens if `USE_CUDA` or `USE_DIST_KVSTORE` has been changed. You
need to clean all first:
@@ -303,7 +303,9 @@
make clean_all && make -j4
```

3. **Still get the error message e.g. `compile with USE_DIST_KVSTORE=1 to use
### Still get the error message after re-installation

e.g. `compile with USE_DIST_KVSTORE=1 to use
dist` after recompiling with `USE_DIST_KVSTORE=1`

It is often because mxnet failed to load the newly built library. If you