From e6a44da273affc0da6c8fa4f22202b8a4385784c Mon Sep 17 00:00:00 2001
From: Bing Xu
Date: Tue, 12 Jan 2016 22:53:10 -0700
Subject: [PATCH 1/3] rename plugin.mk

---
 make/config.mk                         | 2 +-
 plugin/sframe/{SFrame.mk => plugin.mk} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename plugin/sframe/{SFrame.mk => plugin.mk} (100%)

diff --git a/make/config.mk b/make/config.mk
index 223e4edc056c..484f60066a72 100644
--- a/make/config.mk
+++ b/make/config.mk
@@ -118,4 +118,4 @@ TORCH_PATH = $(HOME)/torch
 # whether to use sframe integration. This requires build sframe
 # git@github.com:dato-code/SFrame.git
 # SFRAME_PATH = $(HOME)/SFrame
-# MXNET_PLUGINS += plugin/sframe/SFrame.mk
+# MXNET_PLUGINS += plugin/sframe/plugin.mk
diff --git a/plugin/sframe/SFrame.mk b/plugin/sframe/plugin.mk
similarity index 100%
rename from plugin/sframe/SFrame.mk
rename to plugin/sframe/plugin.mk

From 27f38ae6d4958db0aea2b198749bb7a3bda678ff Mon Sep 17 00:00:00 2001
From: Bing Xu
Date: Wed, 13 Jan 2016 02:36:10 -0700
Subject: [PATCH 2/3] [op] fix

---
 src/operator/mshadow_op.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operator/mshadow_op.h b/src/operator/mshadow_op.h
index fa7dc0474ed5..ce209c2ed9e7 100644
--- a/src/operator/mshadow_op.h
+++ b/src/operator/mshadow_op.h
@@ -88,7 +88,7 @@ struct elu {
 struct elu_grad {
   template<typename DType>
   MSHADOW_XINLINE static DType Map(DType x, DType a) {
-    return DType(x > 0.0f ? 1.0f : a * expf(x));
+    return DType(x > 0.0f ? 1.0f : a + x);
   }
 };

From 4288a3d1e1d4b31d257ecfc0fc3b488d353ca386 Mon Sep 17 00:00:00 2001
From: Bing Xu
Date: Mon, 18 Jan 2016 22:28:36 +0000
Subject: [PATCH 3/3] fix 3546

---
 python/mxnet/model.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/mxnet/model.py b/python/mxnet/model.py
index 39a2c584be96..7458fd29e83b 100644
--- a/python/mxnet/model.py
+++ b/python/mxnet/model.py
@@ -80,7 +80,6 @@ def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names,
     for idx in range(len(param_arrays)):
         param_on_devs = param_arrays[idx]
         kvstore.init(idx, arg_params[param_names[idx]])
-
         if update_on_kvstore:
             kvstore.pull(idx, param_on_devs, priority=-idx)
@@ -202,7 +201,6 @@ def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
     if update_on_kvstore:
         kvstore.set_optimizer(optimizer)
-
     # Now start training
     for epoch in range(begin_epoch, end_epoch):
         # Training phase
@@ -416,6 +414,9 @@ def __init__(self, symbol, ctx=None,
             ctx = [cpu()]
         elif isinstance(ctx, Context):
             ctx = [ctx]
+        # disable multi-cpu data parallelism because blas will use all cpu resource
+        if ctx[0].device_type == "cpu" and len(ctx) > 1:
+            ctx = [cpu()]
         self.ctx = ctx
         # training parameters
         self.num_epoch = num_epoch
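
Note on the elu_grad change in PATCH 2/3: the sketch below (not part of the patch) checks numerically why `a + x` can stand in for `a * expf(x)`, under the assumption that the backward kernel receives the ELU *output* rather than the original input, with the forward op computing a * (exp(x) - 1) for x <= 0. With y = a * (exp(x) - 1), the derivative a * exp(x) equals a + y, so both forms agree. The helper names below are illustrative, not MXNet APIs.

    import math

    def elu(x, a):
        # hypothetical mirror of the forward op: x if x > 0 else a * (exp(x) - 1)
        return x if x > 0.0 else a * (math.exp(x) - 1.0)

    def elu_grad_input_form(x, a):
        # gradient written against the input x: a * exp(x) for x <= 0
        return 1.0 if x > 0.0 else a * math.exp(x)

    def elu_grad_output_form(y, a):
        # gradient written against the forward output y: a + y for y <= 0,
        # since d/dx [a*(exp(x)-1)] = a*exp(x) = a + a*(exp(x)-1) = a + y
        return 1.0 if y > 0.0 else a + y

    a = 1.0
    for x in [-3.0, -1.0, -0.1, 0.5, 2.0]:
        y = elu(x, a)
        # the output-based form matches the analytic gradient in x
        assert abs(elu_grad_output_form(y, a) - elu_grad_input_form(x, a)) < 1e-12
    print("elu_grad identity holds for sampled points")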