From 4ce679367839988032f94226e8832e2c0c75e8ed Mon Sep 17 00:00:00 2001
From: Vamshidhar Dantu
Date: Fri, 9 Nov 2018 08:35:17 -0800
Subject: [PATCH 1/2] Addressed doc issues

---
 python/mxnet/gluon/nn/basic_layers.py |  9 +++++----
 python/mxnet/ndarray/ndarray.py       |  2 +-
 python/mxnet/optimizer/optimizer.py   | 24 ++++++++++++------------
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/python/mxnet/gluon/nn/basic_layers.py b/python/mxnet/gluon/nn/basic_layers.py
index c95601c6f796..199e396ac824 100644
--- a/python/mxnet/gluon/nn/basic_layers.py
+++ b/python/mxnet/gluon/nn/basic_layers.py
@@ -663,20 +663,21 @@ class HybridLambda(HybridBlock):
     ----------
     function : str or function
         Function used in lambda must be one of the following:
-        1) the name of an operator that is available in both symbol and ndarray. For example::
+        1) The name of an operator that is available in both symbol and ndarray. For example::
 
             block = HybridLambda('tanh')
 
-        2) a function that conforms to "def function(F, data, *args)". For example::
+        2) A function that conforms to ``def function(F, data, *args)``. For example::
 
             block = HybridLambda(lambda F, x: F.LeakyReLU(x, slope=0.1))
 
     Inputs:
-        - ** *args **: one or more input data. First argument must be symbol or ndarray.
-          Their shapes depend on the function.
+        - ** *args **: one or more input data. First argument must be symbol or ndarray. Their \
+          shapes depend on the function.
 
     Output:
         - ** *outputs **: one or more output data. Their shapes depend on the function.
+
     """
     def __init__(self, function, prefix=None):
         super(HybridLambda, self).__init__(prefix=prefix)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index bf1140d2071b..1da2ebe5252e 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -998,7 +998,7 @@ def reshape(self, *shape, **kwargs):
 
         Example::
 
-        - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be
+        - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be \
           (40,5).
 
         - with reverse=1, output shape will be (50,4).

diff --git a/python/mxnet/optimizer/optimizer.py b/python/mxnet/optimizer/optimizer.py
index bc03497fc99f..fb452341e76b 100644
--- a/python/mxnet/optimizer/optimizer.py
+++ b/python/mxnet/optimizer/optimizer.py
@@ -692,18 +692,18 @@ class LBSGD(Optimizer):
     Parameters
     ----------
     momentum : float, optional
-       The momentum value.
-    multi_precision: bool, optional
-       Flag to control the internal precision of the optimizer.
-       ``False`` results in using the same precision as the weights (default),
-       ``True`` makes internal 32-bit copy of the weights and applies gradients
-       in 32-bit precision even if actual weights used in the model have lower precision.`<
-       Turning this on can improve convergence and accuracy when training with float16.
-    warmup_strategy: string ('linear', 'power2', 'sqrt'. , 'lars' default : 'linear')
-    warmup_epochs: unsigned, default: 5
-    batch_scale: unsigned, default: 1 (same as batch size*numworkers)
-    updates_per_epoch: updates_per_epoch (default: 32, Default might not reflect true number batches per epoch. Used for warmup.)
-    begin_epoch: unsigned, default 0, starting epoch.
+        The momentum value.
+    multi_precision : bool, optional
+        Flag to control the internal precision of the optimizer.
+        "False" results in using the same precision as the weights (default).
+ "True" makes internal 32-bit copy of the weights and applies gradients \ + in 32-bit precision even if actual weights used in the model have lower precision. \ + Turning this on can improve convergence and accuracy when training with float16. + warmup_strategy : string ('linear', 'power2', 'sqrt'. , 'lars' default : 'linear') + warmup_epochs : unsigned, default: 5 + batch_scale : unsigned, default: 1 (same as batch size*numworkers) + updates_per_epoch : updates per epoch (default: 32, Default might not reflect true number batches per epoch. Used for warmup.) + begin_epoch : unsigned, default 0, starting epoch. """ def __init__(self, momentum=0.0, multi_precision=False, warmup_strategy='linear', From 52e4c31b4648af69d15665db06f7d35dcef2493b Mon Sep 17 00:00:00 2001 From: vdantu <36211508+vdantu@users.noreply.github.com> Date: Tue, 13 Nov 2018 15:50:07 -0800 Subject: [PATCH 2/2] Update optimizer.py --- python/mxnet/optimizer/optimizer.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/mxnet/optimizer/optimizer.py b/python/mxnet/optimizer/optimizer.py index 6cec599b4a3a..d632a8c7c640 100644 --- a/python/mxnet/optimizer/optimizer.py +++ b/python/mxnet/optimizer/optimizer.py @@ -695,7 +695,6 @@ class LBSGD(Optimizer): ---------- momentum : float, optional The momentum value. - multi_precision: bool, optional Flag to control the internal precision of the optimizer.:: @@ -709,7 +708,6 @@ class LBSGD(Optimizer): batch_scale: unsigned, default: 1 (same as batch size*numworkers) updates_per_epoch: updates_per_epoch (default: 32, Default might not reflect true number batches per epoch. Used for warmup.) begin_epoch: unsigned, default 0, starting epoch. - """ def __init__(self, momentum=0.0, multi_precision=False, warmup_strategy='linear', warmup_epochs=5, batch_scale=1, updates_per_epoch=32, begin_epoch=0, num_epochs=60,