From 47591f923873bad36ad2769e6b5d7d84d5a74a20 Mon Sep 17 00:00:00 2001
From: Anirudh Acharya
Date: Fri, 11 Jan 2019 20:45:18 -0800
Subject: [PATCH] beta doc fixes

---
 R-package/R/context.R                          | 2 +-
 R-package/R/model.R                            | 2 +-
 R-package/R/optimizer.R                        | 2 +-
 R-package/R/rnn.graph.R                        | 2 +-
 src/operator/optimizer_op.cc                   | 2 +-
 src/operator/tensor/elemwise_unary_op_basic.cc | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/R-package/R/context.R b/R-package/R/context.R
index 6bbb9aa6a926..1c5a56ed919e 100644
--- a/R-package/R/context.R
+++ b/R-package/R/context.R
@@ -22,7 +22,7 @@ init.context.default <- function() {
 
 #' Set/Get default context for array creation.
 #'
-#' @param new, optional takes \code{mx.cpu()} or \code{mx.gpu(id)}, new default ctx.
+#' @param new optional takes \code{mx.cpu()} or \code{mx.gpu(id)}, new default ctx.
 #' @return The default context.
 #'
 #' @export
diff --git a/R-package/R/model.R b/R-package/R/model.R
index d71bc5cae67d..51d1705ba5f0 100644
--- a/R-package/R/model.R
+++ b/R-package/R/model.R
@@ -562,7 +562,7 @@ mx.model.FeedForward.create <-
 #'
 #' @param model The MXNet Model.
 #' @param X The dataset to predict.
-#' @param ctx mx.cpu() or mx.gpu(i) The device used to generate the prediction.
+#' @param ctx mx.cpu() or mx.gpu(). The device used to generate the prediction.
 #' @param array.batch.size The batch size used in batching. Only used when X is R's array.
 #' @param array.layout can be "auto", "colmajor", "rowmajor", (detault=auto)
 #' The layout of array. "rowmajor" is only supported for two dimensional array.
diff --git a/R-package/R/optimizer.R b/R-package/R/optimizer.R
index c0af11dc922a..9a858d5a6c56 100644
--- a/R-package/R/optimizer.R
+++ b/R-package/R/optimizer.R
@@ -21,7 +21,7 @@
 #' @param learning.rate float, default=0.01
 #' The initial learning rate.
 #' @param momentum float, default=0
-#' The momentumvalue
+#' The momentum value
 #' @param wd float, default=0.0
 #' L2 regularization coefficient add to all the weights.
 #' @param rescale.grad float, default=1.0
diff --git a/R-package/R/rnn.graph.R b/R-package/R/rnn.graph.R
index 2ceefb57d352..1225fa511b51 100644
--- a/R-package/R/rnn.graph.R
+++ b/R-package/R/rnn.graph.R
@@ -195,7 +195,7 @@ gru.cell <- function(num_hidden, indata, prev.state, param, seqidx, layeridx, dr
 }
 
 
-#' unroll representation of RNN running on non CUDA device
+#' Unroll representation of RNN running on non CUDA device
 #'
 #' @param config Either seq-to-one or one-to-one
 #' @param cell_type Type of RNN cell: either gru or lstm
diff --git a/src/operator/optimizer_op.cc b/src/operator/optimizer_op.cc
index 6c44f99c1443..a52a6f32907c 100644
--- a/src/operator/optimizer_op.cc
+++ b/src/operator/optimizer_op.cc
@@ -316,7 +316,7 @@ inline bool SGDStorageType(const nnvm::NodeAttrs& attrs,
 
 NNVM_REGISTER_OP(sgd_update)
 MXNET_ADD_SPARSE_OP_ALIAS(sgd_update)
-.describe(R"code(Update function for Stochastic Gradient Descent (SDG) optimizer.
+.describe(R"code(Update function for Stochastic Gradient Descent (SGD) optimizer.
 
 It updates the weights using::
 
diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc
index 7f69395d1c87..c0d420f9599b 100644
--- a/src/operator/tensor/elemwise_unary_op_basic.cc
+++ b/src/operator/tensor/elemwise_unary_op_basic.cc
@@ -71,7 +71,7 @@ static bool IdentityAttrLikeRhsStorageType(const nnvm::NodeAttrs& attrs,
 
 // relu
 MXNET_OPERATOR_REGISTER_UNARY_WITH_RSP_CSR(relu, cpu, mshadow_op::relu)
-.describe(R"code(Computes rectified linear.
+.describe(R"code(Computes rectified linear activation.
 
 .. math::
    max(features, 0)