
Commit

rename returns/heads -> outputs
tqchen committed Sep 13, 2015
1 parent 99c7975 commit 4471fc8
Showing 18 changed files with 69 additions and 68 deletions.
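In short, the commit renames the "returns"/"heads" terminology to "outputs" across the C API, the C++ headers, and the Python bindings. A minimal Python sketch of what call sites look like after the change (the small network, its names, and the input shape are illustrative, not taken from this commit; the old spellings are shown in comments):

import mxnet as mx

data = mx.symbol.Variable('data')
fc = mx.symbol.FullyConnected(data=data, num_hidden=10)
net = mx.symbol.Softmax(data=fc)

print(net.list_outputs())                      # was: net.list_returns()

in_data = mx.nd.empty((100, 784), mx.cpu())
executor = net.simple_bind(mx.cpu(), data=in_data)
out = executor.outputs[0]                      # was: executor.heads()[0]; outputs is now a property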
2 changes: 1 addition & 1 deletion example/cifar10/cifar10.py
@@ -164,7 +164,7 @@ def RandomInit(narray):
 in_data = mx.nd.empty(data_shape, mx.gpu())
 executor = loss.simple_bind(mx.gpu(), data = in_data)
 
-out_narray = executor.heads()[0]
+out_narray = executor.outputs[0]
 pred = mx.nd.zeros(out_narray.shape, mx.cpu())
 
 arg_narrays, grad_narrays = executor.list_arguments()
8 changes: 4 additions & 4 deletions example/mnist/mlp_gpu.py
@@ -30,8 +30,8 @@ def CalAcc(out, label):
 arg_shapes, out_shapes, aux_shapes = softmax.infer_shape(data=data_shape)
 
 # create GPU NArray for data
-arg_narrays = [mx.nd.zeros(shape, ctx=mx.Context("gpu")) for shape in arg_shapes]
-grad_narrays = [mx.nd.zeros(shape, ctx=mx.Context("gpu")) for shape in arg_shapes]
+arg_narrays = [mx.nd.zeros(shape, ctx=mx.gpu()) for shape in arg_shapes]
+grad_narrays = [mx.nd.zeros(shape, ctx=mx.gpu()) for shape in arg_shapes]
 inputs = dict(zip(args_list, arg_narrays))
 
 # create CPU NArray for result stat
@@ -50,8 +50,8 @@ def CalAcc(out, label):
 # TODO(bing): think of a better bind interface
 executor = softmax.bind(mx.Context('gpu'), arg_narrays, grad_narrays)
 # create gradient NArray
-out_narray = executor.heads()[0]
-grad_narray = mx.nd.zeros(out_narray.shape, ctx=mx.Context("gpu"))
+out_narray = executor.outputs[0]
+grad_narray = mx.nd.zeros(out_narray.shape, ctx=mx.gpu())
 
 
 # update
8 changes: 4 additions & 4 deletions include/mxnet/c_api.h
@@ -388,7 +388,7 @@ MXNET_DLL int MXSymbolListArguments(SymbolHandle symbol,
  * \param out_str_array pointer to hold the output string array
  * \return 0 when success, -1 when failure happens
  */
-MXNET_DLL int MXSymbolListReturns(SymbolHandle symbol,
+MXNET_DLL int MXSymbolListOutputs(SymbolHandle symbol,
                                   mx_uint *out_size,
                                   const char ***out_str_array);
 /*!
@@ -502,9 +502,9 @@ MXNET_DLL int MXExecutorBackward(ExecutorHandle handle,
  * \param out out put narray handles
  * \return 0 when success, -1 when failure happens
  */
-MXNET_DLL int MXExecutorHeads(ExecutorHandle handle,
-                              mx_uint *out_size,
-                              NDArrayHandle **out);
+MXNET_DLL int MXExecutorOutputs(ExecutorHandle handle,
+                                mx_uint *out_size,
+                                NDArrayHandle **out);
 
 /*!
  * \brief Generate Executor from symbol
20 changes: 10 additions & 10 deletions include/mxnet/operator.h
@@ -94,8 +94,8 @@ class Operator {
   *
   * \note
   * Convention:
-  *   out_grad.size() == OperatorProperty.NumVisibleReturns()
-  *   out_data.size() == OperatorProperty.NumReturns()
+  *   out_grad.size() == OperatorProperty.NumVisibleOutputs()
+  *   out_data.size() == OperatorProperty.NumOutputs()
   *   out_data can contain additional invisible returns that remembers the
   *   state carried from the Forward pass. For example mask in the dropout.
   *   The gradients are passed from visible returns in this function.
@@ -157,10 +157,10 @@ class OperatorProperty {
return {"data"};
}
/*!
* \brief Get name of return values of Operator
* \return name of return values.
* \brief Get name of output values of Operator
* \return name of output values.
*/
virtual std::vector<std::string> ListReturns() const {
virtual std::vector<std::string> ListOutputs() const {
return {"output"};
}
/*!
@@ -171,23 +171,23 @@
     return {};
   }
   /*! \return number of real return values of the Operator */
-  virtual int NumReturns() const {
+  virtual int NumOutputs() const {
     return 1;
   }
   /*!
    * \brief get number of visible return values during Symbol creation.
-   * If NumVisibleReturns() = k, and NumReturns() = n.
+   * If NumVisibleOutputs() = k, and NumOutputs() = n.
    * The first k returns will be presented in the resulting symbol.
    *
    * The rest of the returns can be used for auxiliary states for Backward.
-   * For example, Dropout will return [data, mask], with NumVisibleReturns() == 1.
+   * For example, Dropout will return [data, mask], with NumVisibleOutputs() == 1.
    * So when user call sym = Dropout(input), only data is presented in sym.
    * But all the returns will be presented in out_data parameter of Backward if requested.
    *
    * \return number of default return values
    */
-  virtual int NumVisibleReturns() const {
-    return NumReturns();
+  virtual int NumVisibleOutputs() const {
+    return NumOutputs();
   }
   /*!
    * \brief infer the shapes of outputs and unknown input arguments
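The convention documented in these comments is what keeps bookkeeping outputs, such as the dropout mask, out of user-facing symbols: an operator may declare more outputs (NumOutputs) than it exposes (NumVisibleOutputs), and the hidden ones are only handed to Backward. A rough Python illustration, assuming a Dropout operator along the lines the comment describes (the parameter and the auto-generated output name are illustrative):

import mxnet as mx

drop = mx.symbol.Dropout(data=mx.symbol.Variable('data'), p=0.5)
# Dropout computes an output and a mask internally (NumOutputs() == 2),
# but NumVisibleOutputs() == 1, so the symbol exposes a single head:
print(drop.list_outputs())   # e.g. ['dropout0_output']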
10 changes: 5 additions & 5 deletions include/mxnet/symbolic.h
@@ -208,7 +208,7 @@ class Symbol {
    */
   std::vector<std::string> ListArguments() const;
   /*! \return get the descriptions of outputs for this symbol */
-  std::vector<std::string> ListReturns() const;
+  std::vector<std::string> ListOutputs() const;
   /*! \return get the descriptions of auxiliary data for this symbol */
   std::vector<std::string> ListAuxiliaryStates() const;
   /*!
@@ -303,7 +303,7 @@ class Symbol {
    * \brief get number of outputs of this symbol
    * \return number of outputs
    */
-  inline size_t NumReturns() const {
+  inline size_t NumOutputs() const {
     return heads_.size();
   }
   /*!
@@ -401,10 +401,10 @@ class Executor {
    */
   virtual void Backward(const std::vector<NDArray> &head_grads) = 0;
   /*!
-   * \brief get array of heads in the executor.
-   * \return array of heads in the executor.
+   * \brief get array of outputs in the executor.
+   * \return array of outputs in the executor.
    */
-  virtual const std::vector<NDArray> &heads() const = 0;
+  virtual const std::vector<NDArray> &outputs() const = 0;
   /*!
    * \brief Create an operator by bind symbol with context and arguments.
    * If user do not want to compute the gradients of i-th argument, grad_req_type[i] can be kNullOp.
5 changes: 3 additions & 2 deletions python/mxnet/executor.py
@@ -81,7 +81,8 @@ def backward(self, head_grads=None):
         ndarray = c_array(NDArrayHandle, [item.handle for item in head_grads])
         check_call(_LIB.MXExecutorBackward(self.handle, len(head_grads), ndarray))
 
-    def heads(self):
+    @property
+    def outputs(self):
         """list all heads' output ndarray
         Returns
@@ -94,5 +95,5 @@ def heads(self):
         # if user set the content of the head, the backward behavior can be incorrect.
         out_size = mx_uint()
         handles = ctypes.POINTER(NDArrayHandle)()
-        check_call(_LIB.MXExecutorHeads(self.handle, ctypes.byref(out_size), ctypes.byref(handles)))
+        check_call(_LIB.MXExecutorOutputs(self.handle, ctypes.byref(out_size), ctypes.byref(handles)))
         return [NDArray(NDArrayHandle(handles[i])) for i in range(out_size.value)]
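Since outputs is now a property rather than a method, call sites drop the parentheses; per the comment above, the returned NDArrays are views of the executor's internal heads, and writing into them can make backward behave incorrectly. A small usage sketch (the copy-to-CPU pattern mirrors the cifar10 example in this commit; the executor is the illustrative one from the first sketch):

executor.forward()
out = executor.outputs[0]                  # was: executor.heads()[0]
pred = mx.nd.zeros(out.shape, mx.cpu())
out.copyto(pred)                           # copy the result out instead of mutating the output in place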
10 changes: 5 additions & 5 deletions python/mxnet/symbol.py
@@ -145,17 +145,17 @@ def list_arguments(self):
             self.handle, ctypes.byref(size), ctypes.byref(sarr)))
         return [py_str(sarr[i]) for i in range(size.value)]
 
-    def list_returns(self):
-        """List all returns in the symbol.
+    def list_outputs(self):
+        """List all outputs in the symbol.
         Returns
         -------
         returns : list of string
-            List of all the returns.
+            List of all the outputs.
         """
         size = ctypes.c_uint()
         sarr = ctypes.POINTER(ctypes.c_char_p)()
-        check_call(_LIB.MXSymbolListReturns(
+        check_call(_LIB.MXSymbolListOutputs(
             self.handle, ctypes.byref(size), ctypes.byref(sarr)))
         return [py_str(sarr[i]) for i in range(size.value)]

@@ -203,7 +203,7 @@ def infer_shape(self, *args, **kwargs):
             The order is in the same order as list_arguments()
         out_shapes : list of tuple or None
             List of shapes of outputs.
-            The order is in the same order as list_returns()
+            The order is in the same order as list_outputs()
         aux_shapes : list of tuple or None
             List of shapes of outputs.
             The order is in the same order as list_auxiliary()
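The docstring fix above also makes the ordering contract explicit: the shape lists returned by infer_shape line up index-for-index with list_arguments() and list_outputs() respectively. A hedged sketch, reusing the illustrative net and input shape from the first example:

arg_shapes, out_shapes, aux_shapes = net.infer_shape(data=(100, 784))
# arg_shapes[i] is the shape of net.list_arguments()[i]
# out_shapes[i] is the shape of net.list_outputs()[i]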
12 changes: 6 additions & 6 deletions src/c_api.cc
@@ -556,13 +556,13 @@ int MXSymbolListArguments(SymbolHandle symbol,
   API_END();
 }
 
-int MXSymbolListReturns(SymbolHandle symbol,
+int MXSymbolListOutputs(SymbolHandle symbol,
                         mx_uint *out_size,
                         const char ***out_str_array) {
   Symbol *s = static_cast<Symbol*>(symbol);
   MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
   API_BEGIN();
-  ret->ret_vec_str = std::move(s->ListReturns());
+  ret->ret_vec_str = std::move(s->ListOutputs());
   ret->ret_vec_charp.clear();
   for (size_t i = 0; i < ret->ret_vec_str.size(); ++i) {
     ret->ret_vec_charp.push_back(ret->ret_vec_str[i].c_str());
@@ -705,13 +705,13 @@ int MXExecutorBackward(ExecutorHandle handle,
   API_END();
 }
 
-int MXExecutorHeads(ExecutorHandle handle,
-                    mx_uint *out_size,
-                    NDArrayHandle **out) {
+int MXExecutorOutputs(ExecutorHandle handle,
+                      mx_uint *out_size,
+                      NDArrayHandle **out) {
   MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
   API_BEGIN();
   Executor *exec = static_cast<Executor*>(handle);
-  std::vector<NDArray> heads = exec->heads();
+  std::vector<NDArray> heads = exec->outputs();
   ret->ret_handles.resize(heads.size());
   for (size_t i = 0; i < heads.size(); ++i) {
     NDArray *ptr = new NDArray();
6 changes: 3 additions & 3 deletions src/operator/batch_norm-inl.h
@@ -239,19 +239,19 @@ class BatchNormProp : public OperatorProperty {
     return {{out_grad[kOut], in_grad[kData]}};
   }
 
-  int NumVisibleReturns() const override {
+  int NumVisibleOutputs() const override {
     return 1;
   }
 
-  int NumReturns() const override {
+  int NumOutputs() const override {
     return 4;
   }
 
   std::vector<std::string> ListArguments() const override {
     return {"data", "gamma", "beta"};
   }
 
-  std::vector<std::string> ListReturns() const override {
+  std::vector<std::string> ListOutputs() const override {
     return {"output", "output_no_affine", "mean", "var"};
   }

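BatchNormProp is a concrete case of the visible/total split: it declares four outputs (output, output_no_affine, mean, var) but only the first is visible, so a symbol built from it exposes a single head. A rough sketch (the auto-generated output name is an example):

bn = mx.symbol.BatchNorm(data=mx.symbol.Variable('data'))
print(bn.list_outputs())   # e.g. ['batchnorm0_output']; the other three outputs stay internal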
10 changes: 5 additions & 5 deletions src/symbol/graph_executor.cc
@@ -23,9 +23,9 @@ class GraphExecutor::BackwardOpWrapper : public Operator {
   explicit BackwardOpWrapper(const OperatorProperty *prop,
                              std::shared_ptr<Operator> forward_op)
       : op_(forward_op) {
-    out_grad_.resize(prop->NumVisibleReturns());
+    out_grad_.resize(prop->NumVisibleOutputs());
     in_data_.resize(prop->ListArguments().size());
-    out_data_.resize(prop->NumReturns());
+    out_data_.resize(prop->NumOutputs());
 
     std::vector<TBlob*> out_grad_ptr(out_grad_.size());
     for (size_t i = 0; i < out_grad_.size(); ++i) {
@@ -88,7 +88,7 @@ GraphExecutor::GetResource(uint32_t node_id) const {
 inline int GraphExecutor::GetNumOutputs(uint32_t node_id) const {
   const StaticGraph::Node &node = graph_.nodes[node_id];
   if (node.is_forward()) {
-    return node.op->NumReturns();
+    return node.op->NumOutputs();
   } else if (node.is_backward()) {
     return static_cast<int>(
         graph_.nodes[node.backward_source_id].op->ListArguments().size());
@@ -128,9 +128,9 @@ inline std::vector<std::pair<T, T> > GraphExecutor::GetInplaceOption(
     // forward property
     const OperatorProperty *fwd = graph_.nodes[node.backward_source_id].op.get();
 
-    std::vector<int> out_grad_index(fwd->NumVisibleReturns());
+    std::vector<int> out_grad_index(fwd->NumVisibleOutputs());
     std::vector<int> in_data_index(fwd->ListArguments().size());
-    std::vector<int> out_data_index(fwd->NumReturns());
+    std::vector<int> out_data_index(fwd->NumOutputs());
     CHECK_EQ(in_data_index.size(), out_data.size());
     int counter = 0;
     for (size_t i = 0; i < out_grad_index.size(); ++i) {
2 changes: 1 addition & 1 deletion src/symbol/graph_executor.h
@@ -22,7 +22,7 @@ class GraphExecutor : public Executor {
   virtual ~GraphExecutor();
   virtual void Forward(bool is_train);
   virtual void Backward(const std::vector<NDArray> &head_grads);
-  virtual const std::vector<NDArray> &heads() const {
+  virtual const std::vector<NDArray> &outputs() const {
     return heads_ndarray_;
   }
   // implement Executor::Bind, only call it once.
6 changes: 3 additions & 3 deletions src/symbol/static_graph.cc
@@ -133,7 +133,7 @@ bool StaticGraph::InferShape(std::vector<TShape> *in_shape,
   for (size_t i = 0; i < nodes.size(); ++i) {
     int nout = 1;
     if (nodes[i].is_forward()) {
-      nout = nodes[i].op->NumReturns();
+      nout = nodes[i].op->NumOutputs();
     } else if (nodes[i].is_backward()) {
       nout = static_cast<int>(nodes[nodes[i].backward_source_id].inputs.size());
     }
@@ -215,9 +215,9 @@ void StaticGraph::MakeBackwardPass(std::vector<uint32_t> *head_grad_nodes,
     // get out_grad and out_data entry
     std::vector<DataEntry> out_grad, out_data;
     // nvisible is out_grad.size()
-    int nvisible = nodes[nid].op->NumVisibleReturns();
+    int nvisible = nodes[nid].op->NumVisibleOutputs();
     // ntotal is out_data.size()
-    int ntotal = nodes[nid].op->NumReturns();
+    int ntotal = nodes[nid].op->NumOutputs();
     // check all outpus
     for (int i = 0; i < ntotal; ++i) {
       DataEntry odata(nid, static_cast<uint32_t>(i));
18 changes: 9 additions & 9 deletions src/symbol/symbol.cc
@@ -192,15 +192,15 @@ std::vector<std::string> Symbol::ListArguments() const {
   }
 }
 
-std::vector<std::string> Symbol::ListReturns() const {
+std::vector<std::string> Symbol::ListOutputs() const {
   std::vector<std::string> ret;
   for (auto &head : heads_) {
     if (head.source->is_variable()) {
       ret.push_back(head.source->name);
     } else {
       // TODO(bing) rethink about output naming
       auto &hname = head.source->name;
-      std::string rname = head.source->op->ListReturns()[head.index];
+      std::string rname = head.source->op->ListOutputs()[head.index];
       if (hname.length() == 0) {
         ret.push_back(std::move(rname));
       } else {
@@ -233,7 +233,7 @@ std::vector<std::string> Symbol::ListAuxiliaryStates() const {
 }
 
 Symbol Symbol::operator[] (size_t index) const {
-  size_t nreturn = NumReturns();
+  size_t nreturn = NumOutputs();
   CHECK_LT(index, nreturn) << "Symbol only accept nonnegative index";
   if (nreturn == 1) {
     return *this;
@@ -246,12 +246,12 @@ Symbol Symbol::operator[] (size_t index) const {

 void Symbol::Compose(const std::vector<Symbol>& args,
                      const std::string& name) {
-  CHECK_EQ(NumReturns(), 1) << "Only composition of value function is supported currently";
+  CHECK_EQ(NumOutputs(), 1) << "Only composition of value function is supported currently";
   CHECK(!heads_[0].source->is_variable()) << "Variable cannot be composed";
   heads_[0].source->name = name;
   for (size_t i = 0; i < args.size(); ++i) {
-    CHECK_EQ(args[i].NumReturns(), 1)
-        << "Argument " << i << " is a tuple with " << args[i].NumReturns()
+    CHECK_EQ(args[i].NumOutputs(), 1)
+        << "Argument " << i << " is a tuple with " << args[i].NumOutputs()
         << " elements, scalar is required";
   }
   // positional arguments requires all arguments for now.
@@ -305,11 +305,11 @@ void Symbol::Compose(const std::vector<Symbol>& args,

 void Symbol::Compose(const std::unordered_map<std::string, Symbol>& kwargs,
                      const std::string& name) {
-  CHECK_EQ(NumReturns(), 1) << "Only composition of value function is supported currently";
+  CHECK_EQ(NumOutputs(), 1) << "Only composition of value function is supported currently";
   CHECK(!heads_[0].source->is_variable()) << "Variable cannot be composed";
   heads_[0].source->name = name;
   for (const auto& kv : kwargs) {
-    CHECK_EQ(kv.second.NumReturns(), 1)
+    CHECK_EQ(kv.second.NumOutputs(), 1)
         << "Keyword Argument " << kv.first << " is a tuple, scalar is required";
   }
   size_t nmatched = 0;
@@ -483,7 +483,7 @@ bool Symbol::InferShape(const std::unordered_map<std::string, TShape>& known_arg
 Symbol Symbol::Create(OperatorProperty *op) {
   // use special representation for atomic symbol
   auto node = std::make_shared<Node>(op, "");
-  size_t nret = op->NumVisibleReturns();
+  size_t nret = op->NumVisibleOutputs();
   Symbol s;
   for (uint32_t i = 0; i < nret; ++i) {
     s.heads_.push_back(DataEntry(node, i));
2 changes: 1 addition & 1 deletion tests/python/train/test_conv.py
@@ -53,7 +53,7 @@ def CalAcc(out, label):
 executor = softmax.bind(mx.Context('cpu'), arg_narrays, grad_narrays, 'write', aux_narrays)
 # update
 
-out_narray = executor.heads()[0]
+out_narray = executor.outputs[0]
 grad_narray = mx.nd.empty(out_narray.shape)
 
 epoch = 1
2 changes: 1 addition & 1 deletion tests/python/train/test_mlp.py
@@ -38,7 +38,7 @@ def CalAcc(out, label):
 executor = softmax.bind(mx.Context('cpu'), arg_narrays, grad_narrays)
 # update
 
-out_narray = executor.heads()[0]
+out_narray = executor.outputs[0]
 grad_narray = mx.nd.empty(out_narray.shape)
 
 epoch = 9
(Diffs for the remaining 3 changed files were not loaded in this view.)

0 comments on commit 4471fc8
