
Change kUndefinedStorage to -1. Pass lint (apache#27)
eric-haibin-lin committed Apr 29, 2017
1 parent 961da4e commit eb44e7d
Showing 14 changed files with 52 additions and 62 deletions.
21 changes: 13 additions & 8 deletions include/mxnet/ndarray.h
@@ -66,10 +66,10 @@ enum RowSparseAuxType {kIdx};
}

enum NDArrayStorageType {
kUndefinedStorage, // undefined storage
kDefaultStorage, // dense
kRowSparseStorage, // row sparse
kCSRStorage, // csr
kUndefinedStorage = -1, // undefined storage
kDefaultStorage, // dense
kRowSparseStorage, // row sparse
kCSRStorage, // csr
};

/*!
@@ -120,9 +120,13 @@ class NDArray {
: shape_(shape), offset_(0), dtype_(dtype), entry_({nullptr, 0, 0}) {
// Assign default aux types if not given
if (aux_types.size() == 0) {
if (storage_type == kRowSparseStorage) aux_types = {ROW_SPARSE_IDX_TYPE};
else if (storage_type == kCSRStorage) aux_types = {CSR_IND_PTR_TYPE, CSR_IDX_DTYPE};
else LOG(FATAL) << "Unknown storage type";
if (storage_type == kRowSparseStorage) {
aux_types = {ROW_SPARSE_IDX_TYPE};
} else if (storage_type == kCSRStorage) {
aux_types = {CSR_IND_PTR_TYPE, CSR_IDX_DTYPE};
} else {
LOG(FATAL) << "Unknown storage type";
}
}
ptr_ = std::make_shared<Chunk>(ctx, delay_alloc, aux_types, storage_type);
#if MKL_EXPERIMENTAL == 1
@@ -631,7 +635,8 @@ class NDArray {
delay_alloc = false;
}
}
inline void CheckAndAlloc(const TShape &shape, const std::vector<TShape> &aux_shapes, int dtype) {
inline void CheckAndAlloc(const TShape &shape, const std::vector<TShape> &aux_shapes,
int dtype) {
// calculate size, perform allocation
if (delay_alloc) {
CHECK_EQ(storage_type, kRowSparseStorage) << "Not yet implemented";
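The hunk above renumbers the storage-type enum so that kUndefinedStorage becomes -1, the same sentinel the dtype machinery already uses for "unknown" (see type_is_none in operator_common.h further down). A minimal Python sketch of the resulting numbering; the constant names are illustrative, not MXNet identifiers:

```python
# Illustrative constants mirroring the renumbered C++ enum (assumption, not MXNet API).
UNDEFINED_STORAGE = -1   # undefined storage; same "-1 means unknown" convention as dtype
DEFAULT_STORAGE = 0      # dense
ROW_SPARSE_STORAGE = 1   # row sparse
CSR_STORAGE = 2          # csr

def storage_is_unknown(stype):
    # With -1 as the sentinel, the storage check reads the same as the dtype check.
    return stype == UNDEFINED_STORAGE
```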
2 changes: 1 addition & 1 deletion nnvm
8 changes: 5 additions & 3 deletions python/mxnet/executor.py
@@ -11,7 +11,7 @@
from .base import mx_uint, NDArrayHandle, ExecutorHandle
from .base import check_call, c_array, py_str
from .ndarray import NDArray
from .sparse_ndarray import SparseNDArray
from .sparse_ndarray import SparseNDArray, _STORAGE_TYPE_STR_TO_ID
from . import ndarray as nd

# those functions are not used here, we just import them to keep backward compatibility
@@ -88,8 +88,10 @@ def _get_outputs(self):
storage_type = ctypes.c_int(0)
check_call(_LIB.MXNDArrayGetStorageType(ctypes.cast(handles[i], NDArrayHandle),
ctypes.byref(storage_type)))
output = NDArray(NDArrayHandle(handles[i])) if storage_type.value == 1 \
else SparseNDArray(NDArrayHandle(handles[i]))
assert(storage_type != _STORAGE_TYPE_STR_TO_ID['undefined'])
output = NDArray(NDArrayHandle(handles[i])) \
if storage_type.value == _STORAGE_TYPE_STR_TO_ID['default'] \
else SparseNDArray(NDArrayHandle(handles[i]))
outputs.append(output)
return outputs

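The executor hunk above is more than a lint fix: with 'default' renumbered from 1 to 0, the old literal comparison `storage_type.value == 1` would have started selecting the sparse wrapper for dense outputs. A rough, self-contained sketch of the dispatch pattern it moves to (the classes below are stand-ins for illustration, not MXNet's):

```python
# Stand-in mapping and classes; only the lookup-by-name pattern reflects the diff above.
_STORAGE_TYPE_STR_TO_ID = {'undefined': -1, 'default': 0, 'row_sparse': 1, 'csr': 2}

class NDArray:                      # stand-in for mxnet.ndarray.NDArray
    def __init__(self, handle):
        self.handle = handle

class SparseNDArray(NDArray):       # stand-in for mxnet.sparse_ndarray.SparseNDArray
    pass

def wrap_output(handle, stype_id):
    # Compare against named ids instead of magic numbers, so a renumbering of the
    # enum cannot silently change which wrapper class is chosen.
    assert stype_id != _STORAGE_TYPE_STR_TO_ID['undefined']
    if stype_id == _STORAGE_TYPE_STR_TO_ID['default']:
        return NDArray(handle)
    return SparseNDArray(handle)
```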
24 changes: 12 additions & 12 deletions python/mxnet/sparse_ndarray.py
@@ -43,17 +43,17 @@

# pylint: enable= no-member
_STORAGE_TYPE_ID_TO_STR = {
0 : 'undefined',
1 : 'default',
2 : 'row_sparse',
3 : 'csr',
-1 : 'undefined',
0 : 'default',
1 : 'row_sparse',
2 : 'csr',
}

_STORAGE_TYPE_STR_TO_ID = {
'undefined' : 0,
'default' : 1,
'row_sparse' : 2,
'csr' : 3,
'undefined' : -1,
'default' : 0,
'row_sparse' : 1,
'csr' : 2,
}

_STORAGE_AUX_TYPES = {
@@ -244,7 +244,7 @@ def array(values, index_list, storage_type, shape, ctx=None, dtype=mx_real_t, au
# TODO also specify auxtypes
assert(storage_type == 'row_sparse')
if not isinstance(values, NDArray):
values = ndarray.array(values)
values = ndarray.array(values)
for i, index in enumerate(index_list):
if not isinstance(index, NDArray):
index_list[i] = ndarray.array(index)
@@ -298,8 +298,8 @@ def zeros(shape, storage_type, ctx=None, dtype=mx_real_t, aux_types=None):
# pylint: enable= no-member, protected-access

_STORAGE_TYPE_TO_ND_CLASS = {
1 : ndarray.NDArray,
2 : SparseNDArray,
3 : SparseNDArray
_STORAGE_TYPE_STR_TO_ID['default'] : ndarray.NDArray,
_STORAGE_TYPE_STR_TO_ID['row_sparse'] : SparseNDArray,
_STORAGE_TYPE_STR_TO_ID['csr'] : SparseNDArray,
}
_init_ndarray_module(_STORAGE_TYPE_TO_ND_CLASS, "mxnet")
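The two lookup tables above are meant to stay exact inverses of each other, and the class-dispatch table now derives its keys from the name table instead of repeating raw integers. A quick self-contained check of that invariant, using the values from the hunk:

```python
# Values copied from the diff above; the derived inverse must round-trip exactly.
_STORAGE_TYPE_ID_TO_STR = {-1: 'undefined', 0: 'default', 1: 'row_sparse', 2: 'csr'}
_STORAGE_TYPE_STR_TO_ID = {name: i for i, name in _STORAGE_TYPE_ID_TO_STR.items()}

for stype_id, name in _STORAGE_TYPE_ID_TO_STR.items():
    assert _STORAGE_TYPE_STR_TO_ID[name] == stype_id
```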
5 changes: 2 additions & 3 deletions python/mxnet/symbol.py
@@ -508,7 +508,7 @@ def list_auxiliary_states(self):
def infer_storage_type(self, *args, **kwargs):
# TODO(haibin) refactor with dtype
# FIXME update doc
"""Infer the chunk type of outputs and arguments of given known types of arguments.
"""Infer the storage type of outputs and arguments of given known types of arguments.
User can either pass in the known types in positional way or keyword argument way.
Tuple of Nones is returned if there is not enough information passed in.
@@ -548,8 +548,7 @@ def infer_storage_type(self, *args, **kwargs):
raise TypeError('Argument need to be one of '+str(_STORAGE_TYPE_STR_TO_ID))
sdata.append(_STORAGE_TYPE_STR_TO_ID[s])
else:
#FIXME -1 or 0 for unknown / error chunk type?
sdata.append(-1)
sdata.append(_STORAGE_TYPE_STR_TO_ID['undefined'])
else:
keys = []
for k, v in kwargs.items():
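In infer_storage_type, unknown positions now fall back to the named 'undefined' id instead of a hard-coded -1, which resolves the FIXME about whether -1 or 0 marks an unknown type. A hedged sketch of that collection step (illustrative helper, not the MXNet implementation):

```python
_STORAGE_TYPE_STR_TO_ID = {'undefined': -1, 'default': 0, 'row_sparse': 1, 'csr': 2}

def collect_storage_ids(args):
    """Map storage-type names to ids, using the 'undefined' sentinel for unknowns."""
    sdata = []
    for s in args:
        if s is None:
            sdata.append(_STORAGE_TYPE_STR_TO_ID['undefined'])
        elif s in _STORAGE_TYPE_STR_TO_ID:
            sdata.append(_STORAGE_TYPE_STR_TO_ID[s])
        else:
            raise TypeError('Argument need to be one of ' + str(_STORAGE_TYPE_STR_TO_ID))
    return sdata

assert collect_storage_ids(['csr', None]) == [2, -1]
```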
4 changes: 2 additions & 2 deletions src/c_api/c_api.cc
@@ -131,7 +131,7 @@ int MXNDArrayCreate(const mx_uint *shape,
API_END();
}

// TODO remove this API
// TODO(haibin) remove this API
int MXNDArrayCreateSparse(NDArrayHandle data,
mx_uint num_aux,
NDArrayHandle *aux_vec,
@@ -357,7 +357,7 @@ int MXNDArrayGetStorageType(NDArrayHandle handle,
if (!arr->is_none()) {
*out_storage_type = arr->storage_type();
} else {
*out_storage_type = -1;
*out_storage_type = kUndefinedStorage;
}
API_END();
}
3 changes: 2 additions & 1 deletion src/c_api/c_api_symbolic.cc
@@ -513,7 +513,8 @@ int MXSymbolInferStorageType(SymbolHandle sym,
MXAPIThreadLocalEntry *ret = MXAPIThreadLocalStore::Get();
API_BEGIN();
nnvm::Graph g = Symbol2Graph(*s);
nnvm::StorageTypeVector arg_storage_types(g.indexed_graph().input_nodes().size());
nnvm::StorageTypeVector arg_storage_types(g.indexed_graph().input_nodes().size(),
kUndefinedStorage);
if (keys == nullptr && num_args != 0) {
std::vector<uint32_t> read_only_args = mxnet::ReadOnlyArgIndices(g.indexed_graph());
CHECK_LE(num_args, read_only_args.size());
16 changes: 10 additions & 6 deletions src/executor/graph_executor.cc
@@ -435,12 +435,12 @@ Graph GraphExecutor::InitGraph(nnvm::Symbol symbol,
const auto& vstorage_type = g.GetAttr<nnvm::StorageTypeVector>("storage_type");

// dispatch on a per op basis
nnvm::StorageTypeVector dispatch_stypes(idx.num_nodes());
nnvm::StorageTypeVector dispatch_stypes(idx.num_nodes(), kUndefinedStorage);
for (size_t nid = 0; nid < idx.num_nodes(); nid++) {
const auto& inode = idx[nid];
auto num_outputs = inode.source->num_outputs();
auto num_inputs = inode.inputs.size();
nnvm::StorageTypeVector vs(num_inputs + num_outputs);
nnvm::StorageTypeVector vs(num_inputs + num_outputs, kUndefinedStorage);
for (size_t i = 0; i < num_inputs; i++) {
auto e = inode.inputs[i];
vs[i] = vstorage_type[idx.entry_id(e)];
@@ -492,11 +492,11 @@ void GraphExecutor::InitDataEntryMemory(std::vector<NDArray>* shared_pool) {
CHECK_EQ(idx.num_node_entries(), vstorage.size());
CHECK_EQ(data_entry_.size(), vshape.size());
std::vector<Context> data_context(idx.num_node_entries());
std::vector<NDArrayStorageType> data_storage_type(idx.num_node_entries());
std::vector<NDArrayStorageType> data_storage_type(idx.num_node_entries(), kUndefinedStorage);
for (uint32_t nid = 0; nid < idx.num_nodes(); ++nid) {
for (uint32_t i = 0; i < idx[nid].source->num_outputs(); ++i) {
data_context[idx.entry_id(nid, i)] = vctx[nid];
CHECK_NE(vstorage_type[nid], -1);
CHECK_NE(vstorage_type[nid], kUndefinedStorage);
data_storage_type[idx.entry_id(nid, i)] = (NDArrayStorageType) vstorage_type[nid];
}
}
@@ -863,7 +863,9 @@ void GraphExecutor::RunOps(bool is_train, size_t topo_start, size_t topo_end) {
#else
bool profiling = false;
#endif
// LOG(INFO) << "Running " << nid;
#if EXECUTOR_DEBUG
LOG(INFO) << "Running node " << nid << " - " << seg_op.topo_end - 1;
#endif
Engine::Get()->Push(seg_op.opr, seg_op.ctx, 0, profiling);
nid = seg_op.topo_end - 1;
continue;
@@ -874,7 +876,9 @@ void GraphExecutor::RunOps(bool is_train, size_t topo_start, size_t topo_end) {
OpNode& opnode = op_nodes_[nid];
if (op_nodes_[nid].skip_exec_node) continue;
opnode.exec->op_ctx.is_train = is_train;
// LOG(INFO) << "Running " << nid;
#if EXECUTOR_DEBUG
LOG(INFO) << "Running node " << nid;
#endif
if (opnode.exec->exec_type() == Operator::kCrossDeviceCopy) {
CHECK_EQ(inode.inputs.size(), 1U);
CHECK_EQ(opnode.exec->in_array.size(), 1U);
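These graph-executor hunks are a direct consequence of the renumbering: a value-initialized vector of ints is all zeros, and zero now means kDefaultStorage rather than "undefined", so unset entries have to be filled with the sentinel explicitly (hence the extra kUndefinedStorage constructor argument). A small Python illustration of the hazard; the constants are assumptions mirroring the enum:

```python
UNDEFINED_STORAGE, DEFAULT_STORAGE = -1, 0

stypes = [0] * 4                    # zero-fill used to mean "undefined"; now it claims dense
stypes = [UNDEFINED_STORAGE] * 4    # explicit sentinel fill, as in the hunks above

# Later checks such as CHECK_NE(vstorage_type[nid], kUndefinedStorage) only catch
# missing assignments if the slots really start out as the sentinel.
assert all(s == UNDEFINED_STORAGE for s in stypes)
```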
2 changes: 1 addition & 1 deletion src/ndarray/ndarray.cc
@@ -237,7 +237,7 @@ void CopyFromTo(const NDArray &from, NDArray *to, int priority, bool alloc_outpu
// skip to copy to itself
return;
}
CHECK(from.storage_type() == to->storage_type()) << "Copying with different storage type";
CHECK_EQ(from.storage_type(), to->storage_type()) << "Copying with different storage type";
CHECK(from.shape() == to->shape())
<< "operands shape mismatch"
<< "from.shape = " << from.shape() << " to.shape=" << to->shape();
4 changes: 2 additions & 2 deletions src/operator/elemwise_op_common.h
@@ -114,7 +114,7 @@ inline bool ElemwiseStorageType(const nnvm::NodeAttrs& attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), static_cast<size_t>(n_in)) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
return ElemwiseStorageAttr<int, storage_type_is_none, storage_type_assign, false, true>(
return ElemwiseStorageAttr<int, type_is_none, type_assign, false, true>(
attrs, in_attrs, out_attrs);
}

@@ -123,7 +123,7 @@ inline bool IdentityAttrLikeRhsStorageType(const nnvm::NodeAttrs& attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), static_cast<size_t>(2)) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), static_cast<size_t>(1)) << " in operator " << attrs.name;
return ElemwiseStorageAttr<int, storage_type_is_none, storage_type_assign, false, false>(
return ElemwiseStorageAttr<int, type_is_none, type_assign, false, false>(
attrs, in_attrs, out_attrs);
}

20 changes: 0 additions & 20 deletions src/operator/operator_common.h
@@ -80,10 +80,6 @@ inline bool type_is_none(const int& x) {
return x == -1;
}

/*! \brief check if storage type is none (-1) */
inline bool storage_type_is_none(const int& x) {
return x == kUndefinedStorage;
}
/*!
* \brief Assign x to y. Checks for compatiblity when y is not empty.
* Allow missing dim in both x and y (as 0).
@@ -125,21 +121,6 @@ inline bool type_assign(int *y, const int& x) {
return true;
}

/*!
* \brief Assign x to y. Checks for compatiblity when y is not -1.
* \param y target type.
* \param x source type.
* \return whether x and y are compatible.
*/
inline bool storage_type_assign(int *y, const int& x) {
if (*y == kUndefinedStorage) {
*y = x;
return true;
} else if (*y != x && x != kUndefinedStorage) {
return false;
}
return true;
}
/*!
* \brief macro assign shape to out if out is unknown otherwise check consistency
* Use macro so we can see the error file more clearly
@@ -306,7 +287,6 @@ inline void ParamParser(nnvm::NodeAttrs* attrs) {
attrs->parsed = std::move(param);
}

// TODO move to op_util.h
template <typename xpu>
void FComputeExFallback(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
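With kUndefinedStorage equal to -1, the storage-specific helpers deleted above are redundant: the generic type_is_none / type_assign helpers already treat -1 as "unknown", so they cover storage types as well, which is why elemwise_op_common.h (earlier in this diff) switches over to them. A hedged Python translation of the shared assign-or-check logic:

```python
UNDEFINED = -1  # shared sentinel for both dtype and storage type after this commit

def type_assign(y, x):
    """Assign x into slot y if y is unknown, otherwise check compatibility.

    Returns (ok, new_y); an illustrative translation of the C++ helper, not MXNet code.
    """
    if y == UNDEFINED:
        return True, x
    if y != x and x != UNDEFINED:
        return False, y
    return True, y

ok, stype = type_assign(UNDEFINED, 2)   # unknown slot accepts the csr id
assert ok and stype == 2
ok, _ = type_assign(0, 1)               # default vs row_sparse: incompatible
assert not ok
```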
1 change: 0 additions & 1 deletion src/operator/tensor/elemwise_binary_op_basic.cc
@@ -63,7 +63,6 @@ NNVM_REGISTER_OP(_backward_mul)
[](const NodeAttrs& attrs){
return std::vector<std::pair<int, int> >{{0, 1}};
})
//.set_attr<nnvm::FInferStorageType>("FInferStorageType", ElemwiseSameStorageType<1, 2>)
.set_attr<FCompute>("FCompute<cpu>", BinaryBackwardUseIn<cpu, mshadow_op::right,
mshadow_op::left>);

3 changes: 2 additions & 1 deletion src/operator/tensor/elemwise_unary_op.h
@@ -206,7 +206,8 @@ void CastStorageComputeEx(const nnvm::NodeAttrs& attrs,
CHECK_EQ(inputs.size(), 1);
CHECK_EQ(outputs.size(), 1);
auto stype = inputs[0].storage_type();
if (stype == kRowSparseStorage) {
auto out_stype = outputs[0].storage_type();
if (stype == kRowSparseStorage && out_stype == kDefaultStorage) {
CastStorageComputeRspDns<xpu>(attrs, ctx, inputs, req, outputs);
} else {
LOG(FATAL) << "Not implemented";
1 change: 0 additions & 1 deletion tests/python/unittest/test_sparse_operator.py
@@ -119,4 +119,3 @@ def test_cast_storage():
test_elemwise_add_dense_sparse()
test_elemwise_add_sparse_sparse()
test_elemwise_add_multiple_stages()
test_cast_storage()
