This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit 70ef881

Change np_compat to np_shape
reminisce committed May 31, 2019
1 parent 24eaed2 commit 70ef881
Showing 10 changed files with 36 additions and 39 deletions.
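
In short: the NumPy-compatibility helpers in mxnet.util are renamed from *_np_compat to *_np_shape (is_np_shape, use_np_shape, set_np_shape, np_shape), since the flag they control governs NumPy shape semantics specifically: -1 marks an unknown dimension, and zero-dim and zero-size shapes become legal. A minimal sketch of the renamed API, assuming a build that includes this commit (set_np_shape returning the previous state is an assumption):

    from mxnet.util import is_np_shape, set_np_shape

    prev = set_np_shape(True)  # enable NumPy shape semantics globally
    assert is_np_shape()
    # With the flag on, -1 (not 0) is the unknown-dimension sentinel, matching
    # the `unknown_dim_size = -1 if is_np_shape() else 0` checks below.
    set_np_shape(prev)         # restore the previous setting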
2 changes: 1 addition & 1 deletion python/mxnet/gluon/block.py
@@ -543,7 +543,7 @@ def __call__(self, *args):

         for hook in self._forward_hooks.values():
             hook(self, args, out)
-        if _mx_np.is_np_compat():
+        if _mx_np.is_np_shape():
             _check_all_np_ndarrays(_flatten(out, "output")[0])
         return out

10 changes: 5 additions & 5 deletions python/mxnet/gluon/parameter.py
@@ -31,7 +31,7 @@
 from ..context import Context, cpu
 from .. import autograd
 from .utils import _indent, _brief_print_list, shape_is_known
-from ..util import is_np_compat
+from ..util import is_np_shape

 # pylint: disable= invalid-name
 tensor_types = (symbol.Symbol, ndarray.NDArray)
@@ -163,7 +163,7 @@ def shape(self, new_shape):
         if self._shape is None:
             self._shape = new_shape
             return
-        unknown_dim_size = -1 if is_np_compat() else 0
+        unknown_dim_size = -1 if is_np_shape() else 0
         assert len(self._shape) == len(new_shape) and \
             all(j in (unknown_dim_size, i) for i, j in zip(new_shape, self._shape)), \
             "Expected shape %s is incompatible with given shape %s."%(
@@ -282,7 +282,7 @@ def _finish_deferred_init(self):
                 initializer.create(default_init)(
                     initializer.InitDesc(self.name, {'__init__': init}), data)
                 # TODO(junwu): use np random operators when available
-                if is_np_compat():
+                if is_np_shape():
                     data = data.as_np_ndarray()  # convert to np.ndarray

             self._init_impl(data, ctx)
@@ -309,7 +309,7 @@ def _init_grad(self):
         self._grad = [ndarray.zeros(shape=i.shape, dtype=i.dtype, ctx=i.context,
                                     stype=self._grad_stype) for i in self._data]
         # TODO(junwu): use np.zeros
-        if is_np_compat():
+        if is_np_shape():
             self._grad = [arr.as_np_ndarray() for arr in self._grad]

         autograd.mark_variables(self._check_and_get(self._data, list),
@@ -558,7 +558,7 @@ def var(self):
             self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype,
                                    lr_mult=self.lr_mult, wd_mult=self.wd_mult,
                                    init=self.init, stype=self._stype)
-            if is_np_compat():
+            if is_np_shape():
                 self._var = self._var.as_np_ndarray()
         return self._var

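A hedged sketch of why the sentinel matters for Gluon parameters: a dimension recorded as unknown can later be pinned by the shape setter shown above. The shapes here are illustrative, and the assignment that follows the assertion is truncated in the hunk, so it is assumed:

    from mxnet import gluon
    from mxnet.util import set_np_shape

    set_np_shape(True)
    w = gluon.Parameter('weight', shape=(-1, 100))  # first dim unknown until data is seen
    w.shape = (128, 100)  # passes the check: -1 is compatible with any concrete size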
4 changes: 2 additions & 2 deletions python/mxnet/gluon/utils.py
@@ -38,7 +38,7 @@ class requests_failed_to_import(object):
 import numpy as np

 from .. import ndarray
-from ..util import is_np_compat
+from ..util import is_np_shape


 def split_data(data, num_slice, batch_axis=0, even_split=True):
@@ -453,7 +453,7 @@ def shape_is_known(shape):
"""Check whether a shape is completely known w/ or w/o np semantics."""
if shape is None:
return False
unknown_dim_size = -1 if is_np_compat() else 0
unknown_dim_size = -1 if is_np_shape() else 0
if len(shape) == 0:
return unknown_dim_size == -1
for dim_size in shape:
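Pieced together, shape_is_known treats an empty shape as known only under NumPy semantics (a zero-dim scalar) and otherwise scans for the sentinel. A standalone sketch with the flag passed explicitly; the loop body is truncated in the hunk above, so its final lines are an assumption:

    def shape_is_known(shape, np_shape_active):
        """Check whether a shape is completely known w/ or w/o np semantics."""
        if shape is None:
            return False
        unknown_dim_size = -1 if np_shape_active else 0
        if len(shape) == 0:
            return unknown_dim_size == -1  # () is a scalar under np semantics, unknown otherwise
        for dim_size in shape:
            if dim_size == unknown_dim_size:
                return False
        return True

    assert shape_is_known((), True)
    assert not shape_is_known((0, 4), False)  # 0 means "unknown" in legacy mode
    assert shape_is_known((0, 4), True)       # 0 is a genuine empty axis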
3 changes: 1 addition & 2 deletions python/mxnet/ndarray/numpy/_op.py
@@ -20,7 +20,7 @@
 from __future__ import absolute_import
 import numpy as _np
 from ...base import numeric_types
-from ...util import _sanity_check_params, use_np_compat, set_module
+from ...util import _sanity_check_params, set_module
 from ...context import current_context
 from . import _internal as _npi

@@ -90,7 +90,6 @@ def ones(shape, dtype=None, **kwargs):


 #pylint: disable= too-many-arguments, no-member, protected-access
-@use_np_compat
 def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
     """ Helper function for element-wise operation.
     The function will perform numpy-like broadcasting if needed and call different functions.
4 changes: 2 additions & 2 deletions python/mxnet/ndarray/register.py
@@ -25,7 +25,7 @@
 from ..ndarray_doc import _build_doc

 from ..base import mx_uint, check_call, _LIB, py_str, _init_op_module, _Null, _is_np_op  # pylint: disable=unused-import
-from ..util import use_np_compat  # pylint: disable=unused-import
+from ..util import use_np_shape  # pylint: disable=unused-import


 def _verify_all_np_ndarrays(op_name, func_name, args, out):

@@ -176,7 +176,7 @@ def _generate_ndarray_function_code(handle, op_name, func_name, signature_only=F
     if is_np_op:
         doc_str_idx = 2
         code.append("""
-@use_np_compat""")
+@use_np_shape""")
     if arr_name:
         code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
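Every generated numpy-op wrapper is therefore decorated with @use_np_shape rather than @use_np_compat. As documented for mxnet.util, the decorator activates NumPy shape semantics for the duration of the call; a hedged illustration, assuming that behavior:

    from mxnet.util import use_np_shape, is_np_shape

    @use_np_shape
    def scalar_shape():
        assert is_np_shape()  # semantics are active inside the decorated call
        return ()

    print(is_np_shape())      # may be False globally
    scalar_shape()            # still executes with np_shape semantics on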
2 changes: 1 addition & 1 deletion python/mxnet/numpy/__init__.py
@@ -26,6 +26,6 @@
 from . import _op
 from . import _register
 from ._op import *  # pylint: disable=wildcard-import
-from ..util import use_np_compat, set_np_compat, np_compat, is_np_compat
+from ..util import use_np_shape, set_np_shape, np_shape, is_np_shape

 __all__ = []
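
Because mxnet.numpy re-exports all four helpers, user code can reach them as np.use_np_shape and friends, which is the pattern the test updates below rely on. A hedged sketch mirroring those tests (the block and its zero-dim constant are illustrative):

    from mxnet import gluon
    from mxnet import numpy as np

    @np.use_np_shape
    class ScalarBias(gluon.HybridBlock):
        def hybrid_forward(self, F, x):
            return x + F.np.zeros(shape=())  # zero-dim shapes require np_shape semantics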
8 changes: 3 additions & 5 deletions python/mxnet/numpy/multiarray.py
@@ -30,7 +30,7 @@
 from . import _op as _mx_np_op
 from ..base import check_call, _LIB, NDArrayHandle
 from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types
-from ..util import _sanity_check_params, set_module, use_np_compat
+from ..util import _sanity_check_params, set_module, use_np_shape
 from ..context import current_context
 from ..ndarray import numpy as _mx_nd_np
 from ..ndarray.numpy import _internal as _npi
@@ -75,7 +75,7 @@ def _np_ndarray_cls(handle, writable=True, stype=0):


 @set_module('mxnet.numpy')  # pylint: disable=invalid-name
-@use_np_compat
+@use_np_shape
 class ndarray(NDArray):
     """An array object represents a multidimensional, homogeneous array of fixed-size items.
     An associated data-type object describes the format of each element in the array
@@ -1140,7 +1140,6 @@ def ndim(self):
         return len(self.shape)

     @property
-    @use_np_compat
     def size(self):
         """Number of elements in the array."""
         return super(ndarray, self).size
@@ -1150,7 +1149,6 @@ def tostype(self, stype):


 @set_module('mxnet.numpy')
-@use_np_compat
 def empty(shape, dtype=None, **kwargs):
     """Return a new array of given shape and type, without initializing entries.
@@ -1183,7 +1181,7 @@ def empty(shape, dtype=None, **kwargs):


 @set_module('mxnet.numpy')
-@use_np_compat
+@use_np_shape
 def array(object, dtype=None, **kwargs):
     """
     Create an array.
6 changes: 3 additions & 3 deletions tests/python/unittest/test_numpy_gluon.py
@@ -44,7 +44,7 @@ def __init__(self):
     def hybrid_forward(self, F, x, w):
         return F.dot(x, w)

-    @np.use_np_compat
+    @np.use_np_shape
     class TestBlock2(gluon.HybridBlock):
         def __init__(self):
             super(TestBlock2, self).__init__()
@@ -62,7 +62,7 @@ def hybrid_forward(self, F, x, w):


 def test_optimizer_with_np_ndarrays():
-    @np.use_np_compat
+    @np.use_np_shape
     class LinearRegression(gluon.HybridBlock):
         def __init__(self, num_input_dim=-1, num_hidden_dim=100, num_output_dim=10):
             super(LinearRegression, self).__init__()
@@ -78,7 +78,7 @@ def hybrid_forward(self, F, x, w1, w2):
             y_pred = h_relu.dot(w2)  # equivalent to F.np.dot(h_relu, w2)
             return y_pred

-    @np.use_np_compat
+    @np.use_np_shape
     class TotalLoss(gluon.HybridBlock):
         def hybrid_forward(self, F, pred, label):
             return ((pred - label) ** 2).sum()  # equivalent to F.np.sum(F.np.square(pred - label))
20 changes: 10 additions & 10 deletions tests/python/unittest/test_numpy_ndarray.py
@@ -47,7 +47,7 @@ def test_array_creation():
 @with_seed()
 def test_zeros():
     # test np.zeros in Gluon
-    @np.use_np_compat
+    @np.use_np_shape
     class TestZeros(HybridBlock):
         def __init__(self, shape, dtype=None):
             super(TestZeros, self).__init__()
@@ -57,13 +57,13 @@ def __init__(self, shape, dtype=None):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x + F.np.zeros(shape, dtype)

-    @np.use_np_compat
+    @np.use_np_shape
     class TestZerosOutputType(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x, F.np.zeros(shape=())

     # test np.zeros in imperative
-    @np.use_np_compat
+    @np.use_np_shape
     def check_zero_array_creation(shape, dtype):
         np_out = _np.zeros(shape=shape, dtype=dtype)
         mx_out = np.zeros(shape=shape, dtype=dtype)
@@ -97,7 +97,7 @@ def check_zero_array_creation(shape, dtype):
 @with_seed()
 def test_ones():
     # test np.ones in Gluon
-    @np.use_np_compat
+    @np.use_np_shape
     class TestOnes(HybridBlock):
         def __init__(self, shape, dtype=None):
             super(TestOnes, self).__init__()
@@ -107,13 +107,13 @@ def __init__(self, shape, dtype=None):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x * F.np.ones(shape, dtype)

-    @np.use_np_compat
+    @np.use_np_shape
     class TestOnesOutputType(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return x, F.np.ones(shape=())

     # test np.ones in imperative
-    @np.use_np_compat
+    @np.use_np_shape
     def check_ones_array_creation(shape, dtype):
         np_out = _np.ones(shape=shape, dtype=dtype)
         mx_out = np.ones(shape=shape, dtype=dtype)
@@ -156,7 +156,7 @@ def test_ndarray_binary_element_wise_ops():
     def get_np_ret(x1, x2, op):
         return np_op_map[op](x1, x2)

-    @np.use_np_compat
+    @np.use_np_shape
     class TestBinaryElementWiseOp(HybridBlock):
         def __init__(self, op, scalar=None, reverse=False):
             super(TestBinaryElementWiseOp, self).__init__()
@@ -219,7 +219,7 @@ def hybrid_forward(self, F, x, *args):
                 print(self._op)
                 assert False

-    @np.use_np_compat
+    @np.use_np_shape
     def check_binary_op_result(shape1, shape2, op, dtype=None):
         if shape1 is None:
             mx_input1 = abs(_np.random.uniform()) + 1
@@ -289,7 +289,7 @@ def check_binary_op_result(shape1, shape2, op, dtype=None):

 @with_seed()
 def test_hybrid_block_multiple_outputs():
-    @np.use_np_compat
+    @np.use_np_shape
     class TestAllNumpyOutputs(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.npe.relu(x), F.np.sum(x)
@@ -309,7 +309,7 @@ def hybrid_forward(self, F, x, *args, **kwargs):
         assert type(out1) is expected_out_type
         assert type(out2) is expected_out_type

-    @np.use_np_compat
+    @np.use_np_shape
     class TestMixedTypeOutputsFailure(HybridBlock):
         def hybrid_forward(self, F, x, *args, **kwargs):
             return F.relu(x.as_classic_ndarray()), F.np.sum(x)
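The imperative checks above compare array creation against NumPy directly. A condensed sketch of the same round trip, with shapes chosen to cover the zero-dim and zero-size cases the new semantics permit:

    import numpy as _np
    from mxnet import numpy as np
    from mxnet.util import set_np_shape

    set_np_shape(True)
    for shape in [(), (0,), (2, 0, 2)]:
        assert np.zeros(shape).asnumpy().shape == _np.zeros(shape).shape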
16 changes: 8 additions & 8 deletions tests/python/unittest/test_numpy_op.py
@@ -88,7 +88,7 @@ def is_int(dtype):


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_dot():
     shapes = [
         ((3, 0), (0, 4)),
@@ -132,7 +132,7 @@ def test_np_dot():

 @with_seed()
 def test_np_mean():
-    @np.use_np_compat
+    @np.use_np_shape
     class TestMean(HybridBlock):
         def __init__(self, axis=None, dtype=None, keepdims=False):
             super(TestMean, self).__init__()
@@ -194,7 +194,7 @@ def is_int(dtype):


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_transpose():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -224,7 +224,7 @@ def test_np_transpose():


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_relu():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -240,7 +240,7 @@


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_sigmoid():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('data').as_np_ndarray()
@@ -256,7 +256,7 @@


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_reshape():
     # TODO(junwu): Add more test cases
     data = mx.sym.var('a').as_np_ndarray()
@@ -272,7 +272,7 @@


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_maximum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
@@ -293,7 +293,7 @@ def check_maximum(x1, x2):


 @with_seed()
-@np.use_np_compat
+@np.use_np_shape
 def test_np_minimum():
     # TODO(junwu): Add more test cases
     x1, x2 = mx.sym.var('x1').as_np_ndarray(), mx.sym.var('x2').as_np_ndarray()
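test_np_dot above includes zero-size operands such as ((3, 0), (0, 4)), which only become expressible once the decorator switches shape semantics on. A reduced sketch of that pattern (seeding and tolerance checks omitted):

    import numpy as _np
    from mxnet import numpy as np

    @np.use_np_shape
    def check_zero_size_dot():
        a = np.zeros((3, 0))
        b = np.zeros((0, 4))
        out = np.dot(a, b)  # reduction over an empty axis yields a (3, 4) array of zeros
        assert out.shape == (3, 4)
        assert (out.asnumpy() == _np.zeros((3, 4))).all()

    check_zero_size_dot()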
