Fix build failure
reminisce authored and haojin2 committed Jul 26, 2019
1 parent 31fbd2f commit 47f4cd3
Showing 11 changed files with 66 additions and 213 deletions.
2 changes: 0 additions & 2 deletions python/mxnet/gluon/loss.py
@@ -29,7 +29,6 @@
from .. import ndarray
from ..base import numeric_types
from .block import HybridBlock
from .utils import _adapt_np_array
from ..util import is_np_array


@@ -188,7 +187,6 @@ class L1Loss(Loss):
def __init__(self, weight=None, batch_axis=0, **kwargs):
super(L1Loss, self).__init__(weight, batch_axis, **kwargs)

@_adapt_np_array
def hybrid_forward(self, F, pred, label, sample_weight=None):
label = _reshape_like(F, label, pred)
loss = F.abs(label - pred)
4 changes: 1 addition & 3 deletions python/mxnet/gluon/nn/basic_layers.py
@@ -26,7 +26,7 @@

from .activations import Activation
from ..block import Block, HybridBlock
from ..utils import _indent, _adapt_np_array
from ..utils import _indent
from ... import nd, sym
from ...util import is_np_array

@@ -522,7 +522,6 @@ def __init__(self, axis=1, epsilon=1e-5, center=True, scale=False,
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True)

@_adapt_np_array
def hybrid_forward(self, F, x, gamma, beta):
if self._axis == 1:
return F.InstanceNorm(x, gamma, beta,
@@ -795,7 +794,6 @@ def __init__(self, function, prefix=None):
"Unrecognized function in lambda: {} of type {}"
.format(function, type(function)))

@_adapt_np_array
def hybrid_forward(self, F, x, *args):
return self._func(F, x, *args)

52 changes: 1 addition & 51 deletions python/mxnet/gluon/utils.py
@@ -40,7 +40,7 @@ class requests_failed_to_import(object):
import numpy as np

from .. import ndarray
from ..util import is_np_shape, is_np_array, wraps_safely
from ..util import is_np_shape, is_np_array
from .. import numpy as _mx_np # pylint: disable=reimported


@@ -484,53 +484,3 @@ def _check_all_np_ndarrays(out):
for i in out:
_check_all_np_ndarrays(i)
# pylint: enable=no-else-raise


def _to_classic_arrays(*args, **kwargs):
"""Convert arrays to classic arrays. This is used in a Gluon layer for converting
inputs of np arrays to classic arrays so that the layer built with legacy ops can still
be used in np_array semantics."""
from ..numpy import ndarray as np_ndarray
from ..symbol.numpy import _Symbol as np_symbol
num_inputs = len(args)
assert num_inputs != 0
if not is_np_array():
return args, kwargs
in_arrs = [arr if arr is None else arr.as_nd_ndarray() for arr in args]
new_kwargs = {}
for k, v in kwargs.items():
if isinstance(v, (np_ndarray, np_symbol)):
new_kwargs[k] = v.as_nd_ndarray()
else:
new_kwargs[k] = v
return in_arrs, new_kwargs


def _to_np_arrays(*args):
"""Convert arrays to np arrays. This is used in a Gluon layer for converting
outputs of classic arrays to np arrays so that the layer built with legacy ops can still
be used in np_array semantics."""
num_outputs = len(args)
assert num_outputs != 0
if not is_np_array():
return args[0] if num_outputs == 1 else args
out = [arr.as_np_ndarray() for arr in args]
return out[0] if num_outputs == 1 else out


# TODO(junwu): This is a temp solution for allowing basic layers
# implemented using legacy ops to accept np.ndarrays as inputs and return
# np.ndarrays as outputs. We should remove it after changing all the layers
# to use np ops in np_array semantics in the future.
def _adapt_np_array(func):
@wraps_safely(func)
def _with_np_array(*args, **kwargs):
assert len(args) > 2, "expect at least three arguments in args"
if is_np_array():
input_args, kwargs = _to_classic_arrays(*args[2:], **kwargs)
input_args = list(args[0:2]) + list(input_args)
out = func(*input_args, **kwargs)
return _to_np_arrays(out)
else:
return func(*args, **kwargs)
return _with_np_array
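
For context, the helpers removed above wrapped hybrid_forward so that layers written with legacy operators could still consume and produce mxnet.numpy arrays. A minimal standalone sketch of that conversion pattern is below; it is not part of this commit, and the relu call is just an arbitrary legacy operator used for illustration.

    import mxnet as mx
    from mxnet.util import is_np_array

    mx.npx.set_np()                     # enable np-array semantics
    x = mx.np.ones((2, 3))              # mxnet.numpy.ndarray input
    legacy_in = x.as_nd_ndarray()       # view it as a classic mx.nd.NDArray
    legacy_out = mx.nd.relu(legacy_in)  # run a legacy operator on the classic view
    out = legacy_out.as_np_ndarray() if is_np_array() else legacy_out
    mx.npx.reset_np()                   # restore classic semantics
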
5 changes: 1 addition & 4 deletions python/mxnet/numpy_extension/__init__.py
@@ -25,10 +25,7 @@
from . import _register
from ._op import * # pylint: disable=wildcard-import
from ..context import * # pylint: disable=wildcard-import
# TODO(junwu): revisit what functions should be exposed to users
from ..util import use_np_shape, np_shape, is_np_shape
from ..util import use_np_array, np_array, is_np_array
from ..util import set_np, use_np, reset_np
from ..util import is_np_shape, is_np_array, set_np, reset_np
from ..ndarray import waitall
from .utils import * # pylint: disable=wildcard-import
from .random import * # pylint: disable=wildcard-import
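
With this trim, mxnet.numpy_extension (npx) re-exports only the query and toggle helpers from mxnet.util on this import line; the use_np_shape/use_np_array/use_np decorators are no longer pulled in here. A small illustrative sketch of the kept surface, not taken from the commit:

    from mxnet import npx

    npx.set_np()        # turn on np-shape and np-array semantics together
    assert npx.is_np_shape() and npx.is_np_array()
    npx.reset_np()      # restore classic semantics
    assert not npx.is_np_array()
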
1 change: 1 addition & 0 deletions python/mxnet/test_utils.py
@@ -49,6 +49,7 @@
from .ndarray import array
from .symbol import Symbol
from .symbol.numpy import _Symbol as np_symbol
from .util import use_np # pylint: disable=unused-import


def default_context():
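
The seemingly unused import above makes use_np available from mxnet.test_utils, which is what the test files below switch to in place of the npx.use_np_shape/npx.use_np decorators. A hypothetical minimal test using it (test_my_np_block and its body are illustrative, not part of this commit):

    from mxnet import np
    from mxnet.test_utils import use_np

    @use_np                      # applies np-shape and np-array semantics to this test
    def test_my_np_block():
        x = np.ones((2, 3))      # mxnet.numpy.ndarray under np semantics
        assert isinstance(x, np.ndarray)
        assert x.shape == (2, 3)
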
2 changes: 1 addition & 1 deletion src/operator/numpy/np_init_op.cc
@@ -115,7 +115,7 @@ NNVM_REGISTER_OP(_npi_arange)
.set_attr_parser(RangeParamParser)
.set_attr<mxnet::FInferShape>("FInferShape", NumpyRangeShape)
.set_attr<nnvm::FInferType>("FInferType", InitType<RangeParam>)
.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu>)
.set_attr<FCompute>("FCompute<cpu>", RangeCompute<cpu, RangeParam>)
.add_arguments(RangeParam::__FIELDS__());

NNVM_REGISTER_OP(_npi_eye)
2 changes: 1 addition & 1 deletion src/operator/numpy/np_init_op.cu
@@ -41,7 +41,7 @@ NNVM_REGISTER_OP(_np_ones_like)
.set_attr<FCompute>("FCompute<gpu>", FillCompute<gpu, 1>);

NNVM_REGISTER_OP(_npi_arange)
.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu>);
.set_attr<FCompute>("FCompute<gpu>", RangeCompute<gpu, RangeParam>);

NNVM_REGISTER_OP(_npi_eye)
.set_attr<FCompute>("FCompute<gpu>", NumpyEyeFill<gpu>);
86 changes: 0 additions & 86 deletions tests/python/unittest/test_contrib_amp.py

This file was deleted.

7 changes: 4 additions & 3 deletions tests/python/unittest/test_numpy_gluon.py
@@ -20,7 +20,8 @@
from __future__ import division

import mxnet as mx
from mxnet import gluon, autograd, np, npx
from mxnet import gluon, autograd, np
from mxnet.test_utils import use_np


def test_create_np_param():
@@ -45,7 +46,7 @@ def __init__(self):
def hybrid_forward(self, F, x, w):
return F.dot(x, w)

@npx.use_np
@use_np
class TestBlock2(gluon.HybridBlock):
def __init__(self):
super(TestBlock2, self).__init__()
@@ -62,7 +63,7 @@ def hybrid_forward(self, F, x, w):
check_block_params(x.as_np_ndarray(), TestBlock2, True, np.ndarray)


@npx.use_np
@use_np
def test_optimizer_with_np_ndarrays():
class LinearRegression(gluon.HybridBlock):
def __init__(self, num_input_dim=0, num_hidden_dim=100, num_output_dim=10):
24 changes: 12 additions & 12 deletions tests/python/unittest/test_numpy_ndarray.py
@@ -23,12 +23,12 @@
import mxnet as mx
from mxnet import np, npx, autograd
from mxnet.gluon import HybridBlock
from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, assert_exception
from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, assert_exception, use_np
from common import with_seed, TemporaryDirectory


@with_seed()
@npx.use_np_shape
@use_np
def test_array_creation():
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
objects = [
@@ -53,7 +53,7 @@ def test_array_creation():


@with_seed()
@npx.use_np_shape
@use_np
def test_zeros():
# test np.zeros in Gluon
class TestZeros(HybridBlock):
@@ -101,7 +101,7 @@ def check_zero_array_creation(shape, dtype):


@with_seed()
@npx.use_np_shape
@use_np
def test_ones():
# test np.ones in Gluon
class TestOnes(HybridBlock):
@@ -167,7 +167,7 @@ def test_ndarray_binary_element_wise_ops():
def get_np_ret(x1, x2, op):
return np_op_map[op](x1, x2)

@npx.use_np_shape
@use_np
class TestBinaryElementWiseOp(HybridBlock):
def __init__(self, op, scalar=None, reverse=False):
super(TestBinaryElementWiseOp, self).__init__()
@@ -235,7 +235,7 @@ def hybrid_forward(self, F, x, *args):
print(self._op)
assert False

@npx.use_np_shape
@use_np
def check_binary_op_result(shape1, shape2, op, dtype=None):
if shape1 is None:
mx_input1 = abs(_np.random.uniform()) + 1
@@ -305,7 +305,7 @@ def check_binary_op_result(shape1, shape2, op, dtype=None):

@with_seed()
def test_hybrid_block_multiple_outputs():
@npx.use_np_shape
@use_np
class TestAllNumpyOutputs(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return F.npx.relu(x), F.np.sum(x)
@@ -325,7 +325,7 @@ def hybrid_forward(self, F, x, *args, **kwargs):
assert type(out1) is expected_out_type
assert type(out2) is expected_out_type

@npx.use_np_array
@use_np
class TestMixedTypeOutputsFailure(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return F.relu(x.as_nd_ndarray()), F.np.sum(x)
@@ -337,7 +337,7 @@ def hybrid_forward(self, F, x, *args, **kwargs):


@with_seed()
@npx.use_np_shape
@use_np
def test_grad_ndarray_type():
data = np.array(2, dtype=_np.float32)
data.attach_grad()
@@ -375,7 +375,7 @@ def test_np_ndarray_copy():


@with_seed()
@npx.use_np_shape
@use_np
def test_np_ndarray_indexing():
def test_getitem(np_array, index):
"""`is_scalar` indicates whether we should expect a scalar for the result.
@@ -627,7 +627,7 @@ def convert(num):


@with_seed()
@npx.use_np
@use_np
def test_np_save_load_ndarrays():
shapes = [(2, 0, 1), (0,), (), (), (0, 4), (), (3, 0, 0, 0), (2, 1), (0, 5, 0), (4, 5, 6), (0, 0, 0)]
array_list = [_np.random.randint(0, 10, size=shape) for shape in shapes]
@@ -671,7 +671,7 @@ def test_np_save_load_ndarrays():

@retry(5)
@with_seed()
@npx.use_np_shape
@use_np
def test_np_multinomial():
pvals_list = [[0.0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0.0]]
sizes = [None, (), (3,), (2, 5, 7), (4, 9)]
