
[NumPy][Operator] NumPy operator may_share_memory and shares_memory #16533

Merged 4 commits on Oct 28, 2019

76 changes: 75 additions & 1 deletion python/mxnet/ndarray/numpy/_op.py
@@ -38,7 +38,7 @@
           'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip',
           'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take',
           'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal',
-          'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero']
+          'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory', 'may_share_memory']


@set_module('mxnet.ndarray.numpy')
@@ -4909,3 +4909,77 @@ def nonzero(a):
"""
out = _npi.nonzero(a).transpose()
return tuple([out[i] for i in range(len(out))])


@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- Actually it is same as `may_share_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()


@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- Actually it is same as `shares_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()
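
A minimal usage sketch of the two new front-end calls (assumes a build containing this PR; the claim that basic row slices are views over the same buffer is an assumption about MXNet slicing, not something this diff shows):

```python
from mxnet import np, npx
npx.set_np()  # activate NumPy-compatible semantics

a = np.arange(12.0).reshape(3, 4)
b = np.array([5.0, 8.0, 9.0])

# Separate buffers: the bounds check can never report an overlap.
print(np.shares_memory(a, b))       # False
print(np.may_share_memory(a, b))    # False

# Row slices a[0:2] and a[1:3] presumably view the same buffer with
# overlapping byte ranges, so both functions report True.
print(np.shares_memory(a[0:2], a[1:3]))     # True
print(np.may_share_memory(a[0:2], a[1:3]))  # True (same kernel as shares_memory)
```
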
79 changes: 77 additions & 2 deletions python/mxnet/numpy/multiarray.py
@@ -55,7 +55,8 @@
           'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming',
           'blackman', 'flip', 'around', 'arctan2', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril',
           'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less',
-          'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero']
+          'greater_equal', 'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'nonzero', 'shares_memory',
+          'may_share_memory']

# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
@@ -1330,7 +1331,7 @@ def take(self, indices, axis=None, mode='raise'):  # pylint: disable=arguments-differ
        The arguments are the same as for :py:func:`take`, with
        this array as data.
        """
-        take(self, indices, axis, mode=mode)
+        return take(self, indices, axis, mode=mode)

    def one_hot(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`one_hot`.
@@ -6900,3 +6901,77 @@ def nonzero(a):
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    """
    return _mx_nd_np.nonzero(a)


@set_module('mxnet.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- Actually it is same as `may_share_memory` in MXNet DeepNumPy
"""
return _mx_nd_np.shares_memory(a, b, max_work)


@set_module('mxnet.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
- Actually it is same as `shares_memory` in MXNet DeepNumPy
"""
return _mx_nd_np.may_share_memory(a, b, max_work)
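
Both front ends accept `max_work` only for signature compatibility; a small sketch of the consequence (illustrative values, assuming a build with this PR):

```python
from mxnet import np, npx
npx.set_np()

x = np.zeros([3, 4])

# Official NumPy uses max_work to bound the effort of the exact
# overlap check; here the argument is ignored, so every call takes
# the same bounds-only path and agrees.
print(np.shares_memory(x, x))                 # True
print(np.shares_memory(x, x, max_work=0))     # True, max_work ignored
print(np.may_share_memory(x, x, max_work=1))  # True, max_work ignored
```
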
4 changes: 3 additions & 1 deletion python/mxnet/numpy_dispatch_protocol.py
@@ -127,7 +127,9 @@ def _run_with_array_ufunc_proto(*args, **kwargs):
    'tril',
    'meshgrid',
    'outer',
-    'einsum'
+    'einsum',
+    'shares_memory',
+    'may_share_memory',
]
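
These dispatch-list entries are what let official NumPy hand the calls off to MXNet through the `__array_function__` protocol (NumPy >= 1.17). A hedged sketch of the intended effect:

```python
import numpy as onp  # official NumPy, >= 1.17 for __array_function__
from mxnet import np, npx
npx.set_np()

a = np.zeros((3, 4))

# With 'shares_memory' and 'may_share_memory' registered above,
# calling the official NumPy functions on MXNet ndarrays should
# dispatch to mxnet.numpy.shares_memory / may_share_memory instead
# of failing on a foreign array type.
print(onp.shares_memory(a, a))     # True
print(onp.may_share_memory(a, a))  # True
```
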


41 changes: 40 additions & 1 deletion python/mxnet/symbol/numpy/_symbol.py
@@ -40,7 +40,7 @@
           'std', 'var', 'indices', 'copysign', 'ravel', 'hanning', 'hamming', 'blackman', 'flip',
           'around', 'hypot', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take',
           'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal',
-          'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide']
+          'less_equal', 'hsplit', 'rot90', 'einsum', 'true_divide', 'shares_memory', 'may_share_memory']


def _num_outputs(sym):
@@ -4590,4 +4590,43 @@ def einsum(*operands, **kwargs):
    return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))


@set_module('mxnet.symbol.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : _Symbol
Input arrays
Returns
-------
out : _Symbol
"""
return _npi.share_memory(a, b)


@set_module('mxnet.symbol.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : _Symbol
Input arrays
Returns
-------
out : _Symbol
"""
return _npi.share_memory(a, b)


_set_np_symbol_class(_Symbol)
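
A minimal Gluon sketch of the symbolic path (the `F.np` namespace access inside `hybrid_forward` is the usual MXNet 1.6-era pattern, assumed here rather than shown in this diff):

```python
from mxnet import np, npx
from mxnet.gluon import HybridBlock
npx.set_np()

class OverlapCheck(HybridBlock):
    """Emits a boolean-typed output telling whether a and b overlap."""
    def hybrid_forward(self, F, a, b):
        # F is mxnet.ndarray imperatively and mxnet.symbol once
        # hybridized; both expose the numpy namespace as F.np.
        return F.np.shares_memory(a, b)

check = OverlapCheck()
check.hybridize()  # routes through mxnet.symbol.numpy.shares_memory
x = np.zeros((3, 4))
print(check(x, x))  # expected: array(True)
```
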
62 changes: 62 additions & 0 deletions src/operator/numpy/np_memory_op.cc
@@ -0,0 +1,62 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* Copyright (c) 2019 by Contributors
* \file np_memory_op.cc
*/

#include "./np_memory_op.h"

namespace mxnet {
namespace op {

inline bool NumpyShareMemoryType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int> *in_attrs,
                                 std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kBool);
  return out_attrs->at(0) != -1;
}

inline bool NumpyShareMemoryShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(in_attrs->size(), 2U);
  CHECK_EQ(out_attrs->size(), 1U);
  SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape(0, -1));
  return true;
}

NNVM_REGISTER_OP(_npi_share_memory)
.set_num_inputs(2)
.set_num_outputs(1)
.set_attr<nnvm::FListInputNames>("FListInputNames",
  [](const NodeAttrs& attrs) {
    return std::vector<std::string>{"a", "b"};
  })
.set_attr<mxnet::FInferShape>("FInferShape", NumpyShareMemoryShape)
.set_attr<nnvm::FInferType>("FInferType", NumpyShareMemoryType)
.set_attr<FCompute>("FCompute<cpu>", NumpyShareMemoryCompute<cpu>)
.add_argument("a", "NDArray-or-Symbol", "First input")
.add_argument("b", "NDArray-or-Symbol", "Second input");

} // namespace op
} // namespace mxnet
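
`NumpyShareMemoryCompute` itself lives in `np_memory_op.h` and is not part of this diff; conceptually, a bounds-only check reduces to half-open interval overlap on the two byte ranges. A pure-Python illustration of that predicate (not the real kernel):

```python
def bounds_overlap(ptr_a, nbytes_a, ptr_b, nbytes_b):
    """True iff the byte ranges [ptr, ptr + nbytes) intersect."""
    if nbytes_a == 0 or nbytes_b == 0:
        return False  # an empty range intersects nothing
    return ptr_a < ptr_b + nbytes_b and ptr_b < ptr_a + nbytes_a

# Overlapping and disjoint ranges within one hypothetical buffer:
print(bounds_overlap(1000, 24, 1016, 8))  # True:  [1000,1024) meets [1016,1024)
print(bounds_overlap(1000, 8, 1016, 8))   # False: [1000,1008) vs [1016,1024)
```
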
34 changes: 34 additions & 0 deletions src/operator/numpy/np_memory_op.cu
@@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

/*!
* Copyright (c) 2019 by Contributors
* \file np_memory_op.cu
*/

#include "./np_memory_op.h"

namespace mxnet {
namespace op {

NNVM_REGISTER_OP(_npi_share_memory)
.set_attr<FCompute>("FCompute<gpu>", NumpyShareMemoryCompute<gpu>);

} // namespace op
} // namespace mxnet