Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

added large tensor support for add_n and tests for more ops #16476

Merged
merged 1 commit into from
Oct 18, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions src/operator/tensor/elemwise_sum.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,15 +39,15 @@ namespace op {

// Variadic element-wise sum kernel used by add_n / ElementWiseSum.
// Indices use index_t (64-bit when MXNet is built with large-tensor
// support) so inputs with more than INT_MAX elements are addressed
// correctly; the diff artifact that left duplicate int-indexed
// overloads here has been removed.
struct Sum {
  /// Base case: contribution of the last input array at position i.
  template<typename DType>
  MSHADOW_XINLINE static DType sum(index_t i, const DType* a) {
    return a[i];
  }
  /// Recursive case: a[i] plus the sum over the remaining arrays.
  template<typename DType, typename... DTypes>
  MSHADOW_XINLINE static DType sum(index_t i, const DType* a, const DTypes... b) {
    return a[i] + sum(i, b...);
  }
  /// Kernel body: write the variadic sum into out[i], honoring the
  /// write/accumulate request via KERNEL_ASSIGN.
  template<typename DType, typename... DTypes>
  MSHADOW_XINLINE static void Map(index_t i, DType* out, const OpReqType req, const DType* in0,
                                  const DTypes... ins) {
    KERNEL_ASSIGN(out[i], req, sum(i, in0, ins...));
  }
Expand All @@ -64,7 +64,7 @@ void ElementWiseSumCompute_(const nnvm::NodeAttrs& attrs,
size_t size = in_data.size();
Stream<xpu> *s = ctx.get_stream<xpu>();
DType* out_dptr = out_data[0].dptr<DType>();
int out_size = static_cast<int>((out_data[0].Size() + DataType<DType>::kLanes - 1)
index_t out_size = static_cast<index_t>((out_data[0].Size() + DataType<DType>::kLanes - 1)
access2rohit marked this conversation as resolved.
Show resolved Hide resolved
/DataType<DType>::kLanes);
switch (size) {
case 2: {
Expand Down
54 changes: 54 additions & 0 deletions tests/nightly/test_large_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.

import os
import tempfile
import math
import numpy as np
import mxnet as mx
Expand Down Expand Up @@ -1439,6 +1441,58 @@ def npy_instance_norm(data, gamma, beta, axis, eps=1E-5):
forward_check_eps)


def test_load_save():
    """Round-trip a large 2-D tensor through nd.save/nd.load and check
    that corner elements survive serialization."""
    x = create_2d_tensor(SMALL_Y, LARGE_X)
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'large_tensor')
    try:
        nd.save(tmpfile, [x])
        y = nd.load(tmpfile)[0]
        assert x[0][0] == y[0][0]
        assert x[-1][-1] == y[-1][-1]
    finally:
        # mkdtemp does not clean up after itself; remove the potentially
        # multi-GB file so repeated nightly runs don't fill the disk.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        os.rmdir(tmp)


def test_add_n():
    """Sum SMALL_Y all-ones vectors of length LARGE_X with add_n and
    verify both ends of the result equal the input count."""
    inputs = [nd.ones(LARGE_X) for _ in range(SMALL_Y)]
    total = nd.add_n(*inputs)
    assert total[0] == SMALL_Y
    assert total[-1] == SMALL_Y


def test_modulo():
    """Elementwise modulo with row-vector broadcasting: operator form
    first, then the nd.modulo function form."""
    divisor = mx.nd.ones(LARGE_X) * 4
    dividend = mx.nd.ones((SMALL_Y, LARGE_X)) * 6
    remainder = dividend % divisor
    assert remainder[0][0] == 2
    assert remainder[-1][-1] == 2
    dividend = mx.nd.ones((SMALL_Y, LARGE_X)) * 5
    remainder = nd.modulo(dividend, divisor)
    assert remainder[0][0] == 1
    assert remainder[-1][-1] == 1


def test_maximum():
    """Elementwise maximum: broadcast tensor-vs-vector, then
    tensor-vs-scalar.

    Note: stray review-UI text that was pasted into this function body
    (and made it a syntax error) has been removed.
    """
    x = mx.nd.ones((SMALL_Y, LARGE_X)) * 3
    y = mx.nd.ones(LARGE_X) * 4
    z = nd.maximum(x, y)
    assert z[0][0] == 4
    assert z[-1][-1] == 4
    z = nd.maximum(x, 5)
    assert z[0][0] == 5
    assert z[-1][-1] == 5


def test_minimum():
    """Elementwise minimum: broadcast tensor-vs-vector, then
    tensor-vs-scalar."""
    a = mx.nd.ones((SMALL_Y, LARGE_X)) * 3
    b = mx.nd.ones(LARGE_X) * 2
    out = nd.minimum(a, b)
    assert out[0][0] == 2
    assert out[-1][-1] == 2
    out = nd.minimum(a, 5)
    assert out[0][0] == 3
    assert out[-1][-1] == 3


# Allow running this test module directly via the nose runner.
if __name__ == '__main__':
    import nose
    nose.runmodule()
54 changes: 54 additions & 0 deletions tests/nightly/test_large_vector.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
# specific language governing permissions and limitations
# under the License.

import os
import tempfile
import math
import numpy as np
import mxnet as mx
Expand Down Expand Up @@ -876,6 +878,58 @@ def check_degrees():
check_degrees()


def test_load_save():
    """Round-trip a large 1-D vector through nd.save/nd.load and check
    that the first and last elements survive serialization."""
    x = create_vector(size=LARGE_X)
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'large_vector')
    try:
        nd.save(tmpfile, [x])
        y = nd.load(tmpfile)[0]
        assert x[0] == y[0]
        assert x[-1] == y[-1]
    finally:
        # mkdtemp does not clean up after itself; remove the potentially
        # multi-GB file so repeated nightly runs don't fill the disk.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        os.rmdir(tmp)


def test_add_n():
    """add_n over a single all-ones vector is the identity."""
    inputs = [nd.ones(LARGE_X)]
    total = nd.add_n(*inputs)
    assert total[0] == 1
    assert total[-1] == 1


def test_modulo():
    """Elementwise modulo on large vectors: operator form first, then
    the nd.modulo function form.

    Note: stray review-UI text that was pasted into this function body
    (and made it a syntax error) has been removed.
    """
    x = mx.nd.ones(LARGE_X) * 6
    y = mx.nd.ones(LARGE_X) * 4
    z = x % y
    assert z[0] == 2
    assert z[-1] == 2
    x = mx.nd.ones(LARGE_X) * 5
    z = nd.modulo(x, y)
    assert z[0] == 1
    assert z[-1] == 1


def test_maximum():
    """Elementwise maximum on large vectors: vector pair, then a scalar
    second operand."""
    a = mx.nd.ones(LARGE_X) * 3
    b = mx.nd.ones(LARGE_X) * 4
    out = nd.maximum(a, b)
    assert out[0] == 4
    assert out[-1] == 4
    out = nd.maximum(a, 5)
    assert out[0] == 5
    assert out[-1] == 5


def test_minimum():
    """Elementwise minimum on large vectors: vector pair, then a scalar
    second operand."""
    a = mx.nd.ones(LARGE_X) * 3
    b = mx.nd.ones(LARGE_X) * 2
    out = nd.minimum(a, b)
    assert out[0] == 2
    assert out[-1] == 2
    out = nd.minimum(a, 5)
    assert out[0] == 3
    assert out[-1] == 3
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are the backward passes of any of these functions tested?

Copy link
Contributor Author

@access2rohit access2rohit Oct 17, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Only forward passes are required by DGL. So only forward passes are tested.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are there any large-tensor docs on the MXNet website? They should explicitly call out that the backward pass has not been verified with large tensors.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have to write that. Currently we don't have any docs explaining how to use large tensor support and caveats along with limitations and perf regressions.



# Allow running this test module directly via the nose runner.
if __name__ == '__main__':
    import nose
    nose.runmodule()