This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Test large vector mean operator and fix a few bugs #16079

Merged · 8 commits · Sep 5, 2019

1 change: 0 additions & 1 deletion src/common/tensor_inspector.h
@@ -33,7 +33,6 @@
#include <vector>
#include <fstream>
#include "../../3rdparty/mshadow/mshadow/base.h"
#include "../../tests/cpp/include/test_util.h"
apeforest marked this conversation as resolved.

namespace mxnet {

2 changes: 1 addition & 1 deletion src/operator/mshadow_op.h
@@ -98,7 +98,7 @@ MXNET_UNARY_MATH_OP(identity_grad, 1);

struct identity_with_cast {
template<typename DTypeIn, typename DTypeOut>
MSHADOW_XINLINE static void Map(int i, DTypeOut *out, DTypeIn *in) {
MSHADOW_XINLINE static void Map(index_t i, DTypeOut *out, DTypeIn *in) {
out[i] = DTypeOut(in[i]);
}
};
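
The `int` → `index_t` switch above is what lets the `identity_with_cast` kernel index elements past the 32-bit signed range (`index_t` widens to 64 bits when MXNet is built with large-tensor support). A minimal sanity check of the boundary the nightly tests below rely on, in plain NumPy and purely for illustration:

```python
import numpy as np

LARGE_X = 4_300_000_000                 # vector length used in test_large_vector.py
INT32_MAX = np.iinfo(np.int32).max      # 2_147_483_647

# A 32-bit signed index stops well short of the vector's tail, so the
# kernel's Map() index parameter must be 64-bit (index_t) to reach it.
assert LARGE_X > INT32_MAX
assert LARGE_X <= np.iinfo(np.int64).max
```
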
4 changes: 3 additions & 1 deletion src/operator/tensor/broadcast_reduce-inl.h
@@ -98,8 +98,10 @@ MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}

index_t s = 1;
#pragma unroll
for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
@access2rohit (Contributor) commented on Sep 3, 2019: nice catch ! :)

for (int i = ndim - 1, j = mdim; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
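
Why the hoisted declaration matters: in the old loop header `for (int i = ndim-1, j = mdim, s = 1; ...)`, the comma-declaration gives `s` the same `int` type as `i` and `j`, so the stride accumulator can wrap once the running product of dimension sizes passes 2**31 - 1; declaring `index_t s = 1;` before the loop keeps it 64-bit. A rough Python analogue of the stride accumulation (illustrative only, not the PR's code, and assuming `s` multiplies up the trailing dimension sizes as in the surrounding function):

```python
def strides_for(shape):
    # Right-to-left stride accumulation, loosely mirroring diff() above.
    # Python ints never overflow; in C++ the accumulator must be index_t
    # (64-bit), or strides beyond 2**31 - 1 would wrap in a 32-bit int.
    s = 1
    strides = []
    for dim in reversed(shape):
        strides.append(s)
        s *= dim
    return list(reversed(strides))

print(strides_for((2, 4_300_000_000)))  # [4300000000, 1]: the first stride exceeds int32's max
```
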
95 changes: 51 additions & 44 deletions tests/nightly/test_large_vector.py
@@ -24,15 +24,15 @@
from tests.python.unittest.common import with_seed, teardown

# dimension constants
LARGE_X = 5000000000
LARGE_X = 4300000000
MEDIUM_X = 1000000000

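Both the old and new LARGE_X values exceed 2**32, so the vector still forces indexing past the 32-bit boundary; the smaller constant presumably just trims the memory footprint. A quick check of that arithmetic (float32 storage assumed, for illustration only):

```python
OLD_LARGE_X, NEW_LARGE_X = 5_000_000_000, 4_300_000_000

assert NEW_LARGE_X > 2**32          # still past the unsigned 32-bit boundary
print(OLD_LARGE_X * 4 / 2**30)      # ~18.6 GiB of float32
print(NEW_LARGE_X * 4 / 2**30)      # ~16.0 GiB of float32
```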

def test_slice():
a = nd.ones(LARGE_X)
res = nd.slice(a, begin=(LARGE_X - MEDIUM_X), end=LARGE_X)
assert a[0] == 1
assert res.shape[0] == MEDIUM_X
assert res[0] == 1


def test_ndarray_zeros():
@@ -45,7 +45,7 @@ def test_ndarray_zeros():
def test_ndarray_ones():
a = nd.ones(shape=LARGE_X)
assert a[-1] == 1
assert nd.sum(a).asnumpy() == LARGE_X
assert nd.sum(a) == LARGE_X


@with_seed()
@@ -56,15 +56,12 @@ def test_ndarray_random_uniform():

@with_seed()
def test_ndarray_random_randint():
a = nd.random.randint(100, 10000, shape=LARGE_X)
assert a.shape == (LARGE_X,)
# check if randint can generate value greater than 2**32 (large)
low_large_value = 2**32
high_large_value = 2**34
a = nd.random.randint(low_large_value, high_large_value, dtype=np.int64)
low = mx.nd.array([low_large_value], dtype='int64')
high = mx.nd.array([high_large_value], dtype='int64')
assert a >= low and a < high
low = 2**32
high = 2**34
a = nd.random.randint(low, high, dtype=np.int64, shape=LARGE_X).asnumpy()
assert a.shape == (LARGE_X,)
assert (a >= low).all() and (a < high).all()

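The rewritten randint check needs `dtype=np.int64` because the requested range lies entirely above 2**32, outside what any 32-bit integer dtype can hold. A small boundary check in plain NumPy, for illustration:

```python
import numpy as np

low, high = 2**32, 2**34
assert low > np.iinfo(np.int32).max         # the entire range lies above the int32 maximum
assert high - 1 <= np.iinfo(np.int64).max   # but fits comfortably in int64
```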

def test_ndarray_empty():
@@ -83,15 +80,10 @@ def test_elementwise():
assert res[-1].asnumpy() == 3


def test_reduce():
a = nd.ones(shape=(LARGE_X, 1))
assert nd.sum(a).asnumpy() == a.shape[0] * a.shape[1]


def test_clip():
a = create_vector(LARGE_X)
res = nd.clip(a, a_min=100, a_max=1000)
assert np.sum(res[-1].asnumpy() == 1000) == 1
assert res[-1] == 1000
A reviewer (Contributor) commented: good ... previous one was bad



def test_argmin():
@@ -146,27 +138,41 @@ def test_Dense(ctx=mx.cpu(0)):


def test_argsort():
b = create_vector(size=LARGE_X)
s = nd.argsort(b, axis=0, is_ascend=False, dtype=np.int64)
assert (s[0].asnumpy() == (LARGE_X - 1)).all()
a = create_vector(size=LARGE_X)
s = nd.argsort(a, axis=0, is_ascend=False, dtype=np.int64)
assert s[0] == (LARGE_X - 1)


def test_sort():
b = create_vector(size=LARGE_X)
s = nd.sort(b, axis=0, is_ascend=False)
assert np.sum(s[-1].asnumpy() == 0).all()
s = nd.sort(b, is_ascend=True)
assert np.sum(s[0].asnumpy() == 0).all()
a = create_vector(size=LARGE_X)

def test_descend(x):
s = nd.sort(x, axis=0, is_ascend=False)
assert s[-1] == 0

def test_ascend(x):
s = nd.sort(x, is_ascend=True)
assert s[0] == 0

test_descend(a)
test_ascend(a)


def test_topk():
b = create_vector(size=LARGE_X)
ind = nd.topk(b, k=10, axis=0, dtype=np.int64)
assert np.sum(ind.asnumpy() == (LARGE_X - 1)) == 1
ind, val = mx.nd.topk(b, k=3, axis=0, dtype=np.int64, ret_typ="both", is_ascend=False)
a = create_vector(size=LARGE_X)
ind = nd.topk(a, k=10, axis=0, dtype=np.int64)
for i in range(10):
assert ind[i] == (LARGE_X - i - 1)
ind, val = mx.nd.topk(a, k=3, axis=0, dtype=np.int64, ret_typ="both", is_ascend=False)
assert np.all(ind == val)
val = nd.topk(b, k=1, axis=0, dtype=np.int64, ret_typ="value")
assert val.sum() == (LARGE_X - 1)
val = nd.topk(a, k=1, axis=0, dtype=np.int64, ret_typ="value")
assert val == (LARGE_X - 1)


def test_mean():
a = nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1, dtype=np.int64)
b = nd.mean(a, axis=0)
assert b == 0

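The expected value in test_mean follows from symmetry: nd.arange(-LARGE_X // 2, LARGE_X // 2 + 1) enumerates every integer from -k through k inclusive, so positive and negative terms cancel pairwise and the mean is exactly zero. The same reasoning on a small NumPy stand-in, for illustration:

```python
import numpy as np

k = 5                                     # small stand-in for LARGE_X // 2
a = np.arange(-k, k + 1, dtype=np.int64)  # -5, -4, ..., 4, 5
assert a.sum() == 0                       # pairwise cancellation
assert a.mean() == 0                      # hence the mean is exactly zero
```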

@with_seed()
@@ -640,48 +646,48 @@ def test_eq():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 3)
c = (a == b)
assert np.sum(c[0].asnumpy() == 1).all()
assert (c.asnumpy() == 1).all()


def test_neq():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = (a != b)
assert np.sum(c[0].asnumpy() == 1).all()
assert (c.asnumpy() == 1).all()


def test_lt():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
d = (a <= b)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()


def test_lte():
a = nd.full(LARGE_X, 2)
b = nd.full(LARGE_X, 3)
c = nd.full(LARGE_X, 2)
d = (a <= b)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()
d = (a <= c)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()


def test_gt():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
d = (a > b)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()


def test_gte():
a = nd.full(LARGE_X, 3)
b = nd.full(LARGE_X, 2)
c = nd.full(LARGE_X, 3)
d = (a >= b)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()
d = (a >= c)
assert np.sum(d[0].asnumpy() == 1).all()
assert (d.asnumpy() == 1).all()


def test_slice_like():
@@ -690,20 +696,21 @@ def test_slice_like():
c = nd.slice_like(a, b)
assert c.shape == b.shape
assert c[0] == 0
assert c[-1] == (LARGE_X//2-1)
assert c[-1] == (LARGE_X // 2 - 1)


def test_slice_axis():
a = create_vector(size=LARGE_X)
c = nd.slice_axis(a, axis=0, begin=0, end=LARGE_X//2)
assert c.shape[0] == a.shape[0]//2
assert c[-1][0] == (LARGE_X//2-1)
med = LARGE_X // 2
c = nd.slice_axis(a, axis=0, begin=0, end=med)
assert c.shape[0] == a.shape[0] // 2
assert c[-1][0] == (med - 1)


def test_full():
a = nd.full(LARGE_X, 3)
assert a.shape[0] == LARGE_X
assert a[LARGE_X//2] == 3
assert a[LARGE_X // 2] == 3
assert a[-1] == 3

