
Commit

add inline comments
wuxun-zhang committed Nov 5, 2019
1 parent cc2c4c9 commit c1e1e96
Showing 1 changed file with 7 additions and 0 deletions.
7 changes: 7 additions & 0 deletions tests/nightly/test_large_array.py
@@ -213,9 +213,12 @@ def test_FullyConnected():
a = nd.ones(shape=(LARGE_X, SMALL_Y))
b = nd.ones(shape=(SMALL_Y, SMALL_Y))
c = nd.ones(shape=(b.shape[0],))

# w/o bias
res = nd.FullyConnected(a, b, num_hidden=b.shape[0], no_bias=True)
assert np.sum(res[-1].asnumpy() == a.shape[1]) == b.shape[0]

# w/ bias
res = nd.FullyConnected(a, b, c, num_hidden=b.shape[0], no_bias=False)
assert np.sum(res[-1].asnumpy() == a.shape[1] + 1) == b.shape[0]
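
For context, the assertions follow from the FullyConnected semantics (output = data · weightᵀ + bias): with all-ones inputs every output element equals a.shape[1], or a.shape[1] + 1 once the all-ones bias is added. A minimal NumPy sketch, using small stand-in sizes in place of LARGE_X and SMALL_Y:

```python
import numpy as np

# Small stand-in sizes for illustration; the real test uses LARGE_X and SMALL_Y.
LARGE_X, SMALL_Y = 8, 5

a = np.ones((LARGE_X, SMALL_Y))
b = np.ones((SMALL_Y, SMALL_Y))
c = np.ones((b.shape[0],))

# Without bias: each output element is a dot product of ones, i.e. a.shape[1].
res = a @ b.T
assert np.sum(res[-1] == a.shape[1]) == b.shape[0]

# With an all-ones bias: every element is shifted by exactly 1.
res = a @ b.T + c
assert np.sum(res[-1] == a.shape[1] + 1) == b.shape[0]
```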

@@ -818,6 +821,7 @@ def get_ref_mean_var(data, running_mean, running_var, eps, use_global_status=Tru
nd.waitall()
return mean, stdvar

# Use a 4D input here to cover both the mkldnn BN and the non-mkldnn BN code paths
shape = (3, 3, LARGE_X, SMALL_Y)
axis = 1 # default
eps = 1e-3
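
A rough NumPy sketch of the per-channel statistics a reference helper like get_ref_mean_var would produce for a 4D NCHW input (small stand-in shape; it mirrors the helper's mean/stdvar return values but is only an illustration, not the test's actual implementation):

```python
import numpy as np

# Small stand-in for the (3, 3, LARGE_X, SMALL_Y) input used in the test.
data = np.random.rand(3, 3, 16, 8).astype('float32')
axis = 1      # channel axis for NCHW layout
eps = 1e-3

# Reduce over every axis except the channel axis.
reduce_axes = tuple(i for i in range(data.ndim) if i != axis)
mean = data.mean(axis=reduce_axes)
stdvar = 1.0 / np.sqrt(data.var(axis=reduce_axes) + eps)

print(mean.shape, stdvar.shape)  # (3,) (3,)
```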
@@ -997,6 +1001,9 @@ def test_flatten():
for dtype in test_dtypes:
a = create_2d_tensor(rows=LARGE_X, columns=SMALL_Y, dtype=dtype).reshape((LARGE_X//2, 2, SMALL_Y))
b = nd.flatten(a)
# The value asserts were removed because of the precision difference between `int64` and `float32`:
# `float32` loses precision when `LARGE_X` is large enough, so values such as `LARGE_X-1`
# and `LARGE_X-2` can no longer be represented exactly.
assert b.shape == (LARGE_X//2, SMALL_Y*2)
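
The precision issue the new comments refer to can be reproduced directly: float32 has a 24-bit significand, so integers above 2**24 (about 1.7e7) are not all exactly representable. Assuming LARGE_X is on the order of 10**8, as in this test file, consecutive values near it collapse to the same float32:

```python
import numpy as np

LARGE_X = 100000000  # assumed value, on the order of the LARGE_X used in this file

# Beyond 2**24, the float32 spacing between representable integers exceeds 1,
# so LARGE_X - 1 and LARGE_X - 2 round to the same value.
print(np.float32(LARGE_X - 1) == np.float32(LARGE_X - 2))  # True
print(np.float32(LARGE_X - 1), np.float32(LARGE_X - 2))    # 100000000.0 100000000.0
```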


