Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
test rand shape
Browse files Browse the repository at this point in the history
  • Loading branch information
yzhliu committed Jul 19, 2019
1 parent 2d81025 commit a690f17
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 3 deletions.
3 changes: 3 additions & 0 deletions make/osx.mk
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ ADD_LDFLAGS =
# the additional compile flags you want to add
ADD_CFLAGS =

# whether to build operators written in TVM
USE_TVM_OP = 0

#---------------------------------------------
# matrix computation libraries for CPU/GPU
#---------------------------------------------
Expand Down
8 changes: 5 additions & 3 deletions tests/python/unittest/test_tvm_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,17 @@
# under the License.

import mxnet as mx
from mxnet.test_utils import same
from mxnet.test_utils import same, rand_shape_nd
from mxnet.runtime import Features

_features = Features()

def test_tvm_broadcast_add():
    """Check the TVM-generated broadcast add against a NumPy reference.

    Builds a random 4-d shape, makes a second shape that is broadcastable
    against it (only axis 1 keeps its extent; the other axes are 1), runs
    ``mx.nd.contrib.tvm_vadd`` on two normal-random tensors, and asserts the
    result matches ``a + b`` computed via NumPy broadcasting.

    Skipped (silently) when the runtime was built without TVM operators.
    """
    # TODO(review): reviewer asked for the @with_seed annotation so random
    # shapes/values are reproducible on failure — confirm the decorator's
    # import location (tests' common module) before adding it.
    if _features.is_enabled("TVM_OP"):
        a_shape = rand_shape_nd(4)
        # Broadcast-compatible with a_shape: (1, a_shape[1], 1, 1).
        b_shape = (1,) + a_shape[1:2] + (1, 1)
        a = mx.nd.normal(shape=a_shape)
        b = mx.nd.normal(shape=b_shape)
        c = mx.nd.contrib.tvm_vadd(a, b)
        # Reference result via NumPy's broadcasting rules.
        c_np = a.asnumpy() + b.asnumpy()
        assert same(c.asnumpy(), c_np)
Expand Down

0 comments on commit a690f17

Please sign in to comment.