Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Fix binary broadcast with zero-size tensors
Browse files Browse the repository at this point in the history
  • Loading branch information
reminisce committed Apr 5, 2019
1 parent d6118f3 commit f39daaa
Show file tree
Hide file tree
Showing 2 changed files with 18 additions and 19 deletions.
15 changes: 6 additions & 9 deletions src/operator/tensor/elemwise_binary_broadcast_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,16 +61,13 @@ inline bool BinaryBroadcastShape(const nnvm::NodeAttrs& attrs,
int l = 1, r = 1;
if (i >= bl) l = lhs[i-bl];
if (i >= br) r = rhs[i-br];
if (!mxnet::dim_size_is_known(l) || !mxnet::dim_size_is_known(r)) continue;
if (l != r) {
if (l == 0 || r == 0) {
// TODO(junwu): here is not compatible with NumPy.
// For example, (2, 3) cannot broadcast to (2, 0, 3).
out[i] = 0;
} else {
CHECK(l == 1 || r == 1)
<< "operands could not be broadcast together with shapes " << lhs << " " << rhs;
out[i] = std::max(l, r);
}
// Make it compatible with NumPy.
// For example, (2, 3) cannot broadcast to (2, 0, 3), but (1, 3) can broadcast to (2, 0, 3).
CHECK(l == 1 || r == 1)
<< "operands could not be broadcast together with shapes " << lhs << " " << rhs;
out[i] = (l == 1 ? r : l);
} else {
out[i] = l;
}
Expand Down
22 changes: 12 additions & 10 deletions tests/python/gpu/test_operator_gpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -1963,19 +1963,21 @@ def check_proposal_consistency(op, batch_size, with_nms=False):
# The following 2 functions launch 0-thread kernels, an error that should be caught and signaled.
def kernel_error_check_imperative():
    # Deliberately triggers a 0-thread GPU kernel launch by dividing a
    # non-empty ndarray by an empty one; the engine must catch and signal
    # the error instead of crashing. NaiveEngine makes the failure synchronous
    # so it surfaces at the offending call rather than at a later sync point.
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    # enable_np_comp turns on NumPy-compatible zero-size-tensor semantics,
    # under which the empty operand is legal to construct and the failure
    # happens in the kernel launch — TODO confirm against mx.enable_np_comp docs.
    with mx.enable_np_comp():
        a = mx.nd.array([1,2,3],ctx=mx.gpu(0))
        b = mx.nd.array([],ctx=mx.gpu(0))
        # asnumpy() forces a synchronization, propagating the kernel error here.
        c = (a / b).asnumpy()

def kernel_error_check_symbolic():
    # Symbolic-API twin of kernel_error_check_imperative: binds a / b with an
    # empty 'b' on GPU so the executor launches a 0-thread kernel; the error
    # must be caught and signaled. NaiveEngine makes execution synchronous so
    # the failure is attributable to this forward pass.
    os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
    # enable_np_comp allows the zero-size operand under NumPy-compatible
    # semantics — TODO confirm against mx.enable_np_comp docs.
    with mx.enable_np_comp():
        a = mx.sym.Variable('a')
        b = mx.sym.Variable('b')
        c = a / b
        f = c.bind(mx.gpu(0), { 'a':mx.nd.array([1,2,3],ctx=mx.gpu(0)),
                                'b':mx.nd.array([],ctx=mx.gpu(0))})
        f.forward()
        # asnumpy() synchronizes with the device, surfacing the kernel error.
        g = f.outputs[0].asnumpy()

def test_kernel_error_checking():
# Running tests that may throw exceptions out of worker threads will stop CI testing
Expand Down

0 comments on commit f39daaa

Please sign in to comment.