diff --git a/src/operator/numpy/np_elemwise_broadcast_op.cu b/src/operator/numpy/np_elemwise_broadcast_op.cu
index 3b780325227a..a682ec989ea8 100644
--- a/src/operator/numpy/np_elemwise_broadcast_op.cu
+++ b/src/operator/numpy/np_elemwise_broadcast_op.cu
@@ -138,8 +138,5 @@ NNVM_REGISTER_OP(_backward_npi_ldexp_scalar)
 NNVM_REGISTER_OP(_backward_npi_rldexp_scalar)
 .set_attr<FCompute>("FCompute", BinaryScalarOp::Backward<gpu, mshadow_op::rldexp_grad>);
 
-NNVM_REGISTER_OP(_np_bitwise_xor)
-.set_attr<FCompute>("FCompute", BinaryBroadcastCompute<gpu, mshadow_op::bitwise_xor>);
-
 }  // namespace op
 }  // namespace mxnet
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 4f01fa9ed4fa..b764ac73d30c 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -34,6 +34,7 @@
 from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf
 from mxnet.numpy_op_signature import _get_builtin_op
 from mxnet.test_utils import is_op_runnable, has_tvm_ops
+from mxnet.operator import get_all_registered_operators
 
 
 @with_seed()
@@ -3164,10 +3165,15 @@ def check_output_n_grad(data_shape, idx_shape, axis, mode):
 def test_np_builtin_op_signature():
     import inspect
     from mxnet import _numpy_op_doc
-    for op_name in dir(_numpy_op_doc):
+    builtin_np_op_names = [name for name in get_all_registered_operators() if name.startswith('_np_')]
+    for op_name in builtin_np_op_names:
+        _op_from_doc = getattr(_numpy_op_doc, op_name, None)
+        assert _op_from_doc is not None, "Failed to find documentation for operator {}. " \
+            "Please add the documentation in _numpy_op_doc.py for this operator."\
+            .format(op_name)
         op = _get_builtin_op(op_name)
-        if op is not None:
-            assert str(op.__signature__) == str(inspect.signature(getattr(_numpy_op_doc, op_name)))
+        assert op is not None
+        assert str(op.__signature__) == str(inspect.signature(_op_from_doc))
 
 
 @with_seed()
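
Note (not part of the patch): the test change flips the direction of the check. Before, it iterated over whatever happened to be documented in _numpy_op_doc.py and skipped anything without a builtin op; after, it iterates over every operator registered in the backend under the `_np_` prefix and requires a matching doc stub with an identical signature. That stricter invariant is presumably why the stale GPU-side `_np_bitwise_xor` registration is removed above. A minimal standalone sketch of the enforced invariant, assuming an MXNet build where the private modules used by the test (`mxnet._numpy_op_doc`, `mxnet.numpy_op_signature._get_builtin_op`, `mxnet.operator.get_all_registered_operators`) are importable:

# Sketch only: restates the check from test_np_builtin_op_signature.
import inspect

from mxnet import _numpy_op_doc
from mxnet.numpy_op_signature import _get_builtin_op
from mxnet.operator import get_all_registered_operators

# Every backend operator registered with the `_np_` prefix must have a doc
# stub in _numpy_op_doc.py, and the generated builtin op must expose the
# same Python signature as that stub.
for op_name in (n for n in get_all_registered_operators() if n.startswith('_np_')):
    doc_stub = getattr(_numpy_op_doc, op_name, None)
    assert doc_stub is not None, '{} has no entry in _numpy_op_doc.py'.format(op_name)
    builtin_op = _get_builtin_op(op_name)
    assert builtin_op is not None, '{} has no builtin front-end op'.format(op_name)
    assert str(builtin_op.__signature__) == str(inspect.signature(doc_stub))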