[v1.x] Attempt to fix v1.x CI issues. (#19872)
* Attempt to fix v1.x CI issues.

* Re-pin scipy.

* Add numpy with pinned version so other package installs don't overwrite our required version.

* Use python3 (from /usr/local/bin) for the TensorRT GPU tests, so it can find all required Python modules.

* Fix ONNX tests; we need to pass a scalar value (not an np.array) to create_const_scalar_node.

* Fix pylint

* Set values using np.dtype(dtype) instead of creating float32 values and then casting to the desired type (as sketched below).

* Skip 2 tests that are flaky, reported in issue #19877.

Co-authored-by: Joe Evans <[email protected]>
josephevans and Joe Evans authored Feb 10, 2021
1 parent: 0066ccb · commit: bf9e62a
Showing 6 changed files with 18 additions and 18 deletions.
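Most of the _op_translations.py changes below apply one pattern: constants are now created as true numpy scalars of the requested dtype rather than as float32 values (or 1-element np.arrays) that get cast afterwards. A minimal sketch of the difference, with an illustrative dtype and value that are not taken from the diff:

    import numpy as np

    dtype = 'float32'                        # illustrative; the converters derive this from the node attributes
    old_style = np.array([2], dtype=dtype)   # shape-(1,) array, not a scalar
    new_style = np.dtype(dtype).type(2)      # a true numpy scalar
    assert new_style.dtype == np.dtype(dtype)
    assert np.ndim(new_style) == 0           # rank 0, which is what a scalar ONNX initializer expects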
ci/docker/install/requirements (2 changes: 1 addition & 1 deletion)
@@ -31,4 +31,4 @@ pylint==2.3.1 # pylint and astroid need to be aligned
astroid==2.3.3 # pylint and astroid need to be aligned
requests<2.19.0,>=2.18.4
scipy==1.2.1
-setuptools<50
+setuptools
ci/docker/install/ubuntu_onnx.sh (2 changes: 1 addition & 1 deletion)
@@ -30,4 +30,4 @@ echo "Installing libprotobuf-dev and protobuf-compiler ..."
apt-get update || true
apt-get install -y libprotobuf-dev protobuf-compiler

-pip3 install pytest pytest-cov pytest-xdist protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.6.0 gluonnlp gluoncv
+pip3 install pytest pytest-cov pytest-xdist protobuf==3.5.2 onnx==1.7.0 Pillow==5.0.0 tabulate==0.7.5 onnxruntime==1.6.0 'numpy>1.16.0,<1.19.0' gluonnlp gluoncv
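The explicit 'numpy>1.16.0,<1.19.0' requirement is installed in the same pip3 invocation as the other packages so that a later dependency resolution does not replace it with an incompatible release. A hypothetical post-install check (not part of this commit) could confirm the container kept a version inside that range:

    import numpy

    major, minor = (int(part) for part in numpy.__version__.split('.')[:2])
    # 'numpy>1.16.0,<1.19.0' admits late 1.16.x patches and any 1.17.x or 1.18.x release
    assert (1, 16) <= (major, minor) <= (1, 18), numpy.__version__
    print('numpy', numpy.__version__)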
ci/docker/install/ubuntu_python.sh (4 changes: 2 additions & 2 deletions)
@@ -26,9 +26,9 @@ apt-get update || true
apt-get install -y software-properties-common
add-apt-repository -y ppa:deadsnakes/ppa
apt-get update || true
-apt-get install -y python3.6-dev virtualenv wget
+apt-get install -y python3.7-dev python3.7-distutils virtualenv wget
# setup symlink in /usr/local/bin to override python3 version
-ln -sf /usr/bin/python3.6 /usr/local/bin/python3
+ln -sf /usr/bin/python3.7 /usr/local/bin/python3

# the version of the pip shipped with ubuntu may be too lower, install a recent version here
wget -nv https://bootstrap.pypa.io/get-pip.py
ci/docker/runtime_functions.sh (2 changes: 1 addition & 1 deletion)
@@ -1077,7 +1077,7 @@ unittest_ubuntu_tensorrt_gpu() {
export DMLC_LOG_STACK_TRACE_DEPTH=10
pip3 install --extra-index-url https://developer.download.nvidia.com/compute/redist nvidia-dali-cuda100==0.24
wget -nc http://data.mxnet.io/data/val_256_q90.rec
-python3.6 tests/python/tensorrt/rec2idx.py val_256_q90.rec val_256_q90.idx
+python3 tests/python/tensorrt/rec2idx.py val_256_q90.rec val_256_q90.idx
nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_trt_gpu.xml --verbose --nocapture tests/python/tensorrt/
rm val_256_q90*
}
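The runner now calls plain python3, which the ubuntu_python.sh change above points at Python 3.7 through the /usr/local/bin symlink; invoking python3.6 explicitly would miss packages installed for the 3.7 interpreter. A hypothetical sanity check (not part of this commit) for the resolved interpreter:

    import sys

    # Expect the /usr/local/bin/python3 symlink created in ubuntu_python.sh to win over /usr/bin/python3.
    assert sys.version_info[:2] == (3, 7), sys.version
    print(sys.executable)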
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py (24 changes: 11 additions & 13 deletions)
@@ -47,6 +47,7 @@
# coding: utf-8
# pylint: disable=too-many-locals,no-else-return,too-many-lines
# pylint: disable=anomalous-backslash-in-string,eval-used
+# pylint: disable=too-many-function-args
"""
Conversion Functions for common layers.
Add new functions here with a decorator.
@@ -162,7 +163,7 @@ def create_const_scalar_node(input_name, value, kwargs):
initializer = kwargs["initializer"]
input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]
value_node = make_tensor_value_info(input_name, input_type, ())
-tensor_node = make_tensor(input_name, input_type, (), (value,))
+tensor_node = make_tensor(input_name, input_type, (), ([value]))
initializer.append(tensor_node)
return value_node
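After this fix, create_const_scalar_node assumes value is a numpy scalar, so value.dtype maps directly to an ONNX element type and [value] supplies the single element of a rank-0 tensor. A rough standalone sketch of the same steps (the tensor name is illustrative, and the onnx.mapping module used elsewhere in this file is assumed to be available):

    import numpy as np
    from onnx import mapping
    from onnx.helper import make_tensor

    value = np.dtype('float32').type(0)                        # a numpy scalar, not np.array([0])
    elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype]    # numpy dtype -> TensorProto element type
    scalar_tensor = make_tensor('example_bias', elem_type, (), [value])   # rank-0 tensor with one element
    print(scalar_tensor)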

@@ -362,7 +363,7 @@ def convert_fully_connected(node, **kwargs):
in_nodes = [name+'_data_flattened', input_nodes[1]]

if no_bias:
-nodes.append(create_const_scalar_node(name+'_bias', np.array([0], dtype=dtype), kwargs))
+nodes.append(create_const_scalar_node(name+'_bias', np.int32(0).astype(dtype), kwargs))
in_nodes.append(name+'_bias')
else:
in_nodes.append(input_nodes[2])
@@ -2430,7 +2431,7 @@ def convert_layer_norm(node, **kwargs):
create_tensor([], name+"_void", kwargs["initializer"]),
create_const_scalar_node(name+'_0_s', np.int64(0), kwargs),
create_const_scalar_node(name+'_1_s', np.int64(1), kwargs),
create_const_scalar_node(name+"_2_s", np.array(2, dtype=dtype), kwargs),
create_const_scalar_node(name+"_2_s", np.int64(2).astype(dtype), kwargs),
create_const_scalar_node(name+"_eps", np.float32(eps), kwargs),
make_node("ReduceMean", [input_nodes[0]], [name+"_rm0_out"], axes=[axes]),
make_node("Sub", [input_nodes[0], name+"_rm0_out"], [name+"_sub0_out"]),
@@ -2829,9 +2830,9 @@ def convert_arange_like(node, **kwargs):
raise NotImplementedError("arange_like operator with repeat != 1 not yet implemented.")

nodes = [
create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs),
create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs),
create_const_scalar_node(name+"_half_step", np.array([float(step)*0.5], dtype=dtype), kwargs),
create_const_scalar_node(name+"_start", np.dtype(dtype).type(start), kwargs),
create_const_scalar_node(name+"_step", np.dtype(dtype).type(step), kwargs),
create_const_scalar_node(name+"_half_step", np.dtype(dtype).type(float(step)*0.5), kwargs),
create_tensor([], name+'_void', kwargs["initializer"])
]
if axis == 'None':
@@ -2947,9 +2948,9 @@ def convert_arange(node, **kwargs):
raise NotImplementedError("arange operator with repeat != 1 not yet implemented.")

nodes = [
create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs),
create_const_scalar_node(name+"_stop", np.array([stop], dtype=dtype), kwargs),
create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs),
create_const_scalar_node(name+"_start", np.dtype(dtype).type(start), kwargs),
create_const_scalar_node(name+"_stop", np.dtype(dtype).type(stop), kwargs),
create_const_scalar_node(name+"_step", np.dtype(dtype).type(step), kwargs),
make_node("Range", [name+"_start", name+"_stop", name+"_step"], [name], name=name)
]

@@ -2977,7 +2978,7 @@ def convert_reverse(node, **kwargs):
create_tensor([axis], name+'_axis', kwargs['initializer']),
create_tensor([axis+1], name+'_axis_p1', kwargs['initializer']),
create_tensor([], name+'_void', kwargs['initializer']),
-create_const_scalar_node(name+'_m1_s', np.array([-1], dtype='int64'), kwargs),
+create_const_scalar_node(name+'_m1_s', np.int64(-1), kwargs),
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('Shape', [name+'_shape'], [name+'_dim']),
make_node('Sub', [name+'_10', name+'_dim'], [name+'_sub']),
@@ -3188,7 +3189,6 @@ def convert_greater_scalar(node, **kwargs):
else:
if dtype == 'float16':
# when using float16, we must convert it to np.uint16 view first
-# pylint: disable=too-many-function-args
scalar = np.float16(scalar).view(np.uint16)

tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
@@ -3217,7 +3217,6 @@ def convert_lesser_scalar(node, **kwargs):
else:
if dtype == 'float16':
# when using float16, we must convert it to np.uint16 view first
-# pylint: disable=too-many-function-args
scalar = np.float16(scalar).view(np.uint16)

tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
@@ -3245,7 +3244,6 @@ def convert_equal_scalar(node, **kwargs):
else:
if dtype == 'float16':
# when using float16, we must convert it to np.uint16 view first
-# pylint: disable=too-many-function-args
scalar = np.float16(scalar).view(np.uint16)

tensor_value = make_tensor(name+"_scalar", input_type, [1], [scalar])
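The three *_scalar converters above keep the existing float16 handling: the Python float is first turned into np.float16 and then viewed as np.uint16, because the uint16 bit pattern is what gets stored for a float16 tensor. A small illustration of what the view does (the value is arbitrary):

    import numpy as np

    x = np.float16(1.5)
    bits = x.view(np.uint16)            # reinterpret the same 16 bits, no numeric conversion
    print(hex(int(bits)))               # 0x3e00, the IEEE 754 half-precision encoding of 1.5
    assert bits.view(np.float16) == x   # the view round-trips losslessly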
tests/python/unittest/test_gluon_data.py (2 changes: 2 additions & 0 deletions)
@@ -251,6 +251,7 @@ def _batchify(data):
nd.array(y_lens, ctx=context.Context('cpu_shared', 0)))

@with_seed()
+@unittest.skip("skipping flaky test - see https://github.com/apache/incubator-mxnet/issues/19877")
def test_multi_worker_forked_data_loader():
data = _Dummy(False)
loader = DataLoader(data, batch_size=40, batchify_fn=_batchify, num_workers=2)
@@ -265,6 +266,7 @@ def test_multi_worker_forked_data_loader():
pass

@with_seed()
+@unittest.skip("skipping flaky test - see https://github.com/apache/incubator-mxnet/issues/19877")
def test_multi_worker_dataloader_release_pool():
# will trigger too many open file if pool is not released properly
if os.name == 'nt':
