diff --git a/benchmark/opperf/opperf.py b/benchmark/opperf/opperf.py old mode 100755 new mode 100644 diff --git a/cd/python/docker/Dockerfile.test b/cd/python/docker/Dockerfile.test index bed059d0fc73..6ded5f4722fb 100644 --- a/cd/python/docker/Dockerfile.test +++ b/cd/python/docker/Dockerfile.test @@ -24,7 +24,7 @@ ARG BASE_IMAGE FROM ${BASE_IMAGE} # Install test dependencies -RUN pip install nose +RUN pip install pytest ARG USER_ID=1001 ARG GROUP_ID=1001 diff --git a/cd/python/pypi/pypi_publish.py b/cd/python/pypi/pypi_publish.py old mode 100755 new mode 100644 diff --git a/cd/utils/artifact_repository.py b/cd/utils/artifact_repository.py old mode 100755 new mode 100644 diff --git a/ci/README.md b/ci/README.md index 155a0104a125..f5e847eac3bd 100644 --- a/ci/README.md +++ b/ci/README.md @@ -185,7 +185,7 @@ sudo pip3 install --upgrade --force-reinstall build/mxnet-1.3.1-py2.py3-none-any # Execute a single python test: -nosetests-3.4 -v -s tests/python/unittest/test_ndarray.py +pytest -v --capture=no tests/python/unittest/test_ndarray.py # Debug with cgdb diff --git a/ci/dev_menu.py b/ci/dev_menu.py old mode 100755 new mode 100644 index e9f031e1b171..b14f7f296c12 --- a/ci/dev_menu.py +++ b/ci/dev_menu.py @@ -119,7 +119,7 @@ def provision_virtualenv(venv_path=DEFAULT_PYENV): provision_virtualenv, ]), ('[Local] Python Unit tests', - "./py3_venv/bin/nosetests -v tests/python/unittest/" + "pytest -v tests/python/unittest/" ), ('[Docker] Build the MXNet binary - outputs to "lib/"', "ci/build.py --platform ubuntu_cpu_lite /work/runtime_functions.sh build_ubuntu_cpu_docs"), diff --git a/ci/docker/Dockerfile.build.centos7_cpu b/ci/docker/Dockerfile.build.centos7_cpu index 0cfa5a9f6e47..b16984706146 100644 --- a/ci/docker/Dockerfile.build.centos7_cpu +++ b/ci/docker/Dockerfile.build.centos7_cpu @@ -24,10 +24,14 @@ WORKDIR /work/deps COPY install/centos7_core.sh /work/ RUN /work/centos7_core.sh + COPY install/centos7_ccache.sh /work/ RUN /work/centos7_ccache.sh + COPY install/centos7_python.sh /work/ +COPY install/requirements /work/ RUN /work/centos7_python.sh + COPY install/centos7_scala.sh /work/ RUN /work/centos7_scala.sh diff --git a/ci/docker/Dockerfile.build.centos7_gpu b/ci/docker/Dockerfile.build.centos7_gpu index 7e49e88b3a52..389a17eab12b 100644 --- a/ci/docker/Dockerfile.build.centos7_gpu +++ b/ci/docker/Dockerfile.build.centos7_gpu @@ -24,9 +24,12 @@ WORKDIR /work/deps COPY install/centos7_core.sh /work/ RUN /work/centos7_core.sh + COPY install/centos7_ccache.sh /work/ RUN /work/centos7_ccache.sh + COPY install/centos7_python.sh /work/ +COPY install/requirements /work/ RUN /work/centos7_python.sh ENV CUDNN_VERSION=7.6.0.64 diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu b/ci/docker/Dockerfile.build.ubuntu_cpu index b1eb89bb3f36..266109c05cbd 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_cpu @@ -61,9 +61,6 @@ RUN /work/ubuntu_mkl.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_julia b/ci/docker/Dockerfile.build.ubuntu_cpu_julia index b1eb89bb3f36..266109c05cbd 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_julia +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_julia @@ -61,9 +61,6 @@ RUN /work/ubuntu_mkl.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY 
install/ubuntu_docs.sh /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_cpu_python b/ci/docker/Dockerfile.build.ubuntu_cpu_python index 6b217d4d341d..d1b0f6575100 100644 --- a/ci/docker/Dockerfile.build.ubuntu_cpu_python +++ b/ci/docker/Dockerfile.build.ubuntu_cpu_python @@ -32,9 +32,6 @@ COPY install/ubuntu_python.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_python.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ RUN /work/ubuntu_docs.sh @@ -46,4 +43,4 @@ RUN /work/ubuntu_adduser.sh COPY runtime_functions.sh /work/ -WORKDIR /work/mxnet \ No newline at end of file +WORKDIR /work/mxnet diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 index e35c64eeca5d..2c4a34a0ff51 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu100 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 index aa62fbc6307e..fb124f4abff7 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu101 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 index 8badadbb1bdb..0fec5d8790b6 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu102 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 index 30971b0a5c6e..09996ec9b95e 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu80 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 index cc50e7e55191..910631c927c0 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu90 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 index 40a4f44abeb5..481ae4260c32 100644 --- a/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 +++ b/ci/docker/Dockerfile.build.ubuntu_gpu_cu92 @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh 
-COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu index 5717df1b9130..e7d42557fd7e 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_cpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_cpu @@ -49,9 +49,6 @@ RUN /work/ubuntu_clang.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu index 5e812c433b43..75c5b37d4b12 100644 --- a/ci/docker/Dockerfile.build.ubuntu_nightly_gpu +++ b/ci/docker/Dockerfile.build.ubuntu_nightly_gpu @@ -55,9 +55,6 @@ RUN /work/ubuntu_llvm.sh COPY install/ubuntu_caffe.sh /work/ RUN /work/ubuntu_caffe.sh -COPY install/ubuntu_onnx.sh /work/ -RUN /work/ubuntu_onnx.sh - COPY install/ubuntu_docs.sh /work/ COPY install/requirements /work/ RUN /work/ubuntu_docs.sh diff --git a/ci/docker/install/centos7_python.sh b/ci/docker/install/centos7_python.sh index 06c53bea48c1..35202f2bbb2d 100755 --- a/ci/docker/install/centos7_python.sh +++ b/ci/docker/install/centos7_python.sh @@ -29,4 +29,4 @@ yum -y install python36u # Install PIP curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py" python3.6 get-pip.py -pip3 install nose pylint numpy nose-timer requests h5py scipy==1.2.3 +pip3 install -r /work/requirements diff --git a/ci/docker/install/requirements b/ci/docker/install/requirements index 2d5125e8e2da..fc3763494a5b 100644 --- a/ci/docker/install/requirements +++ b/ci/docker/install/requirements @@ -24,11 +24,17 @@ Cython==0.29.7 decorator==4.4.0 h5py==2.8.0rc1 mock==2.0.0 -nose==1.3.7 -nose-timer==0.7.3 numpy>1.16.0,<2.0.0 pylint==2.3.1; python_version >= '3.0' requests<2.19.0,>=2.18.4 scipy==1.2.1 six==1.11.0 setuptools +pytest==5.3.2 +pytest-env==0.6.2 +pytest-cov==2.8.1 +pytest-xdist==1.31.0 +protobuf==3.5.2 +onnx==1.3.0 +Pillow==5.0.0 +tabulate==0.7.5 diff --git a/ci/docker/install/ubuntu_onnx.sh b/ci/docker/install/ubuntu_onnx.sh deleted file mode 100755 index 44d6b9ed52dc..000000000000 --- a/ci/docker/install/ubuntu_onnx.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -###################################################################### -# This script installs ONNX for Python along with all required dependencies -# on a Ubuntu Machine. -# Tested on Ubuntu 16.04 distro. 
-###################################################################### - -set -e -set -x - -echo "Installing libprotobuf-dev and protobuf-compiler ..." -apt-get update || true -apt-get install -y libprotobuf-dev protobuf-compiler - -echo "Installing pytest, pytest-cov, protobuf, Pillow, ONNX and tabulate ..." -pip3 install pytest==3.6.3 pytest-cov==2.5.1 protobuf==3.5.2 onnx==1.3.0 Pillow==5.0.0 tabulate==0.7.5 diff --git a/ci/docker/install/ubuntu_publish.sh b/ci/docker/install/ubuntu_publish.sh index 5b13e1306fbd..4d20cb2f4b78 100755 --- a/ci/docker/install/ubuntu_publish.sh +++ b/ci/docker/install/ubuntu_publish.sh @@ -77,7 +77,7 @@ apt-get install -y python python-pip python3 python3-pip python3 -m pip install --upgrade 'pip<19' # Restrict numpy version to <1.18 due to use of Python 3.4 on Ubuntu 14.04 -python3 -m pip install --upgrade --ignore-installed nose cpplint==1.3.0 pylint==2.3.1 'numpy>1.16.0,<1.18' nose-timer 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 +python3 -m pip install --upgrade --ignore-installed cpplint==1.3.0 pylint==2.3.1 'numpy>1.16.0,<1.18' 'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 boto3 pytest==4.6.9 pytest-env==0.6.2 pytest-cov==2.8.1 pytest-xdist==1.31.0 # CMake 3.13.2+ is required mkdir /opt/cmake && cd /opt/cmake diff --git a/ci/docker/install/ubuntu_python.sh b/ci/docker/install/ubuntu_python.sh index b6792a286fad..6234aac12283 100755 --- a/ci/docker/install/ubuntu_python.sh +++ b/ci/docker/install/ubuntu_python.sh @@ -24,6 +24,7 @@ set -ex # install libraries for mxnet's python package on ubuntu apt-get update || true apt-get install -y python-dev python3-dev virtualenv wget +apt-get install -y libprotobuf-dev protobuf-compiler # the version of the pip shipped with ubuntu may be too lower, install a recent version here wget -nv https://bootstrap.pypa.io/get-pip.py diff --git a/ci/docker/qemu/runtime_functions.py b/ci/docker/qemu/runtime_functions.py index 5a57cb8dae6a..b8306bbaf19f 100755 --- a/ci/docker/qemu/runtime_functions.py +++ b/ci/docker/qemu/runtime_functions.py @@ -77,12 +77,10 @@ def run_ut_python3_qemu_internal(): logging.info("=== NOW Running inside QEMU ===") logging.info("PIP Installing %s", pkg) check_call(['sudo', 'pip3', 'install', pkg]) - logging.info("PIP Installing mxnet/test_requirements.txt") + logging.info("PIP Installing mxnet/test_requirements.txt") check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt']) logging.info("Running tests in mxnet/tests/python/unittest/") - check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py']) - # Example to run a single unit test: - # check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_ndarray.py:test_ndarray_fluent']) + check_call(['pytest', '--durations=50', '--cov-report', 'xml:tests_unittest.xml', '-v', 'mxnet/tests/python/unittest/test_engine.py']) diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh index e171767d51f3..c39e3967371b 100755 --- a/ci/docker/runtime_functions.sh +++ b/ci/docker/runtime_functions.sh @@ -22,8 +22,6 @@ set -ex -NOSE_COVERAGE_ARGUMENTS="--with-coverage --cover-inclusive --cover-xml --cover-branches --cover-package=mxnet" -NOSE_TIMER_ARGUMENTS="--with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error" CI_CUDA_COMPUTE_CAPABILITIES="-gencode=arch=compute_52,code=sm_52 
-gencode=arch=compute_70,code=sm_70" CI_CMAKE_CUDA_ARCH="5.2 7.0" @@ -968,7 +966,7 @@ sanity_check() { make cpplint jnilint make -f R-package/Makefile rcpplint make pylint - nosetests-3.4 tests/tutorials/test_sanity_tutorials.py + pytest tests/tutorials/test_sanity_tutorials.py } # Tests libmxnet @@ -988,10 +986,8 @@ cd_unittest_ubuntu() { local mxnet_variant=${1:?"This function requires a mxnet variant as the first argument"} local python_cmd=${2:?"This function requires a python command as the first argument"} - local nose_cmd="nosetests-3.4" - - $nose_cmd $NOSE_TIMER_ARGUMENTS --verbose tests/python/unittest - $nose_cmd $NOSE_TIMER_ARGUMENTS --verbose tests/python/quantization + pytest --durations=50 --verbose tests/python/unittest + pytest --durations=50 --verbose tests/python/quantization # https://github.com/apache/incubator-mxnet/issues/11801 # if [[ ${mxnet_variant} = "cpu" ]] || [[ ${mxnet_variant} = "mkl" ]]; then @@ -999,7 +995,7 @@ cd_unittest_ubuntu() { # fi if [[ ${mxnet_variant} = cu* ]]; then - $nose_cmd $NOSE_TIMER_ARGUMENTS --verbose tests/python/gpu + pytest --durations=50 --verbose tests/python/gpu # Adding these here as CI doesn't test all CUDA environments $python_cmd example/image-classification/test_score.py @@ -1010,7 +1006,7 @@ cd_unittest_ubuntu() { # skipping python 2 testing # https://github.com/apache/incubator-mxnet/issues/14675 if [[ ${python_cmd} = "python3" ]]; then - $nose_cmd $NOSE_TIMER_ARGUMENTS --verbose tests/python/mkl + pytest --durations=50 --verbose tests/python/mkl fi fi } @@ -1023,8 +1019,8 @@ unittest_ubuntu_python3_cpu() { export MXNET_SUBGRAPH_VERBOSE=0 export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_quantization.xml --verbose tests/python/quantization + pytest --durations=50 --cov-report xml:tests_unittest.xml --verbose tests/python/unittest + pytest --durations=50 --cov-report xml:tests_quantization.xml --verbose tests/python/quantization } unittest_ubuntu_python3_cpu_mkldnn() { @@ -1035,8 +1031,8 @@ unittest_ubuntu_python3_cpu_mkldnn() { export MXNET_SUBGRAPH_VERBOSE=0 export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_mkl.xml --verbose tests/python/mkl + pytest --durations=50 --cov-report xml:tests_unittest.xml --verbose tests/python/unittest + pytest --durations=50 --cov-report xml:tests_mkl.xml --verbose tests/python/mkl } unittest_ubuntu_python3_gpu() { @@ -1048,7 +1044,7 @@ unittest_ubuntu_python3_gpu() { export CUDNN_VERSION=${CUDNN_VERSION:-7.0.3} export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose tests/python/gpu + pytest --durations=50 --cov-report xml:tests_gpu.xml --verbose tests/python/gpu } unittest_ubuntu_python3_gpu_cython() { @@ -1062,7 +1058,7 @@ unittest_ubuntu_python3_gpu_cython() { export MXNET_ENFORCE_CYTHON=1 export DMLC_LOG_STACK_TRACE_DEPTH=10 check_cython - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose 
tests/python/gpu + pytest --durations=50 --cov-report xml:tests_gpu.xml --verbose tests/python/gpu } unittest_ubuntu_python3_gpu_nocudnn() { @@ -1073,7 +1069,7 @@ unittest_ubuntu_python3_gpu_nocudnn() { export CUDNN_OFF_TEST_ONLY=true export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose tests/python/gpu + pytest --durations=50 --cov-report xml:tests_gpu.xml --verbose tests/python/gpu } unittest_ubuntu_tensorrt_gpu() { @@ -1086,7 +1082,7 @@ unittest_ubuntu_tensorrt_gpu() { export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 tests/python/tensorrt/lenet5_train.py - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_trt_gpu.xml --verbose --nocapture tests/python/tensorrt/ + pytest --durations=50 --cov-report xml:tests_trt_gpu.xml --verbose --capture=no tests/python/tensorrt/ } # quantization gpu currently only runs on P3 instances @@ -1100,7 +1096,7 @@ unittest_ubuntu_python3_quantization_gpu() { export CUDNN_VERSION=${CUDNN_VERSION:-7.0.3} export MXNET_ENABLE_CYTHON=0 export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_quantization_gpu.xml --verbose tests/python/quantization_gpu + pytest --durations=50 --cov-report xml:tests_quantization_gpu.xml --verbose tests/python/quantization_gpu } unittest_centos7_cpu_scala() { @@ -1233,8 +1229,8 @@ unittest_ubuntu_cpu_julia10() { unittest_centos7_cpu() { set -ex cd /work/mxnet - python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_unittest.xml --verbose tests/python/unittest - python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_train.xml --verbose tests/python/train + python3.6 -m pytest --durations=50 --cov-report xml:tests_unittest.xml --verbose tests/python/unittest + python3.6 -m pytest --durations=50 --cov-report xml:tests_train.xml --verbose tests/python/train } unittest_centos7_gpu() { @@ -1242,17 +1238,17 @@ unittest_centos7_gpu() { cd /work/mxnet export CUDNN_VERSION=${CUDNN_VERSION:-7.0.3} export DMLC_LOG_STACK_TRACE_DEPTH=10 - python3.6 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_gpu.xml --verbose tests/python/gpu + python3.6 -m pytest --durations=50 --cov-report xml:tests_gpu.xml --verbose tests/python/gpu } integrationtest_ubuntu_cpu_onnx() { set -ex export PYTHONPATH=./python/ export DMLC_LOG_STACK_TRACE_DEPTH=10 - tests/python-pytest/onnx/backend_test.py - pytest tests/python-pytest/onnx/mxnet_export_test.py - pytest tests/python-pytest/onnx/test_models.py - pytest tests/python-pytest/onnx/test_node.py + pytest tests/python/unittest/onnx/backend_test.py + pytest tests/python/unittest/onnx/mxnet_export_test.py + pytest tests/python/unittest/onnx/test_models.py + pytest tests/python/unittest/onnx/test_node.py } integrationtest_ubuntu_gpu_python() { @@ -1361,10 +1357,9 @@ test_ubuntu_cpu_python3() { source $VENV/bin/activate cd /work/mxnet/python - pip3 install nose nose-timer pip3 install -e . 
cd /work/mxnet - python3 -m "nose" $NOSE_COVERAGE_ARGUMENTS $NOSE_TIMER_ARGUMENTS --verbose tests/python/unittest + python3 -m pytest --durations=50 --verbose tests/python/unittest popd } @@ -1440,9 +1435,9 @@ nightly_test_large_tensor() { set -ex export PYTHONPATH=./python/ export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 tests/nightly/test_large_array.py:test_tensor - nosetests-3.4 tests/nightly/test_large_array.py:test_nn - nosetests-3.4 tests/nightly/test_large_array.py:test_basic + pytest tests/nightly/test_large_array.py::test_tensor + pytest tests/nightly/test_large_array.py::test_nn + pytest tests/nightly/test_large_array.py::test_basic } #Test Large Vectors @@ -1450,9 +1445,9 @@ nightly_test_large_vector() { set -ex export PYTHONPATH=./python/ export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 tests/nightly/test_large_vector.py:test_tensor - nosetests-3.4 tests/nightly/test_large_vector.py:test_nn - nosetests-3.4 tests/nightly/test_large_vector.py:test_basic + pytest tests/nightly/test_large_vector.py::test_tensor + pytest tests/nightly/test_large_vector.py::test_nn + pytest tests/nightly/test_large_vector.py::test_basic } #Test Large Vectors @@ -1460,9 +1455,9 @@ nightly_test_large_vector() { set -ex export PYTHONPATH=./python/ export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 tests/nightly/test_large_vector.py:test_tensor - nosetests-3.4 tests/nightly/test_large_vector.py:test_nn - nosetests-3.4 tests/nightly/test_large_vector.py:test_basic + pytest tests/nightly/test_large_vector.py::test_tensor + pytest tests/nightly/test_large_vector.py::test_nn + pytest tests/nightly/test_large_vector.py::test_basic } #Tests Amalgamation Build with 5 different sets of flags @@ -1502,26 +1497,6 @@ nightly_model_backwards_compat_train() { ./tests/nightly/model_backwards_compatibility_check/train_mxnet_legacy_models.sh } -nightly_straight_dope_python3_single_gpu_tests() { - set -ex - cd /work/mxnet/tests/nightly/straight_dope - export PYTHONPATH=/work/mxnet/python/ - export MXNET_TEST_KERNEL=python3 - export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_straight_dope_python3_single_gpu.xml \ - test_notebooks_single_gpu.py --nologcapture -} - -nightly_straight_dope_python3_multi_gpu_tests() { - set -ex - cd /work/mxnet/tests/nightly/straight_dope - export PYTHONPATH=/work/mxnet/python/ - export MXNET_TEST_KERNEL=python3 - export DMLC_LOG_STACK_TRACE_DEPTH=10 - nosetests-3.4 $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_straight_dope_python3_multi_gpu.xml \ - test_notebooks_multi_gpu.py --nologcapture -} - nightly_tutorial_test_ubuntu_python3_gpu() { set -ex cd /work/mxnet/docs @@ -1534,7 +1509,7 @@ nightly_tutorial_test_ubuntu_python3_gpu() { export PYTHONPATH=/work/mxnet/python/ export MXNET_TUTORIAL_TEST_KERNEL=python3 cd /work/mxnet/tests/tutorials - nosetests-3.4 $NOSE_TIMER_ARGUMENTS --with-xunit --xunit-file nosetests_tutorials.xml test_tutorials.py --nologcapture + pytest --durations=50 --cov-report xml:tests_tutorials.xml --capture=no test_tutorials.py } nightly_java_demo_test_cpu() { set -ex @@ -1558,8 +1533,8 @@ nightly_estimator() { export DMLC_LOG_STACK_TRACE_DEPTH=10 cd /work/mxnet/tests/nightly/estimator export PYTHONPATH=/work/mxnet/python/ - nosetests test_estimator_cnn.py - nosetests test_sentiment_rnn.py + pytest test_estimator_cnn.py + pytest test_sentiment_rnn.py } # For testing PRs diff --git a/ci/docker_cache.py b/ci/docker_cache.py old mode 100755 new mode 100644 diff --git a/ci/docker_login.py
b/ci/docker_login.py old mode 100755 new mode 100644 diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy index e5ce8de24485..96ff84dd6d55 100644 --- a/ci/jenkins/Jenkins_steps.groovy +++ b/ci/jenkins/Jenkins_steps.groovy @@ -808,8 +808,8 @@ def test_unix_python3_cpu() { python3_ut('ubuntu_cpu') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_cpu_unittest.xml') - utils.collect_test_results_unix('nosetests_quantization.xml', 'nosetests_python3_cpu_quantization.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_cpu_unittest.xml') + utils.collect_test_results_unix('tests_quantization.xml', 'tests_python3_cpu_quantization.xml') } } } @@ -825,8 +825,8 @@ def test_unix_python3_mkl_cpu() { python3_ut('ubuntu_cpu') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_cpu_unittest.xml') - utils.collect_test_results_unix('nosetests_quantization.xml', 'nosetests_python3_cpu_quantization.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_cpu_unittest.xml') + utils.collect_test_results_unix('tests_quantization.xml', 'tests_python3_cpu_quantization.xml') } } } @@ -842,7 +842,7 @@ def test_unix_python3_gpu() { python3_gpu_ut_cython('ubuntu_gpu_cu101') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_gpu.xml') + utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_gpu.xml') } } } @@ -858,7 +858,7 @@ def test_unix_python3_gpu_no_tvm_op() { python3_gpu_ut_cython('ubuntu_gpu_cu101') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_gpu.xml') + utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_gpu.xml') } } } @@ -875,7 +875,7 @@ def test_unix_python3_quantize_gpu() { utils.docker_run('ubuntu_gpu_cu101', 'unittest_ubuntu_python3_quantization_gpu', true) utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_quantization_gpu.xml', 'nosetests_python3_quantize_gpu.xml') + utils.collect_test_results_unix('tests_quantization_gpu.xml', 'tests_python3_quantize_gpu.xml') } } } @@ -891,8 +891,8 @@ def test_unix_python3_debug_cpu() { utils.unpack_and_init('cpu_debug', mx_cmake_lib_debug, true) python3_ut('ubuntu_cpu') } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_cpu_debug_unittest.xml') - utils.collect_test_results_unix('nosetests_quantization.xml', 'nosetests_python3_cpu_debug_quantization.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_cpu_debug_unittest.xml') + utils.collect_test_results_unix('tests_quantization.xml', 'tests_python3_cpu_debug_quantization.xml') } } } @@ -907,8 +907,8 @@ def test_unix_python3_cpu_no_tvm_op() { utils.unpack_and_init('cpu_openblas_no_tvm_op', mx_cmake_lib_no_tvm_op) python3_ut('ubuntu_cpu') } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_cpu_no_tvm_op_unittest.xml') - utils.collect_test_results_unix('nosetests_quantization.xml', 'nosetests_python3_cpu_no_tvm_op_quantization.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_cpu_no_tvm_op_unittest.xml') + utils.collect_test_results_unix('tests_quantization.xml', 'tests_python3_cpu_no_tvm_op_quantization.xml') } } } @@ -924,8 +924,8 @@ def test_unix_python3_mkldnn_cpu() { 
python3_ut_mkldnn('ubuntu_cpu') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_mkldnn_cpu_unittest.xml') - utils.collect_test_results_unix('nosetests_mkl.xml', 'nosetests_python3_mkldnn_cpu_mkl.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_mkldnn_cpu_unittest.xml') + utils.collect_test_results_unix('tests_mkl.xml', 'tests_python3_mkldnn_cpu_mkl.xml') } } } @@ -941,8 +941,8 @@ def test_unix_python3_mkldnn_mkl_cpu() { python3_ut_mkldnn('ubuntu_cpu') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_mkldnn_cpu_unittest.xml') - utils.collect_test_results_unix('nosetests_mkl.xml', 'nosetests_python3_mkldnn_cpu_mkl.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_mkldnn_cpu_unittest.xml') + utils.collect_test_results_unix('tests_mkl.xml', 'tests_python3_mkldnn_cpu_mkl.xml') } } } @@ -958,7 +958,7 @@ def test_unix_python3_mkldnn_gpu() { python3_gpu_ut('ubuntu_gpu_cu101') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_mkldnn_gpu.xml') + utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_mkldnn_gpu.xml') } } } @@ -974,7 +974,7 @@ def test_unix_python3_mkldnn_nocudnn_gpu() { python3_gpu_ut_nocudnn('ubuntu_gpu_cu101') utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_mkldnn_gpu_nocudnn.xml') + utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_mkldnn_gpu_nocudnn.xml') } } } @@ -991,7 +991,7 @@ def test_unix_python3_tensorrt_gpu() { utils.docker_run('ubuntu_gpu_tensorrt', 'unittest_ubuntu_tensorrt_gpu', true) utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_tensorrt.xml', 'nosetests_python3_tensorrt_gpu.xml') + utils.collect_test_results_unix('tests_tensorrt.xml', 'tests_python3_tensorrt_gpu.xml') } } } @@ -1301,8 +1301,8 @@ def test_centos7_python3_cpu() { utils.docker_run('centos7_cpu', 'unittest_centos7_cpu', false) utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_unittest.xml', 'nosetests_python3_centos7_cpu_unittest.xml') - utils.collect_test_results_unix('nosetests_train.xml', 'nosetests_python3_centos7_cpu_train.xml') + utils.collect_test_results_unix('tests_unittest.xml', 'tests_python3_centos7_cpu_unittest.xml') + utils.collect_test_results_unix('tests_train.xml', 'tests_python3_centos7_cpu_train.xml') } } } @@ -1320,7 +1320,7 @@ def test_centos7_python3_gpu() { utils.docker_run('centos7_gpu', 'unittest_centos7_gpu', true) utils.publish_test_coverage() } finally { - utils.collect_test_results_unix('nosetests_gpu.xml', 'nosetests_python3_centos7_gpu.xml') + utils.collect_test_results_unix('tests_gpu.xml', 'tests_python3_centos7_gpu.xml') } } } @@ -1352,8 +1352,8 @@ def test_windows_python3_gpu() { unstash 'windows_package_gpu' powershell 'ci/windows/test_py3_gpu.ps1' } finally { - utils.collect_test_results_windows('nosetests_forward.xml', 'nosetests_gpu_forward_windows_python3_gpu.xml') - utils.collect_test_results_windows('nosetests_operator.xml', 'nosetests_gpu_operator_windows_python3_gpu.xml') + utils.collect_test_results_windows('tests_forward.xml', 'tests_gpu_forward_windows_python3_gpu.xml') + utils.collect_test_results_windows('tests_operator.xml', 'tests_gpu_operator_windows_python3_gpu.xml') } } } @@ -1371,8 +1371,8 @@ def 
test_windows_python3_gpu_mkldnn() { unstash 'windows_package_gpu_mkldnn' powershell 'ci/windows/test_py3_gpu.ps1' } finally { - utils.collect_test_results_windows('nosetests_forward.xml', 'nosetests_gpu_forward_windows_python3_gpu_mkldnn.xml') - utils.collect_test_results_windows('nosetests_operator.xml', 'nosetests_gpu_operator_windows_python3_gpu_mkldnn.xml') + utils.collect_test_results_windows('tests_forward.xml', 'tests_gpu_forward_windows_python3_gpu_mkldnn.xml') + utils.collect_test_results_windows('tests_operator.xml', 'tests_gpu_operator_windows_python3_gpu_mkldnn.xml') } } } @@ -1390,7 +1390,7 @@ def test_windows_python3_cpu() { unstash 'windows_package_cpu' powershell 'ci/windows/test_py3_cpu.ps1' } finally { - utils.collect_test_results_windows('nosetests_unittest.xml', 'nosetests_unittest_windows_python3_cpu.xml') + utils.collect_test_results_windows('tests_unittest.xml', 'tests_unittest_windows_python3_cpu.xml') } } } diff --git a/ci/qemu/mxnet_requirements.txt b/ci/qemu/mxnet_requirements.txt index 2ab0fd9612e5..48b87b3773ca 100644 --- a/ci/qemu/mxnet_requirements.txt +++ b/ci/qemu/mxnet_requirements.txt @@ -3,5 +3,7 @@ requests<2.19.0,>=2.18.4 graphviz<0.9.0,>=0.8.1 numpy>1.16.0,<2.0.0 mock -nose -nose-timer +pytest==5.3.2 +pytest-env==0.6.2 +pytest-cov==2.8.1 +pytest-xdist==1.31.0 diff --git a/ci/qemu/test_requirements.txt b/ci/qemu/test_requirements.txt index 77037d89c673..1ea1a2e6ce56 100644 --- a/ci/qemu/test_requirements.txt +++ b/ci/qemu/test_requirements.txt @@ -1,3 +1,5 @@ mock -nose -nose-timer \ No newline at end of file +pytest==5.3.2 +pytest-env==0.6.2 +pytest-cov==2.8.1 +pytest-xdist==1.31.0 diff --git a/ci/safe_docker_run.py b/ci/safe_docker_run.py old mode 100755 new mode 100644 diff --git a/ci/test_docker_cache.py b/ci/test_docker_cache.py index aeb399ff6b45..81b315be4cff 100644 --- a/ci/test_docker_cache.py +++ b/ci/test_docker_cache.py @@ -270,8 +270,3 @@ def _assert_docker_build(lambda_func, expected_cache_hit_count: int, expected_ca assert output.count('Using cache') == expected_cache_hit_count, \ 'Expected {} "Using cache", got {}. 
Log:{}'.\ format(expected_cache_hit_count, output.count('Using cache'), output) - - -if __name__ == '__main__': - import nose - nose.main() diff --git a/ci/test_docker_login.py b/ci/test_docker_login.py index 6c989ade92ff..488295d9fa71 100644 --- a/ci/test_docker_login.py +++ b/ci/test_docker_login.py @@ -228,7 +228,3 @@ def test_main_default_argument_values(self, mock_login): with self.assertRaises(RuntimeError): main(["--secret-name", "name"]) - -if __name__ == '__main__': - import nose - nose.main() diff --git a/ci/test_safe_docker_run.py b/ci/test_safe_docker_run.py index 433d42e8b2ea..5b75c0eef4ba 100644 --- a/ci/test_safe_docker_run.py +++ b/ci/test_safe_docker_run.py @@ -420,8 +420,3 @@ def wait_for_container(name: str) -> bool: # The container should no longer exist assert get_container(container_name) is None - - -if __name__ == '__main__': - import nose - nose.main() diff --git a/ci/windows/test_py3_cpu.ps1 b/ci/windows/test_py3_cpu.ps1 index 8f520bb5b259..6dc1e19fd4c8 100644 --- a/ci/windows/test_py3_cpu.ps1 +++ b/ci/windows/test_py3_cpu.ps1 @@ -24,11 +24,11 @@ $env:MXNET_SUBGRAPH_VERBOSE=0 $env:MXNET_HOME=[io.path]::combine($PSScriptRoot, 'mxnet_home') C:\Python37\Scripts\pip install -r tests\requirements.txt -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_unittest.xml tests\python\unittest +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_unittest.xml tests\python\unittest if ($LastExitCode -ne 0) { Throw ("Error running unittest, python exited with status code " + ('{0:X}' -f $LastExitCode)) } -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_train.xml tests\python\train +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_train.xml tests\python\train if ($LastExitCode -ne 0) { Throw ("Error running train tests, python exited with status code " + ('{0:X}' -f $LastExitCode)) } # Adding this extra test since it's not possible to set env var on the fly in Windows. 
$env:MXNET_SAFE_ACCUMULATION=1 -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_unittest.xml tests\python\unittest\test_operator.py:test_norm +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_unittest.xml tests\python\unittest\test_operator.py::test_norm if ($LastExitCode -ne 0) { Throw ("Error running unittest, python exited with status code " + ('{0:X}' -f $LastExitCode)) } diff --git a/ci/windows/test_py3_gpu.ps1 b/ci/windows/test_py3_gpu.ps1 index 0ce3d953d486..3cc4d18cf72c 100644 --- a/ci/windows/test_py3_gpu.ps1 +++ b/ci/windows/test_py3_gpu.ps1 @@ -24,15 +24,15 @@ $env:MXNET_SUBGRAPH_VERBOSE=0 $env:MXNET_HOME=[io.path]::combine($PSScriptRoot, 'mxnet_home') C:\Python37\Scripts\pip install -r tests\requirements.txt -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_unittest.xml tests\python\unittest +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_unittest.xml tests\python\unittest if ($LastExitCode -ne 0) { Throw ("Error running unittest, python exited with status code " + ('{0:X}' -f $LastExitCode)) } -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_operator.xml tests\python\gpu\test_operator_gpu.py +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_operator.xml tests\python\gpu\test_operator_gpu.py if ($LastExitCode -ne 0) { Throw ("Error running tests, python exited with status code " + ('{0:X}' -f $LastExitCode)) } -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_forward.xml tests\python\gpu\test_forward.py +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_forward.xml tests\python\gpu\test_forward.py if ($LastExitCode -ne 0) { Throw ("Error running tests, python exited with status code " + ('{0:X}' -f $LastExitCode)) } -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_train.xml tests\python\train +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_train.xml tests\python\train if ($LastExitCode -ne 0) { Throw ("Error running tests, python exited with status code " + ('{0:X}' -f $LastExitCode)) } # Adding this extra test since it's not possible to set env var on the fly in Windows. $env:MXNET_SAFE_ACCUMULATION=1 -C:\Python37\python.exe -m nose -v --with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error --with-xunit --xunit-file nosetests_operator.xml tests\python\gpu\test_operator_gpu.py:test_norm +C:\Python37\python.exe -m pytest -v --durations=50 --cov-report xml:tests_operator.xml tests\python\gpu\test_operator_gpu.py::test_norm if ($LastExitCode -ne 0) { Throw ("Error running tests, python exited with status code " + ('{0:X}' -f $LastExitCode)) } diff --git a/conftest.py b/conftest.py new file mode 100644 index 000000000000..aedf2eb6ff36 --- /dev/null +++ b/conftest.py @@ -0,0 +1,221 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership.
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""conftest.py contains configuration for pytest. + +Configuration file for tests in tests/ and scripts/ folders. + +Note that higher-scoped fixtures (such as ``session``) are +instantiated before lower-scoped fixtures (such as ``function``). + +""" + +import logging +import os +import random + +import pytest + + +def pytest_sessionfinish(session, exitstatus): + if exitstatus == 5: # Don't fail if no tests were run + session.exitstatus = 0 + + +# * Random seed setup +def pytest_configure(): + """Pytest configuration hook to help reproduce test segfaults + + Sets and outputs rng seeds. + + The segfault-debug procedure on a module called test_module.py is: + + 1. run "pytest --verbose test_module.py". A seg-faulting output might be: + + [INFO] np, mx and python random seeds = 4018804151 + test_module.test1 ... ok + test_module.test2 ... Illegal instruction (core dumped) + + 2. Copy the module-starting seed into the next command, then run: + + MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py + + Output might be: + + [WARNING] **** module-level seed is set: all tests running deterministically **** + [INFO] np, mx and python random seeds = 4018804151 + test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516 + ok + test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594 + Illegal instruction (core dumped) + + 3. Copy the segfaulting-test seed into the command: + MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py::test2 + Output might be: + + [INFO] np, mx and python random seeds = 2481884723 + test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594 + Illegal instruction (core dumped) + + 4. Finally reproduce the segfault directly under gdb (might need additional os packages) + by editing the bottom of test_module.py to be + + if __name__ == '__main__': + logging.getLogger().setLevel(logging.DEBUG) + test2() + + MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py + + 5. When finished debugging the segfault, remember to unset any exported MXNET_ seed + variables in the environment to return to non-deterministic testing (a good thing). + """ + + try: + import numpy as np + import mxnet as mx + except: + print('Unable to import numpy/mxnet. Skipping conftest.') + return + module_seed_str = os.getenv('MXNET_MODULE_SEED') + if module_seed_str is None: + seed = np.random.randint(0, np.iinfo(np.int32).max) + else: + seed = int(module_seed_str) + logging.warning('*** module-level seed is set: ' + 'all tests running deterministically ***') + print('Setting module np/mx/python random seeds, ' + 'use MXNET_MODULE_SEED={} to reproduce.'.format(seed)) + + np.random.seed(seed) + mx.random.seed(seed) + random.seed(seed) + + # The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with + # the 'with_seed()' decoration. Inform the user of this once here at the module level.
+ if os.getenv('MXNET_TEST_SEED') is not None: + logging.warning('*** test-level seed set: all "@with_seed()" ' + 'tests run deterministically ***') + + +@pytest.hookimpl(tryfirst=True, hookwrapper=True) +def pytest_runtest_makereport(item, call): + """Make test outcome available to fixture. + + https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures + """ + # execute all other hooks to obtain the report object + outcome = yield + rep = outcome.get_result() + + # set a report attribute for each phase of a call, which can + # be "setup", "call", "teardown" + setattr(item, "rep_" + rep.when, rep) + + +@pytest.fixture(scope='function', autouse=True) +def function_scope_seed(request): + """A function scope fixture that manages rng seeds. + + This fixture automatically initializes the python, numpy and mxnet random + number generators randomly on every test run. + + def test_ok_with_random_data(): + ... + + To fix the seed used for a test case mark the test function with the + desired seed: + + @pytest.mark.seed(1) + def test_not_ok_with_random_data(): + '''This testcase actually works.''' + assert 17 == random.randint(0, 100) + + When a test fails, the fixture outputs the seed used. The user can then set + the environment variable MXNET_TEST_SEED to the value reported, then rerun + the test with: + + pytest --verbose -s -k <test name> + + To run a test repeatedly, install pytest-repeat and add the --count argument: + + pip install pytest-repeat + pytest --verbose -s -k <test name> --count 1000 + + """ + try: + import numpy as np + import mxnet as mx + except: + print('Unable to import numpy/mxnet. Skipping conftest.') + return + + seed = request.node.get_closest_marker('seed') + env_seed_str = os.getenv('MXNET_TEST_SEED') + + if seed is not None: + seed = seed.args[0] + assert isinstance(seed, int) + elif env_seed_str is not None: + seed = int(env_seed_str) + else: + seed = np.random.randint(0, np.iinfo(np.int32).max) + + post_test_state = np.random.get_state() + np.random.seed(seed) + mx.random.seed(seed) + random.seed(seed) + + seed_message = ('np/mx/python random seeds are set to ' + '{}, use MXNET_TEST_SEED={} to reproduce.') + seed_message = seed_message.format(seed, seed) + + # Always log seed on DEBUG log level. This makes sure we can find out the + # value of the seed even if the test case causes a segfault and subsequent + # teardown code is not run. + logging.debug(seed_message) + + yield # run the test + + if request.node.rep_setup.failed: + logging.info("Setting up a test failed: %s", request.node.nodeid) + elif request.node.rep_call.outcome == 'failed': + # Either request.node.rep_setup.failed or request.node.rep_setup.passed + # should be True + assert request.node.rep_setup.passed + # On failure also log seed on INFO log level + logging.info(seed_message) + + np.random.set_state(post_test_state) + + +# * Shared test fixtures +@pytest.fixture(params=[True, False]) +def hybridize(request): + return request.param + +@pytest.fixture(autouse=True) +def doctest(doctest_namespace): + try: + import numpy as np + import mxnet as mx + except: + print('Unable to import numpy/mxnet.
Skipping conftest.') + return + doctest_namespace['np'] = np + doctest_namespace['mx'] = mx + doctest_namespace['gluon'] = mx.gluon + import doctest + doctest.ELLIPSIS_MARKER = '-etc-' diff --git a/contrib/clojure-package/examples/captcha/gen_captcha.py b/contrib/clojure-package/examples/captcha/gen_captcha.py old mode 100755 new mode 100644 diff --git a/docker/install/python.sh b/docker/install/python.sh index ba71246babbf..a8d0fd9217c9 100755 --- a/docker/install/python.sh +++ b/docker/install/python.sh @@ -24,5 +24,4 @@ apt-get update && apt-get install -y python-dev python3-dev # the version of the pip shipped with ubuntu may be too lower, install a recent version here cd /tmp && wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py && python2 get-pip.py -pip2 install nose pylint numpy nose-timer requests Pillow -pip3 install nose pylint numpy nose-timer requests Pillow +pip3 install pylint numpy requests Pillow pytest==5.3.2 pytest-env==0.6.2 pytest-cov==2.8.1 pytest-xdist==1.31.0 diff --git a/docs/static_site/src/pages/get_started/build_from_source.md b/docs/static_site/src/pages/get_started/build_from_source.md index 1dfa95a82ade..ae391e4fe67e 100644 --- a/docs/static_site/src/pages/get_started/build_from_source.md +++ b/docs/static_site/src/pages/get_started/build_from_source.md @@ -221,7 +221,7 @@ make -j"$(nproc)" ``` - Run test_nccl.py script as follows. The test should complete. It does not produce any output. ``` bash -nosetests --verbose tests/python/gpu/test_nccl.py +pytest --verbose tests/python/gpu/test_nccl.py ``` **Recommendation to get the best performance out of NCCL:** diff --git a/example/image-classification/__init__.py b/example/image-classification/__init__.py old mode 100755 new mode 100644 diff --git a/example/image-classification/benchmark.py b/example/image-classification/benchmark.py old mode 100755 new mode 100644 diff --git a/example/image-classification/benchmark_score.py b/example/image-classification/benchmark_score.py old mode 100755 new mode 100644 diff --git a/example/image-classification/common/data.py b/example/image-classification/common/data.py old mode 100755 new mode 100644 diff --git a/example/image-classification/common/fit.py b/example/image-classification/common/fit.py old mode 100755 new mode 100644 diff --git a/example/image-classification/fine-tune.py b/example/image-classification/fine-tune.py old mode 100755 new mode 100644 diff --git a/example/image-classification/score.py b/example/image-classification/score.py old mode 100755 new mode 100644 diff --git a/example/image-classification/symbols/alexnet.py b/example/image-classification/symbols/alexnet.py old mode 100755 new mode 100644 diff --git a/example/image-classification/symbols/resnet-v1.py b/example/image-classification/symbols/resnet-v1.py old mode 100755 new mode 100644 diff --git a/example/image-classification/symbols/resnetv1.py b/example/image-classification/symbols/resnetv1.py old mode 100755 new mode 100644 diff --git a/example/image-classification/test_score.py b/example/image-classification/test_score.py old mode 100755 new mode 100644 diff --git a/example/image-classification/train_cifar10.py b/example/image-classification/train_cifar10.py old mode 100755 new mode 100644 diff --git a/example/image-classification/train_imagenet.py b/example/image-classification/train_imagenet.py old mode 100755 new mode 100644 diff --git a/example/image-classification/train_mnist.py b/example/image-classification/train_mnist.py old mode 100755 new mode 100644 diff --git 
a/example/reinforcement-learning/dqn/dqn_demo.py b/example/reinforcement-learning/dqn/dqn_demo.py old mode 100755 new mode 100644 diff --git a/example/reinforcement-learning/dqn/dqn_run_test.py b/example/reinforcement-learning/dqn/dqn_run_test.py old mode 100755 new mode 100644 diff --git a/example/ssd/data/demo/download_demo_images.py b/example/ssd/data/demo/download_demo_images.py old mode 100755 new mode 100644 diff --git a/example/ssd/dataset/pycocotools/__init__.py b/example/ssd/dataset/pycocotools/__init__.py old mode 100755 new mode 100644 diff --git a/example/ssd/dataset/pycocotools/coco.py b/example/ssd/dataset/pycocotools/coco.py old mode 100755 new mode 100644 diff --git a/example/ssd/demo.py b/example/ssd/demo.py old mode 100755 new mode 100644 diff --git a/example/ssd/tools/prepare_dataset.py b/example/ssd/tools/prepare_dataset.py old mode 100755 new mode 100644 diff --git a/example/ssd/train.py b/example/ssd/train.py old mode 100755 new mode 100644 diff --git a/python/README.md b/python/README.md index 148188537f48..e5d2faa86575 100644 --- a/python/README.md +++ b/python/README.md @@ -25,14 +25,14 @@ To install MXNet Python package, visit MXNet [Install Instruction](https://mxnet ## Running the unit tests -For running unit tests, you will need the [nose PyPi package](https://pypi.python.org/pypi/nose). To install: +For running unit tests, you will need the [pytest PyPi package](https://pypi.python.org/pypi/pytest). To install: ```bash -pip install --upgrade nose +pip install --upgrade pytest ``` -Once ```nose``` is installed, run the following from MXNet root directory (please make sure the installation path of ```nosetests``` is included in your ```$PATH``` environment variable): +Once ```pytest``` is installed, run the following from MXNet root directory (please make sure the installation path of ```pytest``` is included in your ```$PATH``` environment variable): ``` -nosetests tests/python/unittest -nosetests tests/python/train +pytest tests/python/unittest +pytest tests/python/train ``` diff --git a/python/mxnet/contrib/amp/amp.py b/python/mxnet/contrib/amp/amp.py old mode 100755 new mode 100644 diff --git a/python/mxnet/contrib/amp/loss_scaler.py b/python/mxnet/contrib/amp/loss_scaler.py old mode 100755 new mode 100644 diff --git a/python/mxnet/initializer.py b/python/mxnet/initializer.py old mode 100755 new mode 100644 diff --git a/python/mxnet/module/executor_group.py b/python/mxnet/module/executor_group.py old mode 100755 new mode 100644 diff --git a/python/mxnet/optimizer/optimizer.py b/python/mxnet/optimizer/optimizer.py old mode 100755 new mode 100644 diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py old mode 100755 new mode 100644 diff --git a/tests/README.md b/tests/README.md index de5d8107a790..b59335ea1593 100644 --- a/tests/README.md +++ b/tests/README.md @@ -68,7 +68,7 @@ Ninja is a build tool (like make) that prioritizes building speed. 
If you will b ``` An example for running python tests would be ``` -ci/build.py --platform build_ubuntu_cpu_mkldnn /work/runtime_functions.sh unittest_ubuntu_python3_cpu PYTHONPATH=./python/ nosetests-2.7 tests/python/unittest +ci/build.py --platform build_ubuntu_cpu_mkldnn /work/runtime_functions.sh unittest_ubuntu_python3_cpu PYTHONPATH=./python/ pytest tests/python/unittest ``` diff --git a/tests/jenkins/run_test.sh b/tests/jenkins/run_test.sh index 5ded74291f25..48bb4da53fc4 100755 --- a/tests/jenkins/run_test.sh +++ b/tests/jenkins/run_test.sh @@ -42,16 +42,10 @@ export MXNET_ENGINE_INFO=false export PYTHONPATH=$(pwd)/python echo "BUILD python_test" -nosetests --verbose tests/python/unittest || exit -1 -nosetests --verbose tests/python/gpu/test_operator_gpu.py || exit -1 -nosetests --verbose tests/python/gpu/test_forward.py || exit -1 -nosetests --verbose tests/python/train || exit -1 - -echo "BUILD python3_test" -nosetests3 --verbose tests/python/unittest || exit -1 -nosetests3 --verbose tests/python/gpu/test_operator_gpu.py || exit -1 -nosetests3 --verbose tests/python/gpu/test_forward.py || exit -1 -nosetests3 --verbose tests/python/train || exit -1 +pytest --verbose tests/python/unittest || exit -1 +pytest --verbose tests/python/gpu/test_operator_gpu.py || exit -1 +pytest --verbose tests/python/gpu/test_forward.py || exit -1 +pytest --verbose tests/python/train || exit -1 echo "BUILD scala_test" export PATH=$PATH:/opt/apache-maven/bin diff --git a/tests/jenkins/run_test_amzn_linux_gpu.sh b/tests/jenkins/run_test_amzn_linux_gpu.sh index 57d9c7884088..a257b9684ba0 100755 --- a/tests/jenkins/run_test_amzn_linux_gpu.sh +++ b/tests/jenkins/run_test_amzn_linux_gpu.sh @@ -53,12 +53,8 @@ export MXNET_ENGINE_INFO=false export PYTHONPATH=${PWD}/python echo "BUILD python_test" -nosetests --verbose tests/python/unittest -nosetests --verbose tests/python/train - -echo "BUILD python3_test" -nosetests3 --verbose tests/python/unittest -nosetests3 --verbose tests/python/train +pytest --verbose tests/python/unittest +pytest --verbose tests/python/train #echo "BUILD julia_test" #export MXNET_HOME="${PWD}" diff --git a/tests/jenkins/run_test_ubuntu.sh b/tests/jenkins/run_test_ubuntu.sh index 0459d2cc8ec5..9c3d3c55c852 100755 --- a/tests/jenkins/run_test_ubuntu.sh +++ b/tests/jenkins/run_test_ubuntu.sh @@ -54,16 +54,10 @@ make -j$(nproc) export PYTHONPATH=${PWD}/python echo "BUILD python_test" -nosetests --verbose tests/python/unittest || exit 1 -nosetests --verbose tests/python/gpu/test_operator_gpu.py || exit 1 -nosetests --verbose tests/python/gpu/test_forward.py || exit 1 -nosetests --verbose tests/python/train || exit 1 - -echo "BUILD python3_test" -nosetests3 --verbose tests/python/unittest || exit 1 -nosetests3 --verbose tests/python/gpu/test_operator_gpu.py || exit 1 -nosetests3 --verbose tests/python/gpu/test_forward.py || exit 1 -nosetests3 --verbose tests/python/train || exit 1 +pytest --verbose tests/python/unittest || exit 1 +pytest --verbose tests/python/gpu/test_operator_gpu.py || exit 1 +pytest --verbose tests/python/gpu/test_forward.py || exit 1 +pytest --verbose tests/python/train || exit 1 echo "BUILD scala_test" export PATH=$PATH:/opt/apache-maven/bin diff --git a/tests/nightly/broken_link_checker_test/test_broken_links.py b/tests/nightly/broken_link_checker_test/test_broken_links.py old mode 100755 new mode 100644 diff --git a/tests/nightly/compilation_warnings/process_output.py b/tests/nightly/compilation_warnings/process_output.py old mode 100755 new mode 100644 diff --git 
a/tests/nightly/estimator/test_estimator_cnn.py b/tests/nightly/estimator/test_estimator_cnn.py index af519536dbed..0d113cdf4984 100644 --- a/tests/nightly/estimator/test_estimator_cnn.py +++ b/tests/nightly/estimator/test_estimator_cnn.py @@ -155,7 +155,3 @@ def test_estimator_gpu(): assert acc.get()[1] > 0.80 - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/nightly/estimator/test_sentiment_rnn.py b/tests/nightly/estimator/test_sentiment_rnn.py index ab124ba95db3..367c69b88a0b 100644 --- a/tests/nightly/estimator/test_sentiment_rnn.py +++ b/tests/nightly/estimator/test_sentiment_rnn.py @@ -280,7 +280,3 @@ def test_estimator_gpu(): assert acc.get()[1] > 0.70 - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/nightly/test_kvstore.py b/tests/nightly/test_kvstore.py old mode 100755 new mode 100644 diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py index 5fb0ff81da6b..0f0b373409b9 100644 --- a/tests/nightly/test_large_array.py +++ b/tests/nightly/test_large_array.py @@ -28,7 +28,6 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor from mxnet import gluon, nd from common import with_seed, with_post_test_cleanup -from nose.tools import with_setup import unittest # dimension constants @@ -469,7 +468,7 @@ def check_col2im(): assert res.shape[2] == 2 assert res.shape[3] == 2 assert res.shape[4] == 1 - + def check_embedding(): data = nd.random_normal(shape=(LARGE_TENSOR_SHAPE, 1)) weight = nd.random_normal(shape=(LARGE_TENSOR_SHAPE, 1)) @@ -480,7 +479,7 @@ def check_embedding(): assert out.shape[0] == LARGE_TENSOR_SHAPE assert out.shape[1] == 1 - + def check_spatial_transformer(): data = nd.random_normal(shape=(2, 2**29, 1, 6)) loc = nd.random_normal(shape=(2, 6)) @@ -495,7 +494,7 @@ def check_spatial_transformer(): assert res.shape[1] == 536870912 assert res.shape[2] == 2 assert res.shape[3] == 6 - + def check_ravel(): data = nd.random_normal(shape=(2, LARGE_TENSOR_SHAPE)) shape = (2, 10) @@ -530,7 +529,7 @@ def check_multi_lars(): # Trigger lazy evaluation of the output NDArray and ensure that it has been filled assert type(out[0, 0].asscalar()).__name__ == 'float32' - + def check_rnn(): data = nd.random_normal(shape=(RNN_LARGE_TENSOR, 4, 4)) parameters_relu_tanh = nd.random_normal(shape=(7,)) @@ -547,10 +546,10 @@ def check_rnn(): out_relu = nd.RNN(data=data, parameters=parameters_relu_tanh, state=state, mode=mode_relu, state_size=state_size, num_layers=num_layers) - + out_tanh = nd.RNN(data=data, parameters=parameters_relu_tanh, state=state, mode=mode_tanh, state_size=state_size, num_layers=num_layers) - + out_lstm = nd.RNN(data=data, parameters=parameters_lstm, state=state, mode=mode_lstm, state_cell=state_cell, state_size=state_size, num_layers=num_layers) @@ -1546,7 +1545,7 @@ def check_logical_xor(a, b): def create_input_for_rounding_ops(): # Creates an vector with values (-LARGE_X/2 .... -2, -1, 0, 1, 2, .... , LARGE_X/2-1) # then divides each element by 2 i.e (-LARGE_X/4 .... -1, -0.5, 0, 0.5, 1, .... 
, LARGE_X/4-1) - # and finally broadcasts to + # and finally broadcasts to inp = nd.arange(-LARGE_X//2, LARGE_X//2, dtype=np.float64).reshape(1, LARGE_X) inp = inp/2 inp = nd.broadcast_to(inp, (SMALL_Y, LARGE_X)) @@ -1559,7 +1558,7 @@ def assert_correctness_of_rounding_ops(output, mid, expected_vals): for i in range(len(output_idx_to_inspect)): assert output[1][output_idx_to_inspect[i]] == expected_vals[i] - # TODO(access2rohit): merge similar tests in large vector and array into one file. + # TODO(access2rohit): merge similar tests in large vector and array into one file. def check_rounding_ops(): x = create_input_for_rounding_ops() def check_ceil(): @@ -1819,7 +1818,3 @@ def test_sparse_dot(): assert out.asnumpy()[0][0] == 2 assert out.shape == (2, 2) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/nightly/test_large_vector.py b/tests/nightly/test_large_vector.py index bbad75627769..57fecaafc0ac 100644 --- a/tests/nightly/test_large_vector.py +++ b/tests/nightly/test_large_vector.py @@ -28,7 +28,6 @@ from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, create_vector from mxnet import gluon, nd from tests.python.unittest.common import with_seed -from nose.tools import with_setup import unittest # dimension constants @@ -370,7 +369,7 @@ def check_slice_axis(): def check_gather(): arr = mx.nd.ones(LARGE_X) - # Passing dtype=np.int64 since randomly generated indices are + # Passing dtype=np.int64 since randomly generated indices are # very large that exceeds int32 limits. idx = mx.nd.random.randint(0, LARGE_X, 10, dtype=np.int64) # Calls gather_nd internally @@ -1063,7 +1062,3 @@ def check_minimum(): check_maximum() check_minimum() - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/nightly/test_np_random.py b/tests/nightly/test_np_random.py index 09ebdad90bd2..63ac4b9db60e 100644 --- a/tests/nightly/test_np_random.py +++ b/tests/nightly/test_np_random.py @@ -173,7 +173,3 @@ def test_np_laplace(): generator_mx_np = lambda x: np.random.laplace(loc, scale, size=x, ctx=ctx, dtype=dtype).asnumpy() verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/nightly/test_optimizer.py b/tests/nightly/test_optimizer.py index c4e264b79b65..0a87368d991e 100644 --- a/tests/nightly/test_optimizer.py +++ b/tests/nightly/test_optimizer.py @@ -88,6 +88,3 @@ def test_lars(): accuracy = acc.get()[1] assert accuracy > 0.98, "LeNet-5 training accuracy on MNIST was too low" -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_contrib_amp.py b/tests/python/gpu/test_contrib_amp.py index 527f8534969c..398dfc394eff 100644 --- a/tests/python/gpu/test_contrib_amp.py +++ b/tests/python/gpu/test_contrib_amp.py @@ -24,14 +24,14 @@ import collections import ctypes import mxnet.contrib.amp as amp -from nose.tools import assert_raises +import pytest from mxnet.test_utils import set_default_context, download_model, same_symbol_structure from mxnet.gluon.model_zoo.vision import get_model from mxnet.gluon import SymbolBlock, nn, rnn from mxnet.contrib.amp import amp curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import with_seed, teardown, assert_raises_cudnn_not_satisfied +from common import with_seed, teardown_module, assert_raises_cudnn_not_satisfied 
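The `teardown` → `teardown_module` renames above are load-bearing rather than cosmetic: pytest only recognizes the module-level fixture hooks under the exact names `setup_module` and `teardown_module`, whereas nose also accepted the bare name `teardown`. A minimal sketch of the hook pair, with an illustrative test body:

```python
import mxnet as mx

def setup_module():
    # pytest calls this once, before the first test in this file runs.
    mx.random.seed(42)

def teardown_module():
    # pytest calls this once, after the last test in this file; under
    # nose the same hook could simply be named `teardown`.
    mx.nd.waitall()

def test_ones():
    assert mx.nd.ones((2, 2)).sum().asscalar() == 4.0
```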
sys.path.insert(0, os.path.join(curr_path, '../train')) from test_bucketing import train_model set_default_context(mx.gpu(0)) @@ -119,18 +119,18 @@ def check_amp_convert_symbol(): "convert_symbol generating wrong computation graph" # convert_symbol called with incorrect inputs - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="float16", target_dtype_ops=["FullyConnected"], fp32_ops=["elemwise_add"]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="float16", target_dtype_ops=["FullyConnected"], fp32_ops=["Activation"], conditional_fp32_ops=[('Activation', 'act_type', ['selu'])]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="float16", target_dtype_ops=["Activation"], fp32_ops=["Activation"], conditional_fp32_ops=[('Activation', 'act_type', ['selu'])]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="float16", target_dtype_ops=["FullyConnected"], fp32_ops=["FullyConnected"]) @@ -484,7 +484,3 @@ def test_fp16_casting(): out = mx.sym.split(concat_res, axis=1, num_outputs=2) final_res = amp.convert_symbol(out) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_deferred_compute_gpu.py b/tests/python/gpu/test_deferred_compute_gpu.py index 7503c7ba102e..9802d2b57d24 100644 --- a/tests/python/gpu/test_deferred_compute_gpu.py +++ b/tests/python/gpu/test_deferred_compute_gpu.py @@ -24,10 +24,6 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) # We import all tests from ../unittest/test_deferred_compute.py -# They will be detected by nose, as long as the current file has a different filename +# They will be detected by the test framework, as long as the current file has a different filename from test_deferred_compute import * - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_forward.py b/tests/python/gpu/test_forward.py index 02b0256024d3..2ec5ee262e0c 100644 --- a/tests/python/gpu/test_forward.py +++ b/tests/python/gpu/test_forward.py @@ -22,7 +22,7 @@ from mxnet.test_utils import * curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.gluon import utils import tarfile diff --git a/tests/python/gpu/test_fusion.py b/tests/python/gpu/test_fusion.py index c0a8bdbb0807..f3539c5d32f7 100644 --- a/tests/python/gpu/test_fusion.py +++ b/tests/python/gpu/test_fusion.py @@ -310,6 +310,3 @@ def test_fusion_reshape_executor(): out = f.forward(is_train=False, data1=data, data2=data) assert out[0].sum().asscalar() == 150 -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_gluon_contrib_gpu.py b/tests/python/gpu/test_gluon_contrib_gpu.py index 348e9f77acc8..9b43c797a252 100644 --- a/tests/python/gpu/test_gluon_contrib_gpu.py +++ b/tests/python/gpu/test_gluon_contrib_gpu.py @@ -85,6 +85,3 @@ def test_ModulatedDeformableConvolution(): y = net(x) y.backward() -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_gluon_gpu.py
b/tests/python/gpu/test_gluon_gpu.py index 7e90854d923f..4a398d1c6a4c 100644 --- a/tests/python/gpu/test_gluon_gpu.py +++ b/tests/python/gpu/test_gluon_gpu.py @@ -30,7 +30,7 @@ curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, run_in_spawned_process +from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, run_in_spawned_process from test_gluon import * from test_loss import * from test_gluon_rnn import * @@ -639,7 +639,3 @@ def test_gemms_true_fp16(): atol=atol, rtol=rtol) os.environ["MXNET_FC_TRUE_FP16"] = "0" - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_gluon_model_zoo_gpu.py b/tests/python/gpu/test_gluon_model_zoo_gpu.py index 6f559db62808..ac50bb86e7ab 100644 --- a/tests/python/gpu/test_gluon_model_zoo_gpu.py +++ b/tests/python/gpu/test_gluon_model_zoo_gpu.py @@ -27,7 +27,7 @@ import unittest curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs) @@ -180,6 +180,3 @@ def test_training(): gpu_param = gpu_params.get(k) assert_almost_equal(cpu_param.data(), gpu_param.data(), rtol=1e-3, atol=1e-3) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_gluon_transforms.py b/tests/python/gpu/test_gluon_transforms.py index e777f0bb90e2..23addbffc20f 100644 --- a/tests/python/gpu/test_gluon_transforms.py +++ b/tests/python/gpu/test_gluon_transforms.py @@ -27,7 +27,7 @@ from mxnet.test_utils import almost_equal, same curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import assertRaises, setup_module, with_seed, teardown +from common import assertRaises, setup_module, with_seed, teardown_module from test_gluon_data_vision import test_to_tensor, test_normalize, test_crop_resize set_default_context(mx.gpu(0)) diff --git a/tests/python/gpu/test_kvstore_gpu.py b/tests/python/gpu/test_kvstore_gpu.py index a986f70d7525..f83220ad2e61 100644 --- a/tests/python/gpu/test_kvstore_gpu.py +++ b/tests/python/gpu/test_kvstore_gpu.py @@ -24,7 +24,7 @@ from mxnet.test_utils import assert_almost_equal, default_context, EnvManager curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module shape = (4, 4) keys = [5, 7, 11] @@ -40,12 +40,12 @@ def init_kv_with_str(stype='default', kv_type='local'): return kv # 1. Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump. -# 2. Test seed 1155716252 (module seed 1032824746) resulted in py3-mkldnn-gpu have error +# 2. Test seed 1155716252 (module seed 1032824746) resulted in py3-mkldnn-gpu have error # src/operator/nn/mkldnn/mkldnn_base.cc:567: Check failed: similar # Both of them are not reproducible, so this test is back on random seeds. 
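As the comment above notes, the decorator stack on such seed-sensitive tests survives this migration unchanged, because `@with_seed()` manages RNG state itself rather than relying on nose. A sketch of the pattern (the GPU requirement and test body are illustrative):

```python
import unittest
import mxnet as mx
from common import with_seed  # seeds the np/mx/python RNGs and logs the seed

@with_seed()
@unittest.skipIf(mx.context.num_gpus() < 2, "needs more than 1 GPU")
def test_two_gpu_ones():
    outs = [mx.nd.ones((2, 2), ctx=mx.gpu(i)) for i in range(2)]
    assert sum(o.sum().asscalar() for o in outs) == 8.0
```

pytest honors `unittest.skipIf` on plain functions because the wrapped test raises `unittest.SkipTest`, which pytest reports as a skip.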
@with_seed() @unittest.skipIf(mx.context.num_gpus() < 2, "test_rsp_push_pull needs more than 1 GPU") -@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189") +@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14189") def test_rsp_push_pull(): def check_rsp_push_pull(kv_type, sparse_pull, is_push_cpu=True): kv = init_kv_with_str('row_sparse', kv_type) @@ -134,6 +134,3 @@ def test_rsp_push_pull_large_rowid(): kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64')) assert(out.indices.shape[0] == num_rows) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_numpy_fallback.py b/tests/python/gpu/test_numpy_fallback.py index 1499e1b15c57..71ca09c5ab2e 100644 --- a/tests/python/gpu/test_numpy_fallback.py +++ b/tests/python/gpu/test_numpy_fallback.py @@ -26,7 +26,6 @@ import mxnet as mx import scipy.stats as ss import scipy.special as scipy_special -from nose.tools import assert_raises from mxnet import np, npx from mxnet.base import MXNetError from mxnet.test_utils import assert_almost_equal, use_np, set_default_context @@ -109,7 +108,3 @@ def empty_ret_func(): # does not support functions with no return values assertRaises(ValueError, empty_ret_func) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_operator_gpu.py b/tests/python/gpu/test_operator_gpu.py index a017b8ce59c2..82cefcd2f2b1 100644 --- a/tests/python/gpu/test_operator_gpu.py +++ b/tests/python/gpu/test_operator_gpu.py @@ -23,14 +23,14 @@ import mxnet as mx import numpy as np import unittest -from nose.tools import assert_raises +import pytest from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal, assert_allclose from mxnet.base import MXNetError from mxnet import autograd curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied +from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied from common import run_in_spawned_process from test_operator import * from test_numpy_ndarray import * @@ -1342,7 +1342,7 @@ def test_bilinear_resize_op(): check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, height=10, width=5, align_corners=False) - check_consistency(sym, ctx_list) + check_consistency(sym, ctx_list) sym = mx.sym.contrib.BilinearResize2D(data, None, scale_height=2, scale_width=0.5, mode='odd_scale', align_corners=True) check_consistency(sym, ctx_list) @@ -2274,7 +2274,7 @@ def test_kernel_error_checking(): def test_incorrect_gpu(): # Try setting dev_id to a really big number - assert_raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001)) + pytest.raises(MXNetError, mx.nd.ones, (2,2), ctx=mx.gpu(100001)) @with_seed() def test_batchnorm_backwards_notrain(): @@ -2526,7 +2526,7 @@ def run_math(op, shape, dtype="float32", check_value=True): def test_math(): ops = ['log', 'erf', 'square'] check_value= True - shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]] + shape_lst = [[1000], [100,1000], [10,100,100], [10,100,100,100]] dtypes = ["float32", "float64"] for shape in shape_lst: for dtype in dtypes: @@ -2548,6 +2548,3 @@ def test_arange_like_dtype(): for v in out: assert v.dtype == t -if __name__ == '__main__': - import nose - 
nose.runmodule() diff --git a/tests/python/gpu/test_predictor.py b/tests/python/gpu/test_predictor.py index 4838a76c7cb1..592733a90174 100644 --- a/tests/python/gpu/test_predictor.py +++ b/tests/python/gpu/test_predictor.py @@ -31,7 +31,7 @@ from mxnet.contrib.amp import amp from mxnet.base import NDArrayHandle, py_str sys.path.insert(0, os.path.join(curr_path, '../unittest')) -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module @with_seed() def test_predictor_with_dtype(): @@ -122,7 +122,3 @@ def test_predictor_amp(): cast_optional_params=True) compare_module_cpredict(result_sym, result_arg_params, result_aux_params, monitor_callback=True) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/gpu/test_tvm_bridge.py b/tests/python/gpu/test_tvm_bridge.py index 5c87536bdbae..7a4339c6ae94 100644 --- a/tests/python/gpu/test_tvm_bridge.py +++ b/tests/python/gpu/test_tvm_bridge.py @@ -61,7 +61,3 @@ def check(target, dtype): "float32", "float64"]: check(tgt, dtype) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/mkl/test_bf16_operator.py b/tests/python/mkl/test_bf16_operator.py index b275c96bbb67..888b5d20b908 100644 --- a/tests/python/mkl/test_bf16_operator.py +++ b/tests/python/mkl/test_bf16_operator.py @@ -25,7 +25,6 @@ import ctypes import itertools import mxnet.contrib.amp as amp -from nose.tools import assert_raises from mxnet.test_utils import set_default_context, download_model, same_symbol_structure, assert_almost_equal_with_err, rand_shape_nd from mxnet.gluon.model_zoo.vision import get_model from mxnet.gluon import SymbolBlock, nn, rnn @@ -55,7 +54,7 @@ def check_operator_accuracy(sym_fp32, sym_bf16, data_shape, num_input_data=1, bf the relative threshold atol: float the absolute threshold - etol: float + etol: float The error rate threshold, allow a small amount of value not consistent between bf16 and fp32 """ if not isinstance(data_shape, tuple): @@ -105,7 +104,7 @@ def check_operator_accuracy(sym_fp32, sym_bf16, data_shape, num_input_data=1, bf exe_bf16.arg_dict[arg_name][:] = arg_params_fp32[arg_name] else: exe_bf16.arg_dict[arg_name][:] = mx.nd.amp_cast(arg_params_fp32[arg_name], dtype=bfloat16) - + for aux_name in aux_names: if bf16_use_fp32_params: exe_bf16.aux_dict[aux_name][:] = aux_params_fp32[aux_name] @@ -169,7 +168,7 @@ def test_bf16_pooling(): pool_conventions = ["full", "valid"] for new_params in itertools.product(data_shapes, pool_types, pool_conventions): pool_params.update({"pool_type": new_params[1], "pooling_convention": new_params[2]}) - + data_sym_fp32 = mx.sym.Variable(name='data') data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) pool_fp32 = mx.sym.Pooling(data_sym_fp32, **pool_params) @@ -230,7 +229,7 @@ def test_bf16_abs(): data_sym_bf16 = mx.sym.Variable(name='data', dtype=bfloat16) sym_fp32 = mx.sym.abs(data_sym_fp32) sym_bf16 = mx.sym.abs(data_sym_bf16) - + check_operator_accuracy(sym_fp32, sym_bf16, data_shape, bf16_use_fp32_params=True) @with_seed() @@ -285,6 +284,3 @@ def test_bf16_fallback(): conv_bf16 = mx.sym.Convolution(data_sym_bf16, **conv_params) check_operator_accuracy(sym_fp32=conv_fp32, sym_bf16=conv_bf16, data_shape=(3, 32, 28, 28, 4), bf16_use_fp32_params=False) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/mkl/test_contrib_amp.py b/tests/python/mkl/test_contrib_amp.py index 5d5774099255..ec88851751ff 100644 --- a/tests/python/mkl/test_contrib_amp.py +++ 
b/tests/python/mkl/test_contrib_amp.py @@ -24,7 +24,7 @@ import collections import ctypes import mxnet.contrib.amp as amp -from nose.tools import assert_raises +import pytest from mxnet.test_utils import set_default_context, download_model, same_symbol_structure, assert_almost_equal from mxnet.gluon.model_zoo.vision import get_model from mxnet.gluon import SymbolBlock, nn, rnn @@ -117,18 +117,18 @@ def check_amp_convert_symbol(): "convert_symbol generating wrong computation graph" # convert_symbol called with incorrect inputs - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="bfloat16", target_dtype_ops=["FullyConnected"], fp32_ops=["elemwise_add"]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="bfloat16", target_dtype_ops=["FullyConnected"], fp32_ops=["Activation"], conditional_fp32_ops=[('Activation', 'act_type', ['selu'])]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="bfloat16", target_dtype_ops=["Activation"], fp32_ops=["Activation"], conditional_fp32_ops=[('Activation', 'act_type', ['selu'])]) - assert_raises(AssertionError, amp.convert_symbol, res, + pytest.raises(AssertionError, amp.convert_symbol, res, target_dtype="bfloat16", target_dtype_ops=["FullyConnected"], fp32_ops=["FullyConnected"]) @@ -495,7 +495,3 @@ def test_bf16_casting(): exe = final_res.simple_bind(ctx=mx.cpu(), data=(1, 2), data2=(1, 2)) assert exe.arg_arrays[0].dtype == bfloat16 - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py index b52bb03a80c8..59b860755750 100644 --- a/tests/python/mkl/test_mkldnn.py +++ b/tests/python/mkl/test_mkldnn.py @@ -642,6 +642,3 @@ def check_elemwise_add_training(stype): for stype in stypes: check_elemwise_add_training(stype) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/mkl/test_quantization_mkldnn.py b/tests/python/mkl/test_quantization_mkldnn.py index 8ba2f2b01feb..f2432175720a 100644 --- a/tests/python/mkl/test_quantization_mkldnn.py +++ b/tests/python/mkl/test_quantization_mkldnn.py @@ -25,7 +25,7 @@ from test_quantization import * if __name__ == '__main__': - import nose - nose.runmodule() + import pytest + pytest.main() del os.environ['ENABLE_MKLDNN_QUANTIZATION_TEST'] del os.environ['MXNET_SUBGRAPH_BACKEND'] diff --git a/tests/python/mkl/test_subgraph.py b/tests/python/mkl/test_subgraph.py index 65b73e438ea6..862fd9078c69 100644 --- a/tests/python/mkl/test_subgraph.py +++ b/tests/python/mkl/test_subgraph.py @@ -999,7 +999,3 @@ def test_quantized_fc_bias_overflow(): helper_quantized_fc_bias_overflow(-1e-6, +1e-6, -1e-6, +1e-6) helper_quantized_fc_bias_overflow(0, 0, 0, 0) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/profiling/test_nvtx.py b/tests/python/profiling/test_nvtx.py index 507b438e300d..a80e33ec03b0 100644 --- a/tests/python/profiling/test_nvtx.py +++ b/tests/python/profiling/test_nvtx.py @@ -46,7 +46,3 @@ def test_nvtx_ranges_present_in_profile(): # Verify that we have some expected output from the engine. 
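The `assert_raises` → `pytest.raises` substitutions in these hunks lean on the call form being a drop-in replacement for `nose.tools.assert_raises`; the context-manager form is the equivalent, more idiomatic spelling. A sketch of both, reusing the out-of-range device id trick from `test_operator_gpu.py`:

```python
import pytest
import mxnet as mx
from mxnet.base import MXNetError

def test_incorrect_gpu():
    # Call form: arguments after the callable are forwarded to it,
    # matching nose.tools.assert_raises exactly.
    pytest.raises(MXNetError, mx.nd.ones, (2, 2), ctx=mx.gpu(100001))

    # Context-manager form: same check, clearer for multi-line bodies.
    with pytest.raises(MXNetError):
        mx.nd.ones((2, 2), ctx=mx.gpu(100001))
```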
assert "Range \"WaitForVar\"" in profiler_output - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py index 8c6100d50765..d2179759ef8f 100644 --- a/tests/python/quantization/test_quantization.py +++ b/tests/python/quantization/test_quantization.py @@ -1253,7 +1253,3 @@ def get_threshold(nd): assert 'layer1' in th_dict assert_almost_equal(np.array([th_dict['layer1'][1]]), expected_threshold, rtol=1e-2, atol=1e-4) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/quantization_gpu/test_quantization_gpu.py b/tests/python/quantization_gpu/test_quantization_gpu.py index 4f2d70effd49..0f14fa1ac961 100644 --- a/tests/python/quantization_gpu/test_quantization_gpu.py +++ b/tests/python/quantization_gpu/test_quantization_gpu.py @@ -25,8 +25,3 @@ from test_quantization import * set_default_context(mx.gpu(0)) - - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/tensorrt/lenet5_train.py b/tests/python/tensorrt/lenet5_train.py old mode 100755 new mode 100644 diff --git a/tests/python/tensorrt/test_cvnets.py b/tests/python/tensorrt/test_cvnets.py index 4b8eb48e926c..99312d76dc7a 100644 --- a/tests/python/tensorrt/test_cvnets.py +++ b/tests/python/tensorrt/test_cvnets.py @@ -167,8 +167,3 @@ def test_tensorrt_on_cifar_resnets(batch_size=32, tolerance=0.1, num_workers=1): finally: mx.contrib.tensorrt.set_use_fp16(original_use_fp16) - -if __name__ == '__main__': - import nose - - nose.runmodule() diff --git a/tests/python/tensorrt/test_ops.py b/tests/python/tensorrt/test_ops.py index af1c453111d9..dfbbb8e8883c 100644 --- a/tests/python/tensorrt/test_ops.py +++ b/tests/python/tensorrt/test_ops.py @@ -512,6 +512,3 @@ def test_dropout(): sym = mx.sym.Dropout(data, p=0.7, mode=mode, axes=(0,)) check_unsupported_single_sym(sym) -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/tensorrt/test_resnet18.py b/tests/python/tensorrt/test_resnet18.py index 9fd99abb121b..e146423e257d 100644 --- a/tests/python/tensorrt/test_resnet18.py +++ b/tests/python/tensorrt/test_resnet18.py @@ -69,6 +69,3 @@ def test_tensorrt_resnet18_feature_vect(): finally: mx.contrib.tensorrt.set_use_fp16(original_precision_value) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/tensorrt/test_tensorrt_lenet5.py b/tests/python/tensorrt/test_tensorrt_lenet5.py index 78f41ca53909..a34b634121d2 100644 --- a/tests/python/tensorrt/test_tensorrt_lenet5.py +++ b/tests/python/tensorrt/test_tensorrt_lenet5.py @@ -116,6 +116,3 @@ def test_tensorrt_inference(): """Absolute diff. between MXNet & TensorRT accuracy (%f) exceeds threshold (%f): MXNet = %f, TensorRT = %f""" % (absolute_accuracy_diff, epsilon, mx_pct, trt_pct) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/common.py b/tests/python/unittest/common.py index ab2d191f1360..fda43673003b 100644 --- a/tests/python/unittest/common.py +++ b/tests/python/unittest/common.py @@ -29,9 +29,33 @@ import models from contextlib import contextmanager -from nose.tools import make_decorator, assert_raises +import pytest import tempfile +def make_decorator(func): + """ + Wraps a test decorator so as to properly replicate metadata + of the decorated function, including test tool additional stuff + (namely, setup and teardown). 
+ """ + def decorate(newfunc): + if hasattr(func, 'compat_func_name'): + name = func.compat_func_name + else: + name = func.__name__ + newfunc.__dict__ = func.__dict__ + newfunc.__doc__ = func.__doc__ + newfunc.__module__ = func.__module__ + if not hasattr(newfunc, 'compat_co_firstlineno'): + newfunc.compat_co_firstlineno = func.__code__.co_firstlineno + try: + newfunc.__name__ = name + except TypeError: + # can't set func name in 2.3 + newfunc.compat_func_name = name + return newfunc + return decorate + def assertRaises(expected_exception, func, *args, **kwargs): try: func(*args, **kwargs) @@ -43,7 +67,7 @@ def assertRaises(expected_exception, func, *args, **kwargs): def default_logger(): - """A logger used to output seed information to nosetests logs.""" + """A logger used to output seed information to logs.""" logger = logging.getLogger(__name__) # getLogger() lookups will return the same logger, but only add the handler once. if not len(logger.handlers): @@ -109,7 +133,7 @@ def less_than(version_left, version_right): left = version_left.split(".") right = version_right.split(".") - # 0 pad shortest version - e.g. + # 0 pad shortest version - e.g. # less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9") longest = max(len(left), len(right)) left.extend([0] * (longest - len(left))) @@ -131,7 +155,7 @@ def test_new(*args, **kwargs): if not cuxx_test_disabled or mx.context.current_context().device_type == 'cpu': orig_test(*args, **kwargs) else: - assert_raises((MXNetError, RuntimeError), orig_test, *args, **kwargs) + pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs) return test_new return test_helper @@ -154,7 +178,7 @@ def assert_raises_cuda_not_satisfied(min_version): def with_seed(seed=None): """ - A decorator for nosetests test functions that manages rng seeds. + A decorator for test functions that manages rng seeds. Parameters ---------- @@ -181,10 +205,10 @@ def test_not_ok_with_random_data(): can then set the environment variable MXNET_TEST_SEED to the value reported, then rerun the test with: - nosetests --verbose -s : + pytest --verbose --capture=no :: To run a test repeatedly, set MXNET_TEST_COUNT= in the environment. - To see the seeds of even the passing tests, add '--logging-level=DEBUG' to nosetests. + To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest. """ def test_helper(orig_test): @make_decorator(orig_test) @@ -206,7 +230,7 @@ def test_new(*args, **kwargs): mx.random.seed(this_test_seed) random.seed(this_test_seed) logger = default_logger() - # 'nosetests --logging-level=DEBUG' shows this msg even with an ensuing core dump. + # 'pytest --logging-level=DEBUG' shows this msg even with an ensuing core dump. test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else '' test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}' ' to reproduce.').format(test_count_msg, this_test_seed) @@ -226,12 +250,12 @@ def test_new(*args, **kwargs): def setup_module(): """ - A function with a 'magic name' executed automatically before each nosetests module + A function with a 'magic name' executed automatically before each pytest module (file of tests) that helps reproduce a test segfault by setting and outputting the rng seeds. The segfault-debug procedure on a module called test_module.py is: - 1. run "nosetests --verbose test_module.py". A seg-faulting output might be: + 1. run "pytest --verbose test_module.py". 
A seg-faulting output might be: [INFO] np, mx and python random seeds = 4018804151 test_module.test1 ... ok @@ -239,7 +263,7 @@ def setup_module(): 2. Copy the module-starting seed into the next command, then run: - MXNET_MODULE_SEED=4018804151 nosetests --logging-level=DEBUG --verbose test_module.py + MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py Output might be: @@ -251,7 +275,7 @@ def setup_module(): Illegal instruction (core dumped) 3. Copy the segfaulting-test seed into the command: - MXNET_TEST_SEED=1435005594 nosetests --logging-level=DEBUG --verbose test_module.py:test2 + MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py::test2 Output might be: [INFO] np, mx and python random seeds = 2481884723 @@ -301,9 +325,9 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, traceback): shutil.rmtree(self._dirname) -def teardown(): +def teardown_module(): """ - A function with a 'magic name' executed automatically after each nosetests test module. + A function with a 'magic name' executed automatically after each pytest test module. It waits for all operations in one file to finish before carrying on the next. """ @@ -373,4 +397,4 @@ def run_in_spawned_process(func, env, *args): finally: os.environ.clear() os.environ.update(orig_environ) - return True \ No newline at end of file + return True diff --git a/tests/python-pytest/onnx/README.md b/tests/python/unittest/onnx/README.md similarity index 100% rename from tests/python-pytest/onnx/README.md rename to tests/python/unittest/onnx/README.md diff --git a/tests/python-pytest/onnx/backend.py b/tests/python/unittest/onnx/backend.py similarity index 100% rename from tests/python-pytest/onnx/backend.py rename to tests/python/unittest/onnx/backend.py diff --git a/tests/python-pytest/onnx/backend_rep.py b/tests/python/unittest/onnx/backend_rep.py similarity index 100% rename from tests/python-pytest/onnx/backend_rep.py rename to tests/python/unittest/onnx/backend_rep.py diff --git a/tests/python-pytest/onnx/backend_test.py b/tests/python/unittest/onnx/backend_test.py old mode 100755 new mode 100644 similarity index 100% rename from tests/python-pytest/onnx/backend_test.py rename to tests/python/unittest/onnx/backend_test.py diff --git a/tests/python-pytest/onnx/mxnet_export_test.py b/tests/python/unittest/onnx/mxnet_export_test.py similarity index 100% rename from tests/python-pytest/onnx/mxnet_export_test.py rename to tests/python/unittest/onnx/mxnet_export_test.py diff --git a/tests/python-pytest/onnx/test_cases.py b/tests/python/unittest/onnx/test_cases.py similarity index 100% rename from tests/python-pytest/onnx/test_cases.py rename to tests/python/unittest/onnx/test_cases.py diff --git a/tests/python-pytest/onnx/test_models.py b/tests/python/unittest/onnx/test_models.py similarity index 100% rename from tests/python-pytest/onnx/test_models.py rename to tests/python/unittest/onnx/test_models.py diff --git a/tests/python-pytest/onnx/test_node.py b/tests/python/unittest/onnx/test_node.py similarity index 100% rename from tests/python-pytest/onnx/test_node.py rename to tests/python/unittest/onnx/test_node.py diff --git a/tests/python/unittest/test_autograd.py b/tests/python/unittest/test_autograd.py index 61955f034a71..69b61b47d4c9 100644 --- a/tests/python/unittest/test_autograd.py +++ b/tests/python/unittest/test_autograd.py @@ -20,7 +20,7 @@ from mxnet.ndarray import zeros_like from mxnet.autograd import * from mxnet.test_utils import * -from common import
setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.test_utils import EnvManager @@ -467,7 +467,3 @@ def test_gradient(): dx.backward() assert abs(x.grad.asscalar() - 2.71828175) < 1e-7 - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_base.py b/tests/python/unittest/test_base.py index 3189729e1d10..07d429589ba2 100644 --- a/tests/python/unittest/test_base.py +++ b/tests/python/unittest/test_base.py @@ -17,7 +17,6 @@ import mxnet as mx from mxnet.base import data_dir -from nose.tools import * import os import unittest import logging @@ -47,4 +46,3 @@ def test_data_dir(self,): del os.environ['MXNET_HOME'] self.assertEqual(data_dir(), prev_data_dir) - diff --git a/tests/python/unittest/test_contrib_autograd.py b/tests/python/unittest/test_contrib_autograd.py index 1c878e322e7c..c376eb7794f8 100644 --- a/tests/python/unittest/test_contrib_autograd.py +++ b/tests/python/unittest/test_contrib_autograd.py @@ -18,7 +18,7 @@ import mxnet.ndarray as nd from mxnet.contrib.autograd import * from mxnet.test_utils import * -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module def autograd_assert(*args, **kwargs): func = kwargs["func"] @@ -190,7 +190,3 @@ def test_retain_grad(): raise AssertionError( "differentiating the same graph twice without retain_graph should fail") - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_control_flow.py b/tests/python/unittest/test_contrib_control_flow.py index a93c109d11df..6db1003fdd20 100644 --- a/tests/python/unittest/test_contrib_control_flow.py +++ b/tests/python/unittest/test_contrib_control_flow.py @@ -2168,6 +2168,3 @@ def test_foreach_with_unkown_dim(): _, output_shape, _ = outs.infer_shape_partial() assert_allclose((0, 3, 32, 32), output_shape[0]) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_hawkesll.py b/tests/python/unittest/test_contrib_hawkesll.py index a4b1d9de605f..8f02737e2d12 100644 --- a/tests/python/unittest/test_contrib_hawkesll.py +++ b/tests/python/unittest/test_contrib_hawkesll.py @@ -154,8 +154,3 @@ def test_hawkesll_backward_single_mark(): assert np.allclose(beta.grad.asnumpy().sum(), -0.05371582) - -if __name__ == "__main__": - import nose - - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_operator.py b/tests/python/unittest/test_contrib_operator.py index 476dfac24a61..717ce7f20f95 100644 --- a/tests/python/unittest/test_contrib_operator.py +++ b/tests/python/unittest/test_contrib_operator.py @@ -444,7 +444,3 @@ def test_modulated_deformable_convolution(): else: rtol, atol = 0.05, 1e-3 - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_optimizer.py b/tests/python/unittest/test_contrib_optimizer.py index 5f7c51f257b3..aae767202454 100644 --- a/tests/python/unittest/test_contrib_optimizer.py +++ b/tests/python/unittest/test_contrib_optimizer.py @@ -196,6 +196,3 @@ def run_adamw_test(nElem=1, aggregate=False): for nElem in range(6): run_adamw_test(nElem+1) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_stes_op.py b/tests/python/unittest/test_contrib_stes_op.py index 5864ec9db5b1..163ab4a7c9f8 100644 --- a/tests/python/unittest/test_contrib_stes_op.py +++ b/tests/python/unittest/test_contrib_stes_op.py @@ -132,6 +132,3 @@ 
def test_contrib_sign_ste(): check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=True, in_data=in_data) check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=False, in_data=in_data) -if __name__ == '__main__': - import nose - nose.runmodule() \ No newline at end of file diff --git a/tests/python/unittest/test_contrib_svrg_module.py b/tests/python/unittest/test_contrib_svrg_module.py index 79407d15fd7f..f135255753ce 100644 --- a/tests/python/unittest/test_contrib_svrg_module.py +++ b/tests/python/unittest/test_contrib_svrg_module.py @@ -307,7 +307,3 @@ def test_fit(): estimated_mse = 1e-5 assert metric.get()[1] < estimated_mse - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_svrg_optimizer.py b/tests/python/unittest/test_contrib_svrg_optimizer.py index f7d90d12872f..cb6fdcf1218a 100644 --- a/tests/python/unittest/test_contrib_svrg_optimizer.py +++ b/tests/python/unittest/test_contrib_svrg_optimizer.py @@ -95,7 +95,3 @@ def test_kvstore_init_aux_keys(): # updated with AssignmentOptimizer assert same(param_weight_full_init.asnumpy(), np.array([2, 2, 2])) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_contrib_text.py b/tests/python/unittest/test_contrib_text.py index 4072cc84f684..0778e5ace1f7 100644 --- a/tests/python/unittest/test_contrib_text.py +++ b/tests/python/unittest/test_contrib_text.py @@ -792,7 +792,3 @@ def test_get_and_pretrain_file_names(): assertRaises(KeyError, text.embedding.get_pretrained_file_names, 'unknown$$') - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_deferred_compute.py b/tests/python/unittest/test_deferred_compute.py index cebb6908768f..6d250d5fa468 100644 --- a/tests/python/unittest/test_deferred_compute.py +++ b/tests/python/unittest/test_deferred_compute.py @@ -19,11 +19,11 @@ import operator import numpy as np -from nose.tools import raises import mxnet as mx import mxnet._deferred_compute as dc from mxnet.base import MXNetError +import pytest def _all_same(arrays1, arrays2, message=''): @@ -213,7 +213,7 @@ def f(a, *, nd): _all_assert_dc(_dc_simple_setup, f) -@raises(MXNetError) # Should raise NotImplementedError https://github.com/apache/incubator-mxnet/issues/17522 +@pytest.mark.xfail(raises=MXNetError) # Should raise NotImplementedError https://github.com/apache/incubator-mxnet/issues/17522 def test_dc_inplace(): def f(a, *, nd): a[:5] = 0 @@ -245,7 +245,7 @@ def test_dc_get_symbol_called_twice(): assert sym2.list_inputs() == ['a'] -@raises(MXNetError) # Should raise ValueError https://github.com/apache/incubator-mxnet/issues/17522 +@pytest.mark.xfail(raises=MXNetError) # Should raise ValueError https://github.com/apache/incubator-mxnet/issues/17522 def test_dc_set_variable_called_twice(): a = mx.np.arange(10) dc.set_variable(a, mx.sym.var('a')) @@ -342,7 +342,7 @@ def f(a, idx, *, nd): _assert_dc(setup, f, mode=mode) -@raises(TypeError) # Advanced indexing +@pytest.mark.xfail(raises=TypeError) # Advanced indexing def test_dc_list_indexing(): def f(a, *, nd): assert nd is mx.np @@ -352,7 +352,7 @@ def f(a, *, nd): _assert_dc(_dc_simple_setup, f, mode=mode) -@raises(TypeError) # Advanced indexing +@pytest.mark.xfail(raises=TypeError) # Advanced indexing def test_dc_numpy_indexing(): def f(a, *, nd): assert nd is mx.np @@ -430,7 +430,7 @@ def forward(self, x): _assert_dc_gluon(_dc_gluon_simple_setup, net, numpy=True) -@raises(RuntimeError) 
+@pytest.mark.xfail(raises=RuntimeError) def test_dc_hybridblock_deferred_init_no_infer_shape(): class MyBlock(mx.gluon.HybridBlock): def __init__(self, *, prefix=None, params=None): @@ -496,7 +496,7 @@ def setup(*, nd): _assert_dc_gluon(setup, net, numpy=True) -@raises(RuntimeError) +@pytest.mark.xfail(raises=RuntimeError) def test_dc_hybridblock_symbolblock(): model = mx.gluon.nn.HybridSequential() model.add(mx.gluon.nn.Dense(128, activation='tanh')) @@ -530,7 +530,3 @@ def forward(self, x): _all_same([out], [out_hybrid]) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_dgl_graph.py b/tests/python/unittest/test_dgl_graph.py index 805adc2dac6f..89533fb119aa 100644 --- a/tests/python/unittest/test_dgl_graph.py +++ b/tests/python/unittest/test_dgl_graph.py @@ -240,6 +240,3 @@ def test_adjacency(): assert_array_equal(adj.indices, g.indices) assert_array_equal(adj.data, mx.nd.ones(shape=g.indices.shape)) -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_dlpack.py b/tests/python/unittest/test_dlpack.py index fb64f8d58831..46bdde7d0bcd 100644 --- a/tests/python/unittest/test_dlpack.py +++ b/tests/python/unittest/test_dlpack.py @@ -43,6 +43,3 @@ def from_dlpack_old(dlpack): z = from_dlpack_old(y) assert_almost_equal(x.asnumpy(), z.asnumpy(), rtol=1e-5, atol=1e-5) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_dynamic_shape.py b/tests/python/unittest/test_dynamic_shape.py index 1b043c73256d..b61fbeef6b74 100644 --- a/tests/python/unittest/test_dynamic_shape.py +++ b/tests/python/unittest/test_dynamic_shape.py @@ -48,7 +48,3 @@ def hybrid_forward(self, F, data, index): assert_almost_equal(result.asnumpy(), result_nd) assert_almost_equal(data.grad.asnumpy(), data_grad_nd) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_engine.py b/tests/python/unittest/test_engine.py index 61d94ddbf4ec..fafc6758d892 100644 --- a/tests/python/unittest/test_engine.py +++ b/tests/python/unittest/test_engine.py @@ -15,7 +15,6 @@ # specific language governing permissions and limitations # under the License. 
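One behavioral nuance in the `@raises` → `@pytest.mark.xfail(raises=...)` translations above: nose's `@raises` failed the test outright if no exception was thrown, while `xfail` without `strict=True` only reports an unexpected pass (XPASS). A sketch of the stricter spelling, borrowing the invalid-reshape check that appears below (shapes are illustrative):

```python
import pytest
import mxnet as mx
from mxnet.base import MXNetError

# strict=True restores nose's semantics: the exception is required.
@pytest.mark.xfail(raises=MXNetError, strict=True)
def test_invalid_reshape():
    a = mx.np.ones((10, 10))
    a.reshape((1,))  # incompatible target shape must raise
```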
-import nose import mxnet as mx import os import unittest @@ -70,8 +69,3 @@ def test_engine_openmp_after_fork(): print("Child omp max threads: {}".format(omp_max_threads)) assert omp_max_threads == 1 - - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_engine_import.py b/tests/python/unittest/test_engine_import.py index 303f3ceb1dee..7675cf836999 100644 --- a/tests/python/unittest/test_engine_import.py +++ b/tests/python/unittest/test_engine_import.py @@ -25,7 +25,7 @@ def test_engine_import(): import mxnet - + engine_types = ['', 'NaiveEngine', 'ThreadedEngine', 'ThreadedEnginePerDevice'] for type in engine_types: @@ -35,7 +35,3 @@ def test_engine_import(): os.environ.pop('MXNET_ENGINE_TYPE', None) reload(mxnet) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_exc_handling.py b/tests/python/unittest/test_exc_handling.py index e3c333705260..8657bec1398c 100644 --- a/tests/python/unittest/test_exc_handling.py +++ b/tests/python/unittest/test_exc_handling.py @@ -18,11 +18,11 @@ import mxnet as mx import numpy as np from mxnet import gluon -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.gluon import nn from mxnet.base import MXNetError from mxnet.test_utils import assert_exception, default_context, set_default_context, use_np -from nose.tools import assert_raises +import pytest @with_seed() @@ -35,7 +35,7 @@ def imperative(exec_numpy=True): c.asnumpy() imperative(exec_numpy=False) - assert_raises(MXNetError, imperative, exec_numpy=True) + pytest.raises(MXNetError, imperative, exec_numpy=True) @with_seed() def test_exc_symbolic(): @@ -69,11 +69,11 @@ def symbolic(exec_backward=True, waitall=True): else: outputs[0].asnumpy() - assert_raises(MXNetError, symbolic, exec_backward=False) - assert_raises(MXNetError, symbolic, exec_backward=True) + pytest.raises(MXNetError, symbolic, exec_backward=False) + pytest.raises(MXNetError, symbolic, exec_backward=True) - assert_raises(MXNetError, symbolic, exec_backward=False, waitall=True) - assert_raises(MXNetError, symbolic, exec_backward=True, waitall=True) + pytest.raises(MXNetError, symbolic, exec_backward=False, waitall=True) + pytest.raises(MXNetError, symbolic, exec_backward=True, waitall=True) @with_seed() def test_exc_gluon(): @@ -93,9 +93,9 @@ def gluon(exec_wait=True, waitall=False): z.wait_to_read() gluon(exec_wait=False) - assert_raises(MXNetError, gluon, exec_wait=True) + pytest.raises(MXNetError, gluon, exec_wait=True) - assert_raises(MXNetError, gluon, waitall=True) + pytest.raises(MXNetError, gluon, waitall=True) @with_seed() def test_exc_multiple_waits(): @@ -152,8 +152,8 @@ def mutable_var_check(waitall=False): mx.nd.waitall() else: a.asnumpy() - assert_raises(MXNetError, mutable_var_check, waitall=False) - assert_raises(MXNetError, mutable_var_check, waitall=True) + pytest.raises(MXNetError, mutable_var_check, waitall=False) + pytest.raises(MXNetError, mutable_var_check, waitall=True) @with_seed() def test_multiple_waitalls(): @@ -189,16 +189,16 @@ def check_resize(): img = mx.nd.ones((1200, 1600, 3)) img = mx.image.imresize(img, 320, 320, interp=-1) img.asnumpy() - assert_raises(MXNetError, check_resize) + pytest.raises(MXNetError, check_resize) @with_seed() def test_np_reshape_exception(): a = mx.np.ones((10, 10)) a.reshape((-1,)).asnumpy() # Check no-raise - assert_raises(MXNetError, lambda: a.reshape((1,))) - assert_raises(MXNetError, lambda: 
mx.np.reshape(a, (1,))) - assert_raises(MXNetError, lambda: mx.np.reshape(a, (-1, 3))) + pytest.raises(MXNetError, lambda: a.reshape((1,))) + pytest.raises(MXNetError, lambda: mx.np.reshape(a, (1,))) + pytest.raises(MXNetError, lambda: mx.np.reshape(a, (-1, 3))) @with_seed() @@ -208,10 +208,6 @@ def test_np_random_incorrect_named_arguments(): for op_name in random_ops: op = getattr(mx.np.random, op_name, None) assert op is not None - assert_raises(TypeError, op, shape=()) - assert_raises(TypeError, op, shape=None) + pytest.raises(TypeError, op, shape=()) + pytest.raises(TypeError, op, shape=None) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_executor.py b/tests/python/unittest/test_executor.py index 2bc696fd4e43..300e4b2590c8 100644 --- a/tests/python/unittest/test_executor.py +++ b/tests/python/unittest/test_executor.py @@ -17,7 +17,7 @@ import numpy as np import mxnet as mx -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.test_utils import assert_almost_equal @@ -164,7 +164,3 @@ def test_reshape(): # weight ndarray is shared between exe and new_exe assert np.all(new_exe.arg_arrays[1].asnumpy() == 1) - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon.py b/tests/python/unittest/test_gluon.py index 453c31ebc5de..cca1cfdce381 100644 --- a/tests/python/unittest/test_gluon.py +++ b/tests/python/unittest/test_gluon.py @@ -27,11 +27,11 @@ from mxnet.ndarray.ndarray import _STORAGE_TYPE_STR_TO_ID from mxnet.test_utils import use_np import mxnet.numpy as _mx_np -from common import (setup_module, with_seed, assertRaises, teardown, +from common import (setup_module, with_seed, assertRaises, teardown_module, assert_raises_cudnn_not_satisfied) import numpy as np from numpy.testing import assert_array_equal -from nose.tools import raises, assert_raises +import pytest from copy import deepcopy import warnings import json @@ -54,12 +54,12 @@ def test_parameter(): assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)] @with_seed() -@raises(AssertionError) +@pytest.mark.xfail(raises=AssertionError) def test_invalid_parameter_stype(): p = gluon.Parameter('weight', shape=(10, 10), stype='invalid') @with_seed() -@raises(AssertionError) +@pytest.mark.xfail(raises=AssertionError) def test_invalid_parameter_grad_stype(): p = gluon.Parameter('weight', shape=(10, 10), grad_stype='invalid') @@ -424,7 +424,7 @@ def hybrid_forward(self, F, x): assert np.dtype(prediction.dtype) == np.dtype(np.float32) @with_seed() -@raises(AssertionError) +@pytest.mark.xfail(raises=AssertionError) def test_sparse_symbol_block(): data = mx.sym.var('data') weight = mx.sym.var('weight', stype='row_sparse') @@ -434,7 +434,7 @@ def test_sparse_symbol_block(): net = gluon.SymbolBlock(out, data) @with_seed() -@raises(RuntimeError) +@pytest.mark.xfail(raises=RuntimeError) def test_sparse_hybrid_block(): params = gluon.ParameterDict('net_') params.get('weight', shape=(5,5), stype='row_sparse', dtype='float32') @@ -501,17 +501,17 @@ def hybrid_forward(self, F, a, b): foo = FooNested() if do_hybridize: foo.hybridize() - assert_raises(ValueError, foo, None, None) + pytest.raises(ValueError, foo, None, None) # Make sure the ValueError is correctly raised foo = FooNested() foo.hybridize() foo(None, mx.nd.ones((10,))) # Pass for the first time to initialize the cached op - assert_raises(ValueError, lambda: foo(mx.nd.ones((10,)), mx.nd.ones((10,)))) + 
pytest.raises(ValueError, lambda: foo(mx.nd.ones((10,)), mx.nd.ones((10,)))) foo = FooNested() - assert_raises(ValueError, lambda: foo(mx.nd.ones((10,)), mx.sym.var('a'))) + pytest.raises(ValueError, lambda: foo(mx.nd.ones((10,)), mx.sym.var('a'))) foo = FooNested() - assert_raises(ValueError, lambda: foo(mx.sym.var('a'), mx.nd.ones((10,)))) + pytest.raises(ValueError, lambda: foo(mx.sym.var('a'), mx.nd.ones((10,)))) # Test the case of the default values foo1 = FooDefault() @@ -529,7 +529,7 @@ def hybrid_forward(self, F, a, b): out1 = foo1(mx.nd.ones((10,)), None) out2 = foo1(mx.nd.ones((10,))) assert_almost_equal(out1.asnumpy(), out2.asnumpy()) - assert_raises(ValueError, lambda: foo1(mx.nd.ones((10,)), mx.nd.ones((10,)))) + pytest.raises(ValueError, lambda: foo1(mx.nd.ones((10,)), mx.nd.ones((10,)))) @with_seed() @@ -567,13 +567,13 @@ def forward(self, a, b): # 4. Allow mixing of cpu_pinned and cpu foo_hybrid = FooHybrid() foo_hybrid.hybridize() - assert_raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,)), 1)) + pytest.raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,)), 1)) foo_hybrid = FooHybrid() foo_hybrid.hybridize() - assert_raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,)), mx.sym.var('a'))) + pytest.raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,)), mx.sym.var('a'))) foo_hybrid = FooHybrid() foo_hybrid.hybridize() - assert_raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,), ctx=mx.cpu(1)), + pytest.raises(ValueError, lambda: foo_hybrid(mx.nd.ones((10,), ctx=mx.cpu(1)), mx.nd.ones((10,), ctx=mx.cpu(2)))) @@ -900,7 +900,7 @@ def test_layernorm(): layer.initialize() if hybridize: layer.hybridize() - assert_raises(MXNetError, lambda: layer(mx.nd.ones((2, 11)))) + pytest.raises(MXNetError, lambda: layer(mx.nd.ones((2, 11)))) @with_seed() def test_groupnorm(): @@ -1023,7 +1023,7 @@ def test_block_attr_hidden(): b.a = 1 -@raises(TypeError) +@pytest.mark.xfail(raises=TypeError) @with_seed() def test_block_attr_block(): b = gluon.Block() @@ -1033,7 +1033,7 @@ def test_block_attr_block(): b.b = (2,) -@raises(TypeError) +@pytest.mark.xfail(raises=TypeError) @with_seed() def test_block_attr_param(): b = gluon.Block() @@ -1885,7 +1885,7 @@ def test_summary(): net3.summary(mx.nd.ones((80, 32, 5)), begin_state) net.hybridize() - assert_raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224))) + pytest.raises(AssertionError, net.summary, mx.nd.ones((32, 3, 224, 224))) @with_seed() @@ -3214,6 +3214,3 @@ def hybrid_forward(self, F, x): mx.test_utils.assert_almost_equal(grad1, grad2) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_batch_processor.py b/tests/python/unittest/test_gluon_batch_processor.py index 8604713fc129..952ed1c4a0da 100644 --- a/tests/python/unittest/test_gluon_batch_processor.py +++ b/tests/python/unittest/test_gluon_batch_processor.py @@ -27,7 +27,7 @@ from mxnet.gluon.contrib.estimator import * from mxnet.gluon.contrib.estimator.event_handler import * from mxnet.gluon.contrib.estimator.batch_processor import BatchProcessor -from nose.tools import assert_raises +import pytest def _get_test_network(): net = nn.Sequential() @@ -66,12 +66,12 @@ def test_batch_processor_fit(): est.fit(train_data=dataloader, epochs=num_epochs) - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=dataiter, epochs=num_epochs) # Input NDArray - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=[mx.nd.ones(shape=(10, 3))], epochs=num_epochs) @@ 
-105,12 +105,12 @@ def test_batch_processor_validation(): val_metrics = est.val_metrics validation_handler = ValidationHandler(val_data=dataloader, eval_fn=est.evaluate) - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=dataiter, val_data=dataiter, epochs=num_epochs) # Input NDArray - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=[mx.nd.ones(shape=(10, 3))], val_data=[mx.nd.ones(shape=(10, 3))], epochs=num_epochs) diff --git a/tests/python/unittest/test_gluon_contrib.py b/tests/python/unittest/test_gluon_contrib.py index 0ed0d4e8a545..b77d943e4059 100644 --- a/tests/python/unittest/test_gluon_contrib.py +++ b/tests/python/unittest/test_gluon_contrib.py @@ -25,7 +25,7 @@ Concurrent, HybridConcurrent, Identity, SparseEmbedding, PixelShuffle1D, PixelShuffle2D, PixelShuffle3D) from mxnet.test_utils import almost_equal, default_context, assert_almost_equal, assert_allclose -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module import numpy as np @@ -436,7 +436,3 @@ def test_ModulatedDeformableConvolution(): with mx.autograd.record(): y = net(x) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_data.py b/tests/python/unittest/test_gluon_data.py index 29197bd58f6c..e6e3caebdf16 100644 --- a/tests/python/unittest/test_gluon_data.py +++ b/tests/python/unittest/test_gluon_data.py @@ -23,7 +23,7 @@ import random from mxnet import gluon import platform -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.gluon.data import DataLoader import mxnet.ndarray as nd from mxnet import context @@ -382,7 +382,3 @@ def test_dataloader_scope(): assert item is not None - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_data_vision.py b/tests/python/unittest/test_gluon_data_vision.py index b53dbf0687d3..d810f32b0afa 100644 --- a/tests/python/unittest/test_gluon_data_vision.py +++ b/tests/python/unittest/test_gluon_data_vision.py @@ -25,7 +25,7 @@ from mxnet.gluon.data.vision import transforms from mxnet import image from mxnet.test_utils import * -from common import assertRaises, setup_module, with_seed, teardown +from common import assertRaises, setup_module, with_seed, teardown_module import numpy as np @@ -42,12 +42,12 @@ def test_to_tensor(): out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8')) assert_almost_equal(out_nd.asnumpy(), np.transpose( data_in.astype(dtype=np.float32) / 255.0, (0, 3, 1, 2))) - + # Invalid Input invalid_data_in = nd.random.uniform(0, 255, (5, 5, 300, 300, 3)).astype(dtype=np.uint8) transformer = transforms.ToTensor() assertRaises(MXNetError, transformer, invalid_data_in) - + # Bounds (0->0, 255->1) data_in = np.zeros((10, 20, 3)).astype(dtype=np.uint8) out_nd = transforms.ToTensor()(nd.array(data_in, dtype='uint8')) @@ -126,7 +126,7 @@ def _test_resize_with_diff_type(dtype): assertRaises(MXNetError, invalid_transform, data_in) for dtype in ['uint8', 'float32', 'float64']: - _test_resize_with_diff_type(dtype) + _test_resize_with_diff_type(dtype) @with_seed() @@ -159,7 +159,7 @@ def _test_crop_resize_with_diff_type(dtype): # test with resize height and width should be greater than 0 transformer = transforms.CropResize(0, 0, 100, 50, (-25, 25), 1) assertRaises(MXNetError, transformer, data_in) - # test height and width should be greater than 0 + # test 
height and width should be greater than 0 transformer = transforms.CropResize(0, 0, -100, -50) assertRaises(MXNetError, transformer, data_in) # test cropped area is bigger than input data @@ -168,7 +168,7 @@ def _test_crop_resize_with_diff_type(dtype): assertRaises(MXNetError, transformer, data_bath_in) for dtype in ['uint8', 'float32', 'float64']: - _test_crop_resize_with_diff_type(dtype) + _test_crop_resize_with_diff_type(dtype) # test nd.image.crop backward def test_crop_backward(test_nd_arr, TestCase): @@ -288,7 +288,7 @@ def test_random_rotation(): @with_seed() def test_random_transforms(): from mxnet.gluon.data.vision import transforms - + tmp_t = transforms.Compose([transforms.Resize(300), transforms.RandomResizedCrop(224)]) transform = transforms.Compose([transforms.RandomApply(tmp_t, 0.5)]) @@ -302,6 +302,3 @@ def test_random_transforms(): assert_almost_equal(num_apply/float(iteration), 0.5, 0.1) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_estimator.py b/tests/python/unittest/test_gluon_estimator.py index 2c00b1609112..e33aa74b3ca7 100644 --- a/tests/python/unittest/test_gluon_estimator.py +++ b/tests/python/unittest/test_gluon_estimator.py @@ -20,13 +20,13 @@ import sys import unittest import warnings +import pytest import mxnet as mx from mxnet import gluon from mxnet.gluon import nn from mxnet.gluon.contrib.estimator import * from mxnet.gluon.contrib.estimator.event_handler import * -from nose.tools import assert_raises def _get_test_network(params=None): @@ -70,12 +70,12 @@ def test_fit(): est.fit(train_data=dataloader, epochs=num_epochs) - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=dataiter, epochs=num_epochs) # Input NDArray - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=[mx.nd.ones(shape=(10, 3))], epochs=num_epochs) @@ -107,12 +107,12 @@ def test_validation(): val_metrics = est.val_metrics validation_handler = ValidationHandler(val_data=dataloader, eval_fn=est.evaluate) - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=dataiter, val_data=dataiter, epochs=num_epochs) # Input NDArray - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=[mx.nd.ones(shape=(10, 3))], val_data=[mx.nd.ones(shape=(10, 3))], epochs=num_epochs) @@ -180,7 +180,7 @@ def test_trainer(): # input invalid trainer trainer = 'sgd' - with assert_raises(ValueError): + with pytest.raises(ValueError): est = Estimator(net=net, loss=loss, train_metrics=acc, @@ -215,7 +215,7 @@ def test_metric(): est.fit(train_data=train_data, epochs=num_epochs) # input invalid metric - with assert_raises(ValueError): + with pytest.raises(ValueError): est = Estimator(net=net, loss=loss, train_metrics='acc', @@ -238,7 +238,7 @@ def test_loss(): net.initialize(ctx=ctx) trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.001}) # input invalid loss - with assert_raises(ValueError): + with pytest.raises(ValueError): est = Estimator(net=net, loss='mse', train_metrics=acc, @@ -264,13 +264,13 @@ def test_context(): train_metrics=metrics, context=ctx) # input invalid context - with assert_raises(ValueError): + with pytest.raises(ValueError): est = Estimator(net=net, loss=loss, train_metrics=metrics, context='cpu') - with assert_raises(AssertionError): + with pytest.raises(AssertionError): est = Estimator(net=net, loss=loss, train_metrics=metrics, @@ -360,7 +360,7 @@ def test_default_handlers(): # handler 
with mixed metrics, some handlers use metrics prepared by the estimator # some handlers use metrics the user prepared logging = LoggingHandler(metrics=[mx.metric.RMSE("val acc")]) - with assert_raises(ValueError): + with pytest.raises(ValueError): est.fit(train_data=train_data, epochs=num_epochs, event_handlers=[logging]) # test handler order @@ -394,7 +394,7 @@ def test_val_net(): val_loss=val_loss, val_net=val_net) - with assert_raises(RuntimeError): + with pytest.raises(RuntimeError): est.fit(train_data=dataloader, val_data=dataloader, epochs=num_epochs) diff --git a/tests/python/unittest/test_gluon_model_zoo.py b/tests/python/unittest/test_gluon_model_zoo.py index d53dd403a5b8..a921d3de18e6 100644 --- a/tests/python/unittest/test_gluon_model_zoo.py +++ b/tests/python/unittest/test_gluon_model_zoo.py @@ -19,7 +19,7 @@ import mxnet as mx from mxnet.gluon.model_zoo.vision import get_model import sys -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module import multiprocessing @@ -66,6 +66,3 @@ def test_parallel_download(): for p in processes: p.join() -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_rnn.py b/tests/python/unittest/test_gluon_rnn.py index f3ffd03f5dbb..4e8db2fbcab4 100644 --- a/tests/python/unittest/test_gluon_rnn.py +++ b/tests/python/unittest/test_gluon_rnn.py @@ -842,7 +842,3 @@ def hybrid_forward(self, F, inputs, valid_len): _check_bidirectional_unroll_valid_length(1) _check_bidirectional_unroll_valid_length(3) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_gluon_trainer.py b/tests/python/unittest/test_gluon_trainer.py index 350700cc129f..414b15c3b3f2 100644 --- a/tests/python/unittest/test_gluon_trainer.py +++ b/tests/python/unittest/test_gluon_trainer.py @@ -24,7 +24,7 @@ from mxnet.test_utils import assert_almost_equal from common import setup_module, with_seed, assertRaises from copy import deepcopy -from nose.tools import raises, assert_raises +import pytest def dict_equ(a, b): assert set(a) == set(b) @@ -32,7 +32,7 @@ def dict_equ(a, b): assert (a[k].asnumpy() == b[k].asnumpy()).all() @with_seed() -@raises(RuntimeError) +@pytest.mark.xfail(raises=RuntimeError) def test_multi_trainer(): x = gluon.Parameter('x', shape=(10,), stype='row_sparse') x.initialize() @@ -78,7 +78,7 @@ def test_trainer_with_teststore(): # Expect exceptions if update_on_kvstore is set to True, # because TestStore does not support that invalid_trainer = gluon.Trainer([x], 'sgd', kvstore=kv, update_on_kvstore=True) - assert_raises(ValueError, invalid_trainer._init_kvstore) + pytest.raises(ValueError, invalid_trainer._init_kvstore) @with_seed() def test_trainer(): @@ -110,8 +110,8 @@ def test_trainer(): dict_equ(trainer._kvstore._updater.states, states) assert trainer._optimizer == trainer._kvstore._updater.optimizer # invalid usage of update and allreduce_grads if update_on_kvstore - assert_raises(AssertionError, trainer.update, 1) - assert_raises(AssertionError, trainer.allreduce_grads) + pytest.raises(AssertionError, trainer.update, 1) + pytest.raises(AssertionError, trainer.allreduce_grads) else: for updater in trainer._updaters: dict_equ(updater.states, states) diff --git a/tests/python/unittest/test_gluon_utils.py b/tests/python/unittest/test_gluon_utils.py index bc816b1794ee..4e37596fab0b 100644 --- a/tests/python/unittest/test_gluon_utils.py +++ b/tests/python/unittest/test_gluon_utils.py @@ -29,7 +29,7 @@ import mock import 
mxnet as mx import requests -from nose.tools import raises +import pytest class MockResponse(requests.Response): @@ -40,7 +40,7 @@ def __init__(self, status_code, content): self.raw = io.BytesIO(content.encode('utf-8')) -@raises(Exception) +@pytest.mark.xfail(raises=Exception) @mock.patch( 'requests.get', mock.Mock(side_effect=requests.exceptions.ConnectionError)) def test_download_retries(): diff --git a/tests/python/unittest/test_higher_order_grad.py b/tests/python/unittest/test_higher_order_grad.py index ad31c34bd590..28357360bc41 100644 --- a/tests/python/unittest/test_higher_order_grad.py +++ b/tests/python/unittest/test_higher_order_grad.py @@ -22,8 +22,6 @@ from operator import mul import random -from nose.tools import ok_ - from common import with_seed import mxnet from mxnet import nd, autograd, gluon @@ -648,18 +646,18 @@ def test_dense_backward_flatten(): w_grad_grad_e = nd.dot(o_y, o_x_grad, transpose_a=True) x_grad_e = nd.dot(o_y, w) x_grad_grad_e = nd.dot(o_y, o_w_grad) - ok_(w_grad.shape == w.shape) - ok_(w_grad_grad.shape == w.shape) - ok_(x_grad.shape == x.shape) - ok_(x_grad_grad.shape == x.shape) + assert w_grad.shape == w.shape + assert w_grad_grad.shape == w.shape + assert x_grad.shape == x.shape + assert x_grad_grad.shape == x.shape w_grad_check = same(flatten2d_right(w_grad), flatten2d_right(w_grad_e)) w_grad_grad_check = same(flatten2d_right(w_grad_grad), flatten2d_right(w_grad_grad_e)) x_grad_check = same(flatten2d_right(x_grad), flatten2d_right(x_grad_e)) x_grad_grad_check = same(flatten2d_right(x_grad_grad), flatten2d_right(x_grad_grad_e)) - ok_(x_grad_check) - ok_(w_grad_check) - ok_(x_grad_grad_check) - ok_(w_grad_grad_check) + assert x_grad_check + assert w_grad_check + assert x_grad_grad_check + assert w_grad_grad_check @with_seed() def test_dense_backward_no_flatten(): @@ -701,12 +699,8 @@ def test_dense_backward_no_flatten(): w_grad_grad_check = same(flatten2d_left(w_grad_grad), flatten2d_left(w_grad_grad_e)) x_grad_check = same(flatten2d_left(x_grad), flatten2d_left(x_grad_e)) x_grad_grad_check = same(flatten2d_left(x_grad_grad), flatten2d_left(x_grad_grad_e)) - ok_(x_grad_check) - ok_(w_grad_check) - ok_(x_grad_grad_check) - ok_(w_grad_grad_check) - + assert x_grad_check + assert w_grad_check + assert x_grad_grad_check + assert w_grad_grad_check -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_image.py b/tests/python/unittest/test_image.py index 033b8e5aab04..4a527948cb6f 100644 --- a/tests/python/unittest/test_image.py +++ b/tests/python/unittest/test_image.py @@ -23,8 +23,7 @@ import shutil import tempfile import unittest - -from nose.tools import raises +import pytest def _get_data(url, dirname): @@ -126,7 +125,7 @@ def teardownClass(cls): print("cleanup {}".format(cls.IMAGES_DIR)) shutil.rmtree(cls.IMAGES_DIR) - @raises(mx.base.MXNetError) + @pytest.mark.xfail(raises=mx.base.MXNetError) def test_imread_not_found(self): x = mx.img.image.imread("/139810923jadjsajlskd.___adskj/blah.jpg") @@ -162,11 +161,11 @@ def test_imdecode_bytearray(self): cv_image = cv2.imread(img) assert_almost_equal(image.asnumpy(), cv_image) - @raises(mx.base.MXNetError) + @pytest.mark.xfail(raises=mx.base.MXNetError) def test_imdecode_empty_buffer(self): mx.image.imdecode(b'', to_rgb=0) - @raises(mx.base.MXNetError) + @pytest.mark.xfail(raises=mx.base.MXNetError) def test_imdecode_invalid_image(self): image = mx.image.imdecode(b'clearly not image content') assert_equal(image, None) @@ -451,7 +450,3 @@ def 
test_random_rotate(self): angle_limits) self.assertEqual(out_batch_image.shape, (3, 3, 30, 60)) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_infer_shape.py b/tests/python/unittest/test_infer_shape.py index 1312be0c0081..a2f4cfef3701 100644 --- a/tests/python/unittest/test_infer_shape.py +++ b/tests/python/unittest/test_infer_shape.py @@ -18,7 +18,7 @@ # pylint: skip-file import mxnet as mx from common import models -from nose.tools import * +import pytest def test_mlp2_infer_shape(): # Build MLP @@ -36,7 +36,7 @@ def test_mlp2_infer_shape(): for k, v in true_shapes.items(): assert arg_shape_dict[k] == v -@raises(mx.MXNetError) +@pytest.mark.xfail(raises=mx.MXNetError) def test_mlp2_infer_error(): # Test shape inconsistent case out = models.mlp2() @@ -246,6 +246,3 @@ def test_where_partial_shape(): _, result, _ = where_op.infer_shape_partial(cond=(-1,), x=(2, 2), y=(2, 2)) assert result == [None] -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_infer_type.py b/tests/python/unittest/test_infer_type.py index bad83f3ef01b..286556a006c4 100644 --- a/tests/python/unittest/test_infer_type.py +++ b/tests/python/unittest/test_infer_type.py @@ -20,7 +20,6 @@ import numpy as np from common import models, with_seed from mxnet import autograd -from nose.tools import * from mxnet.test_utils import assert_almost_equal @with_seed() @@ -52,7 +51,3 @@ def test_func(a): test64.backward() assert_almost_equal(data64.grad.asnumpy(), data32.grad.asnumpy(), atol=1e-5, rtol=1e-5) - -if __name__ == '__main__': - import nose - nose.runmodule() \ No newline at end of file diff --git a/tests/python/unittest/test_io.py b/tests/python/unittest/test_io.py index a13addb0adca..a6e2cb19d21f 100644 --- a/tests/python/unittest/test_io.py +++ b/tests/python/unittest/test_io.py @@ -149,7 +149,7 @@ def _test_last_batch_handle(data, labels=None, is_image=False): batch_count_list = [40, 39, 39] else: batch_count_list = [8, 7, 7] - + for idx in range(len(last_batch_handle_list)): dataiter = mx.io.NDArrayIter( data, labels, 128, False, last_batch_handle=last_batch_handle_list[idx]) @@ -168,7 +168,7 @@ def _test_last_batch_handle(data, labels=None, is_image=False): labelcount[int(label[i])] += 1 else: assert not batch.label, 'label is not empty list' - # keep the last batch of 'pad' to be used later + # keep the last batch of 'pad' to be used later # to test first batch of roll_over in second iteration batch_count += 1 if last_batch_handle_list[idx] == 'pad' and \ @@ -243,7 +243,7 @@ def test_NDArrayIter_h5py(): with h5py.File('ndarraytest.h5') as f: f.create_dataset('data', data=data) f.create_dataset('label', data=labels) - + _test_last_batch_handle(f['data'], f['label']) _test_last_batch_handle(f['data'], []) _test_last_batch_handle(f['data']) @@ -285,7 +285,7 @@ def test_NDArrayIter_csr(): {'data': train_data}, dns, batch_size) except ImportError: pass - + # scipy.sparse.csr_matrix with shuffle csr_iter = iter(mx.io.NDArrayIter({'data': train_data}, dns, batch_size, shuffle=True, last_batch_handle='discard')) @@ -411,16 +411,15 @@ def check_libSVMIter_exception(): def test_DataBatch(): - from nose.tools import ok_ from mxnet.io import DataBatch import re batch = DataBatch(data=[mx.nd.ones((2, 3))]) - ok_(re.match( - 'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch))) + assert re.match( + 'DataBatch: data shapes: \[\(2L?, 3L?\)\] label shapes: None', str(batch)) batch = 
DataBatch(data=[mx.nd.ones((2, 3)), mx.nd.ones( (7, 8))], label=[mx.nd.ones((4, 5))]) - ok_(re.match( - 'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch))) + assert re.match( + 'DataBatch: data shapes: \[\(2L?, 3L?\), \(7L?, 8L?\)\] label shapes: \[\(4L?, 5L?\)\]', str(batch)) def test_CSVIter(): @@ -462,7 +461,7 @@ def assert_dataiter_items_equals(dataiter1, dataiter2): are equal. """ for batch1, batch2 in zip_longest(dataiter1, dataiter2): - + # ensure iterators contain the same number of batches # zip_longest will return None if one of the iterators has run out of batches assert batch1 and batch2, 'The iterators do not contain the same number of batches' @@ -533,7 +532,7 @@ def assert_dataiter_items_not_equals(dataiter1, dataiter2): random_h=10, max_shear_ratio=2, seed_aug=seed_aug) - + assert_dataiter_items_equals(dataiter1, dataiter2) # check whether to get different images after changing seed_aug @@ -573,7 +572,7 @@ def assert_dataiter_items_not_equals(dataiter1, dataiter2): data_shape=(3, 28, 28), batch_size=3, seed_aug=seed_aug) - + assert_dataiter_items_equals(dataiter1, dataiter2) if __name__ == "__main__": diff --git a/tests/python/unittest/test_kvstore.py b/tests/python/unittest/test_kvstore.py index 28d4ec262c06..a3d54469b28e 100644 --- a/tests/python/unittest/test_kvstore.py +++ b/tests/python/unittest/test_kvstore.py @@ -20,7 +20,7 @@ import numpy as np import unittest from mxnet.test_utils import rand_ndarray, assert_almost_equal -from common import setup_module, with_seed, assertRaises, teardown +from common import setup_module, with_seed, assertRaises, teardown_module from mxnet.base import py_str, MXNetError shape = (4, 4) @@ -344,6 +344,3 @@ def check_invalid_key_types_list(kv, key): check_invalid_key_types_single(kvs[i], single_keys[1 - i]) check_invalid_key_types_list(kvs[i], list_keys[1 - i]) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_kvstore_custom.py b/tests/python/unittest/test_kvstore_custom.py index 4f1f309d24c1..a8f0869f0236 100644 --- a/tests/python/unittest/test_kvstore_custom.py +++ b/tests/python/unittest/test_kvstore_custom.py @@ -20,7 +20,7 @@ import numpy as np import unittest from mxnet.test_utils import rand_ndarray, assert_almost_equal -from common import setup_module, with_seed, assertRaises, teardown +from common import setup_module, with_seed, assertRaises, teardown_module from mxnet.base import py_str, MXNetError shape = (4, 4) @@ -190,6 +190,3 @@ def check_unsupported_methods(kv): kv = mx.kv.create('teststore') check_unsupported_methods(kv) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_loss.py b/tests/python/unittest/test_loss.py index a1a49c97d7f4..e779fd672701 100644 --- a/tests/python/unittest/test_loss.py +++ b/tests/python/unittest/test_loss.py @@ -19,7 +19,7 @@ import numpy as np from mxnet import gluon, autograd from mxnet.test_utils import assert_almost_equal, default_context -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module import unittest @@ -354,7 +354,7 @@ def test_sdml_loss(): N = 5 # number of samples DIM = 10 # Dimensionality EPOCHS = 20 - + # Generate randomized data and 'positive' samples data = mx.random.uniform(-1, 1, shape=(N, DIM)) pos = data + mx.random.uniform(-0.1, 0.1, shape=(N, DIM)) # correlated paired data @@ -380,7 +380,7 @@ def test_sdml_loss(): # After training Euclidean distance between 
aligned pairs should be lower than all non-aligned pairs avg_loss = loss.sum()/len(loss) assert(avg_loss < 0.05) - + @with_seed() def test_cosine_loss(): #Generating samples @@ -488,7 +488,3 @@ def test_bce_loss_with_pos_weight(): npy_bce_loss = (- label_npy * np.log(prob_npy)*pos_weight_npy - (1 - label_npy) * np.log(1 - prob_npy)).mean(axis=1) assert_almost_equal(mx_bce_loss, npy_bce_loss, rtol=1e-4, atol=1e-5) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_metric.py b/tests/python/unittest/test_metric.py index e7273fba35d5..d1e1c5a35fb3 100644 --- a/tests/python/unittest/test_metric.py +++ b/tests/python/unittest/test_metric.py @@ -407,7 +407,3 @@ def test_single_array_input(): rmse.get() _, rmse_res = rmse.get() np.testing.assert_almost_equal(rmse_res, 0.1) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_metric_perf.py b/tests/python/unittest/test_metric_perf.py index 36cbc685797c..fc0f8da5d451 100644 --- a/tests/python/unittest/test_metric_perf.py +++ b/tests/python/unittest/test_metric_perf.py @@ -118,7 +118,3 @@ def test_metric_performance(): run_metric(k, v[1], (data_size * 128)//(n * c), n, c, pred_ctx, label_ctx, **v[0]) print("{:-^90}".format(''), file=sys.stderr) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_module.py b/tests/python/unittest/test_module.py index b82933126d67..65d86f62baf4 100644 --- a/tests/python/unittest/test_module.py +++ b/tests/python/unittest/test_module.py @@ -22,7 +22,7 @@ import numpy as np from functools import reduce from mxnet.module.executor_group import DataParallelExecutorGroup -from common import setup_module, with_seed, assertRaises, teardown +from common import setup_module, with_seed, assertRaises, teardown_module from collections import namedtuple curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path.insert(0, os.path.join(curr_path, "../train")) @@ -1029,7 +1029,3 @@ def get_module_idx2name(mod): mod2.init_optimizer(optimizer=opt) assert mod2._optimizer.idx2name == get_module_idx2name(mod2) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_ndarray.py b/tests/python/unittest/test_ndarray.py index 3a9bd9e93126..22caf77536d1 100644 --- a/tests/python/unittest/test_ndarray.py +++ b/tests/python/unittest/test_ndarray.py @@ -23,7 +23,7 @@ import pickle as pkl import random import functools -from nose.tools import assert_raises, raises +import pytest from common import with_seed, assertRaises, TemporaryDirectory from mxnet.test_utils import almost_equal from mxnet.test_utils import assert_almost_equal, assert_exception @@ -1341,7 +1341,7 @@ def check_fluent_regular(func, kwargs, shape=(5, 17, 1), equal_nan=False): check_fluent_regular('squeeze', {'axis': (1, 3)}, shape=(2, 1, 3, 1, 4)) -@raises(ValueError) +@pytest.mark.xfail(raises=ValueError) def test_bool_ambiguous(): bool(mx.nd.ones((2,3,4))) @@ -1603,10 +1603,10 @@ def convert(num): # Test basic indexing with newaxis (None, False), ((1, None, -2, 3, -4), False), - ((1, slice(2, 5), None), False), - ((slice(None), slice(1, 4), None, slice(2, 3)), False), - ((slice(1, 3), slice(1, 3), slice(1, 3), slice(1, 3), None), False), - ((slice(1, 3), slice(1, 3), None, slice(1, 3), slice(1, 3)), False), + ((1, slice(2, 5), None), False), + ((slice(None), slice(1, 4), None, slice(2, 3)), False), + ((slice(1, 3), slice(1, 3), slice(1, 3), slice(1, 3), None), 
False), + ((slice(1, 3), slice(1, 3), None, slice(1, 3), slice(1, 3)), False), ((None, slice(1, 2), 3, None), False), ((1, None, 2, 3, None, None, 4), False), # Advanced indexing @@ -2062,7 +2062,3 @@ def test_load_saved_gpu_array_when_no_gpus_are_present(): # but there are no GPUs array.__setstate__(ndarray_state) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_numpy_gluon.py b/tests/python/unittest/test_numpy_gluon.py index f3f01fc2ae92..713591795a43 100644 --- a/tests/python/unittest/test_numpy_gluon.py +++ b/tests/python/unittest/test_numpy_gluon.py @@ -431,7 +431,3 @@ def hybrid_forward(self, F, valid_length): assert mx.test_utils.same(out1.asnumpy(), out2.asnumpy()) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_numpy_interoperability.py b/tests/python/unittest/test_numpy_interoperability.py index 1f795721b820..9a737f4d80b8 100644 --- a/tests/python/unittest/test_numpy_interoperability.py +++ b/tests/python/unittest/test_numpy_interoperability.py @@ -1760,7 +1760,7 @@ def test_shapes(): a = np.ones((2,), dtype=dt) b = np.ones((2,), dtype=dt) OpArgMngr.add_workload('matmul', a, b) - + def test_result_types(): mat = np.ones((1,1)) vec = np.ones((1,)) @@ -1769,7 +1769,7 @@ def test_result_types(): v = vec.astype(dt) for arg in [(m, v), (v, m), (m, m)]: OpArgMngr.add_workload('matmul', *arg) - + def test_scalar_output(): vec1 = np.array([2]) vec2 = np.array([3, 4]).reshape(1, -1) @@ -1778,7 +1778,7 @@ def test_scalar_output(): v1 = vec1.astype(dt) v2 = vec2.astype(dt) OpArgMngr.add_workload('matmul', v1, v2) OpArgMngr.add_workload('matmul', v2.T, v1) - + def test_vector_vector_values(): vec1 = np.array([1, 2]) vec2 = np.array([3, 4]).reshape(-1, 1) @@ -1810,7 +1810,7 @@ def test_matrix_vector_values(): m2 = mat2.astype(dt) OpArgMngr.add_workload('matmul', m1, v) OpArgMngr.add_workload('matmul', m2, v) - + def test_matrix_matrix_values(): mat1 = np.array([[1, 2], [3, 4]]) mat2 = np.array([[1, 0], [1, 1]]) @@ -3174,7 +3174,3 @@ def test_np_fallback_ops(): op_list = np.fallback.__all__ + ['linalg.{}'.format(op_name) for op_name in np.fallback_linalg.__all__] check_interoperability(op_list) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_numpy_ndarray.py b/tests/python/unittest/test_numpy_ndarray.py index 3ce53c6a6e80..a45ce408802b 100644 --- a/tests/python/unittest/test_numpy_ndarray.py +++ b/tests/python/unittest/test_numpy_ndarray.py @@ -623,7 +623,7 @@ def test_nd_no_format(): if str(context)[:3] != 'gpu': test_0d() test_nd_format() - test_nd_no_format() + test_nd_no_format() # if the program is running on GPU, the formatted string would be appended with context notation # for example, if a = np.array([np.pi]), the return value of '{}'.format(a) is '[3.1415927] @gpu(0)' @@ -1237,7 +1237,7 @@ def test_boolean_indexing_assign(): mx_mask = np.array([[False,True, True],[False, True,False]],dtype=np.bool) np_mask = mx_mask.asnumpy() - + np_data[0, np_mask] = 5 mx_data[0, mx_mask] = 5 assert_almost_equal(mx_data.asnumpy(), np_data, rtol=1e-3, atol=1e-5, use_broadcast=False) @@ -1315,7 +1315,3 @@ def test_np_ndarray_pickle(): a_load = pickle.load(f) same(a.asnumpy(), a_load.asnumpy()) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 063c3b7a58a6..ac2eb48f8446 100644 --- a/tests/python/unittest/test_numpy_op.py +++ 
b/tests/python/unittest/test_numpy_op.py @@ -26,7 +26,7 @@ import mxnet as mx import scipy.stats as ss import scipy.special as scipy_special -from nose.tools import assert_raises +import pytest from mxnet import np, npx from mxnet.gluon import HybridBlock from mxnet.base import MXNetError @@ -552,7 +552,7 @@ def ShapeReduce(mat, shape, is_b=False): for shape_a, shape_b in bad_shapes: a = np.random.uniform(size=shape_a) b = np.random.uniform(size=shape_b) - assert_raises(MXNetError, lambda: np.matmul(a, b)) + pytest.raises(MXNetError, lambda: np.matmul(a, b)) @with_seed() @@ -1478,7 +1478,7 @@ def gt_grad_batch_dot_numpy(lhs, rhs, ograd, transpose_a, transpose_b, lhs_req, for dtype in dtypes: lhs_val = mx.np.array(_np.random.uniform(-1.0, 1.0, lhs_shape), dtype=dtype) rhs_val = mx.np.array(_np.random.uniform(-1.0, 1.0, rhs_shape), dtype=dtype) - assert_raises(MXNetError, lambda: mx.npx.batch_dot(lhs_val, rhs_val, + pytest.raises(MXNetError, lambda: mx.npx.batch_dot(lhs_val, rhs_val, transpose_a=transpose_a, transpose_b=transpose_b)) @@ -1962,8 +1962,8 @@ def hybrid_forward(self, F, a): assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) # Test for error raising dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32) - assert_raises(ValueError, lambda: dat.transpose((0, 0, 1))) - assert_raises(MXNetError, lambda: dat.transpose((0, 1, 3))) + pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1))) + pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3))) @@ -5556,7 +5556,7 @@ class TestLstsq(HybridBlock): def __init__(self, rcond): super(TestLstsq, self).__init__() self._rcond = rcond - + def hybrid_forward(self, F, a, b, rcond='warn'): return F.np.linalg.lstsq(a, b, rcond=self._rcond) @@ -6876,10 +6876,10 @@ def __init__(self, n, k=0, m=None): if m is None: m = n self._m = m - + def hybrid_forward(self, F, x, *args, **kwargs): return x, F.np.tril_indices(n=self._n, k=self._k, m=self._m) - + for n in _np.random.random_integers(-10, 50, 2): for k in _np.random.random_integers(-50, 50, 2): for m in _np.random.random_integers(-10, 50, 2): @@ -6900,7 +6900,7 @@ def hybrid_forward(self, F, x, *args, **kwargs): np_data[np_out] = -10 mx_data[mx_out] = -10 assert same(np_data, mx_data.asnumpy()) - + @with_seed() @use_np @@ -7614,7 +7614,7 @@ def hybrid_forward(self, F, a): a = np.random.uniform(-1.0, 1.0, size=a_shape) np_out = _np.median(a.asnumpy(), axis=axis, keepdims=keepdims) mx_out = test_median(a) - + assert mx_out.shape == np_out.shape assert_almost_equal(mx_out.asnumpy(), np_out, atol=atol, rtol=rtol) @@ -8630,10 +8630,10 @@ def __init__(self, left=None, right=None, period=None): self._left = left self._right = right self._period = period - + def hybrid_forward(self, F, x, xp, fp): return F.np.interp(x, xp, fp, left=self._left, right=self._right, period=self._period) - + class TestInterpScalar(HybridBlock): def __init__(self, x=None, left=None, right=None, period=None): super(TestInterpScalar, self).__init__() @@ -8641,7 +8641,7 @@ def __init__(self, x=None, left=None, right=None, period=None): self._left = left self._right = right self._period = period - + def hybrid_forward(self, F, xp, fp): return F.np.interp(self._x, xp, fp, left=self._left, right=self._right, period=self._period) @@ -8668,13 +8668,13 @@ def hybrid_forward(self, F, xp, fp): else: x = np.random.uniform(0, 100, size=xshape).astype(xtype) xp = np.sort(np.random.choice(100, dsize, replace=False).astype(dtype)) - fp = np.random.uniform(-50, 50, size=dsize).astype(dtype) + fp = 
np.random.uniform(-50, 50, size=dsize).astype(dtype) np_x = x.asnumpy() if x_scalar and xshape == (): x = x.item() np_x = x test_interp = TestInterpScalar(x=x, left=left, right=right, period=period) - else: + else: test_interp = TestInterp(left=left, right=right, period=period) if hybridize: test_interp.hybridize() @@ -8804,7 +8804,7 @@ def __init__(self, axis=0, start=0): super(TestRollaxis, self).__init__() self._axis = axis self._start = start - + def hybrid_forward(self, F, a, *args, **kwargs): return F.np.rollaxis(a, axis=self._axis, start=self._start) @@ -8835,7 +8835,3 @@ def hybrid_forward(self, F, a, *args, **kwargs): assert np_out.dtype == mx_out.dtype assert same(mx_out.asnumpy(), np_out) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py index 0c795db9e381..a1d467eff319 100644 --- a/tests/python/unittest/test_operator.py +++ b/tests/python/unittest/test_operator.py @@ -29,9 +29,9 @@ from mxnet.test_utils import * from mxnet.operator import * from mxnet.base import py_str, MXNetError, _as_list -from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises +from common import setup_module, with_seed, teardown_module, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises from common import run_in_spawned_process -from nose.tools import assert_raises, ok_ +import pytest import unittest import os @@ -6069,7 +6069,7 @@ def f(in_data, out_data): b = mx.nd.zeros((1, 4)) c = mx.nd.Custom(a, b, op_type='Dot1') c.wait_to_read() - assert_raises(MXNetError, custom_exc1) + pytest.raises(MXNetError, custom_exc1) # 2. error in pushing operator to engine def custom_exc2(): @@ -6081,7 +6081,7 @@ def f(in_data, out_data): # trigger error by invalid input shapes of operands c = mx.nd.Custom(a, b, op_type='Dot2') c.wait_to_read() - assert_raises(MXNetError, custom_exc2) + pytest.raises(MXNetError, custom_exc2) # 3. error in real execution if default_context().device_type == 'cpu': @@ -6098,7 +6098,7 @@ def f(in_data, out_data): b = mx.nd.zeros((1, 2)) c = mx.nd.Custom(a, b, op_type='Dot3') c.wait_to_read() - assert_raises(MXNetError, custom_exc3) + pytest.raises(MXNetError, custom_exc3) def custom_exc4(): def f(in_data, out_data): @@ -6112,7 +6112,7 @@ def f(in_data, out_data): b = mx.nd.zeros((1, 2)) c = mx.nd.Custom(a, b, op_type='Dot4') c.wait_to_read() - assert_raises(MXNetError, custom_exc4) + pytest.raises(MXNetError, custom_exc4) @with_seed() @@ -7157,11 +7157,11 @@ def test_dropout_reproducibility(): assert_almost_equal(result1.asnumpy(), result5.asnumpy()) assert_almost_equal(result2.asnumpy(), result6.asnumpy()) - with assert_raises(AssertionError): + with pytest.raises(AssertionError): assert_almost_equal(result1.asnumpy(), result2.asnumpy()) - with assert_raises(AssertionError): + with pytest.raises(AssertionError): assert_almost_equal(result1.asnumpy(), result3.asnumpy()) - with assert_raises(AssertionError): + with pytest.raises(AssertionError): assert_almost_equal(result2.asnumpy(), result4.asnumpy()) @unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. 
tracked at https://github.com/apache/incubator-mxnet/issues/11290") @@ -7704,8 +7704,8 @@ def max(): a = mx.nd.zeros(shape=(5, 0)) a.max() - assert_raises(MXNetError, min) - assert_raises(MXNetError, max) + pytest.raises(MXNetError, min) + pytest.raises(MXNetError, max) @with_seed() @@ -9330,18 +9330,18 @@ def test_add_n(): def test_get_all_registered_operators(): ops = get_all_registered_operators() - ok_(isinstance(ops, list)) - ok_(len(ops) > 0) - ok_('Activation' in ops) + assert isinstance(ops, list) + assert len(ops) > 0 + assert 'Activation' in ops def test_get_operator_arguments(): operator_arguments = get_operator_arguments('Activation') - ok_(isinstance(operator_arguments, OperatorArguments)) - ok_(operator_arguments.names == ['data', 'act_type']) - ok_(operator_arguments.types - == ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"]) - ok_(operator_arguments.narg == 2) + assert isinstance(operator_arguments, OperatorArguments) + assert operator_arguments.names == ['data', 'act_type'] + assert operator_arguments.types \ + == ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"] + assert operator_arguments.narg == 2 def test_transpose_infer_shape_back(): @@ -9924,7 +9924,3 @@ def test_elemwise_sum_for_gradient_accumulation(): assert stored_grad['write'] == stored_grad['add'] assert stored_grad['write'] == 2 * nrepeat - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_optimizer.py b/tests/python/unittest/test_optimizer.py old mode 100755 new mode 100644 index 2a15e3407862..4488d5a62e45 --- a/tests/python/unittest/test_optimizer.py +++ b/tests/python/unittest/test_optimizer.py @@ -22,10 +22,10 @@ import mxnet.lr_scheduler as lr_scheduler from mxnet import gluon import unittest -from nose.tools import raises +import pytest import math from mxnet.test_utils import * -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module @with_seed() def test_learning_rate(): @@ -44,7 +44,7 @@ def test_learning_rate(): assert o3.learning_rate == 1024 -@raises(UserWarning) +@pytest.mark.xfail(raises=UserWarning) @with_seed() def test_learning_rate_expect_user_warning(): lr_s = lr_scheduler.FactorScheduler(step=1) @@ -284,7 +284,7 @@ def test_lars(): def test_lamb(): opt1 = mx.optimizer.LAMB opt2 = mx.optimizer.LAMB - + shapes = [(3, 4, 5), (10, 4), (7,)] beta1_options = [{}, {'beta1': 0.5}] beta2_options = [{}, {'beta2': 0.8}] @@ -952,8 +952,3 @@ def test_cosine_scheduler(): np.testing.assert_almost_equal(cosine_sched(steps), final_lr) assert (cosine_sched(500) > 1.5) - -if __name__ == '__main__': - import nose - nose.runmodule() - diff --git a/tests/python/unittest/test_predictor.py b/tests/python/unittest/test_predictor.py index fc2fbf600cbc..325b830e4226 100644 --- a/tests/python/unittest/test_predictor.py +++ b/tests/python/unittest/test_predictor.py @@ -26,7 +26,7 @@ import mxnet.ndarray as nd from mxnet import gluon from mxnet.test_utils import assert_almost_equal -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module @with_seed() def test_predictor(): @@ -81,7 +81,3 @@ def test_load_ndarray(): for k in nd_data.keys(): assert_almost_equal(nd_data[k].asnumpy(), nd_load[k], rtol=1e-5, atol=1e-6) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_profiler.py b/tests/python/unittest/test_profiler.py index 
5a0baca1b684..bf745043e871 100644 --- a/tests/python/unittest/test_profiler.py +++ b/tests/python/unittest/test_profiler.py @@ -263,7 +263,7 @@ def check_sorting(debug_str, sort_by, ascending): for domain_name, domain in target_dict['Time'].items(): lst = [item[sort_by_options[sort_by]] for item_name, item in domain.items()] check_ascending(lst, ascending) - # Memory items do not have stat 'Total' + # Memory items do not have stat 'Total' if sort_by != 'total': for domain_name, domain in target_dict['Memory'].items(): lst = [item[sort_by_options[sort_by]] for item_name, item in domain.items()] @@ -372,7 +372,7 @@ def check_custom_operator_profiling_multiple_custom_ops_output(debug_str): def custom_operator_profiling_multiple_custom_ops(seed, mode, file_name): class MyAdd(mx.operator.CustomOp): - def forward(self, is_train, req, in_data, out_data, aux): + def forward(self, is_train, req, in_data, out_data, aux): self.assign(out_data[0], req[0], in_data[0] + 1) def backward(self, req, out_grad, in_data, out_data, in_grad, aux): @@ -610,7 +610,3 @@ def test_gpu_memory_profiler_gluon(): row['Attribute Name'] == ":": assert False, "Unknown allocation entry has been encountered" - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_random.py b/tests/python/unittest/test_random.py index efcf16dc78da..ac20a99481cf 100644 --- a/tests/python/unittest/test_random.py +++ b/tests/python/unittest/test_random.py @@ -22,7 +22,7 @@ from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, retry, assert_almost_equal import numpy as np import random as rnd -from common import setup_module, with_seed, random_seed, teardown +from common import setup_module, with_seed, random_seed, teardown_module import scipy.stats as ss import unittest from mxnet.test_utils import * @@ -339,7 +339,7 @@ def check_with_device(device, dtype): un1 = np.maximum(un1, 1e-1) if name == 'uniform': un1 = np.minimum(np.maximum(un1.reshape((un1.shape[0],un1.shape[1],-1)), p1.reshape((p1.shape[0],p1.shape[1],-1))+1e-4), - p2.reshape((p2.shape[0],p2.shape[1],-1))-1e-4).reshape(un1.shape) + p2.reshape((p2.shape[0],p2.shape[1],-1))-1e-4).reshape(un1.shape) for use_log in [False, True]: test_pdf = symbol(v0, v1, is_log=use_log) if single_param else symbol(v0, v1, v2, is_log=use_log) forw_atol = 1e-7 if dtype != np.float16 else 1e-3 @@ -349,7 +349,7 @@ def check_with_device(device, dtype): if single_param: res = pdffunc(un1.reshape((un1.shape[0],un1.shape[1],-1)), p1.reshape((p1.shape[0],p1.shape[1],-1))).reshape(un1.shape) - if use_log: + if use_log: res = np.log(res) check_symbolic_forward(test_pdf, [un1, p1], [res], atol=forw_atol, rtol=forw_rtol, dtype=dtype) if dtype == np.float64: @@ -1035,7 +1035,3 @@ def test_sample_multinomial_num_outputs(): assert isinstance(out, list) assert len(out) == 2 - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_recordio.py b/tests/python/unittest/test_recordio.py index 81561b987839..6db54d541244 100644 --- a/tests/python/unittest/test_recordio.py +++ b/tests/python/unittest/test_recordio.py @@ -22,7 +22,7 @@ import tempfile import random import string -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module @with_seed() def test_recordio(): diff --git a/tests/python/unittest/test_rnn.py b/tests/python/unittest/test_rnn.py index a5588250e515..ab333f93c918 100644 --- a/tests/python/unittest/test_rnn.py +++ 
b/tests/python/unittest/test_rnn.py @@ -308,7 +308,4 @@ def test_encode_sentences(): print(result, vocab) assert vocab == {'a': 1, 'b': 2, 'c': 3, 'UNK': 0} assert result == [[1,2,3],[2,3,0]] - -if __name__ == '__main__': - import nose - nose.runmodule() + diff --git a/tests/python/unittest/test_runtime.py b/tests/python/unittest/test_runtime.py index 82e2314532b8..0bd4d4c4dcb1 100644 --- a/tests/python/unittest/test_runtime.py +++ b/tests/python/unittest/test_runtime.py @@ -19,14 +19,14 @@ import sys from mxnet.runtime import * from mxnet.base import MXNetError -from nose.tools import * +import pytest def test_features(): features = Features() print(features) - ok_('CUDA' in features) - ok_(len(features) >= 30) + assert 'CUDA' in features + assert len(features) >= 30 def test_is_singleton(): @@ -39,17 +39,13 @@ def test_is_enabled(): features = Features() for f in features: if features[f].enabled: - ok_(features.is_enabled(f)) + assert features.is_enabled(f) else: - ok_(not features.is_enabled(f)) + assert not features.is_enabled(f) -@raises(RuntimeError) +@pytest.mark.xfail(raises=RuntimeError) def test_is_enabled_not_existing(): features = Features() features.is_enabled('this girl is on fire') - -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_sparse_ndarray.py b/tests/python/unittest/test_sparse_ndarray.py index 9a1fce4ff197..de06c7be0777 100644 --- a/tests/python/unittest/test_sparse_ndarray.py +++ b/tests/python/unittest/test_sparse_ndarray.py @@ -19,7 +19,7 @@ from mxnet.ndarray import NDArray from mxnet.test_utils import * -from common import setup_module, with_seed, random_seed, teardown +from common import setup_module, with_seed, random_seed, teardown_module from mxnet.base import mx_real_t from numpy.testing import assert_allclose import numpy.random as rnd @@ -1056,6 +1056,3 @@ def check_sparse_getnnz(density, axis): for a in axis: check_sparse_getnnz(d, a) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_sparse_operator.py b/tests/python/unittest/test_sparse_operator.py index 4c4e3dbdfc51..f23f59414816 100644 --- a/tests/python/unittest/test_sparse_operator.py +++ b/tests/python/unittest/test_sparse_operator.py @@ -17,7 +17,7 @@ from mxnet.test_utils import * from mxnet.base import MXNetError -from common import setup_module, with_seed, teardown, assertRaises +from common import setup_module, with_seed, teardown_module, assertRaises import random import warnings @@ -2346,6 +2346,3 @@ def test_reshape_backward_fallback(): assert_almost_equal(grad_w_nd.asnumpy(), expected_grad_nd) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_subgraph.py b/tests/python/unittest/test_subgraph.py index 3da125a946bc..b1a7aa3f3abb 100644 --- a/tests/python/unittest/test_subgraph.py +++ b/tests/python/unittest/test_subgraph.py @@ -21,7 +21,7 @@ import mxnet as mx import copy from mxnet.test_utils import * -from common import setup_module, with_seed, teardown +from common import setup_module, with_seed, teardown_module from mxnet.gluon.model_zoo.vision import get_model def make_subgraph(subg, *args): @@ -190,6 +190,3 @@ def create_operator(self, ctx, shapes, dtypes): c.bind(mx.cpu(), {'a': inp}).forward() mx.nd.waitall() -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_subgraph_op.py b/tests/python/unittest/test_subgraph_op.py index e414a9836ccb..bffb38e6dd5d 100644 --- 
a/tests/python/unittest/test_subgraph_op.py +++ b/tests/python/unittest/test_subgraph_op.py @@ -41,7 +41,7 @@ def network_structure_2(): ret1 = mx.sym.cos(ret) ret2 = mx.sym.sin(ret) ret = ret1 + ret2 - return (ret, ['data'], [(2, 3, 10, 10)]) + return (ret, ['data'], [(2, 3, 10, 10)]) def network_structure_3(): # this tests whether the partitioned sym can distinguish in_args and aux_states @@ -74,7 +74,7 @@ def network_structure_6(): data3 = mx.sym.sin(data2) conv = mx.sym.Convolution(data=data1, weight=data3, kernel=(2, 2), num_filter=1) return (conv, ['data1'], [(3, 3, 10, 10)]) - + def network_structure_7(): # in this graph, the subgraph node and the other two external nodes form a cycle data = mx.sym.Variable('data', shape=(1,)) @@ -85,7 +85,7 @@ def network_structure_7(): ret = ret1 + ret2 return (ret, ['data'], [(1,)]) -def get_graphs(): +def get_graphs(): return [ (network_structure_1(), ['Convolution']), (network_structure_2(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']), @@ -271,7 +271,7 @@ def check_subgraph_exe5(sym, subgraph_backend, op_names): assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,))) def check_subgraph_exe6(sym, subgraph_backend, op_names): - """Call optimize_for to trigger graph partitioning with shapes/types, then simple_bind + """Call optimize_for to trigger graph partitioning with shapes/types, then simple_bind and compare results of the partitioned sym and the original sym.""" # simple_bind exe1 = sym.simple_bind(ctx=mx.current_context(), grad_req='null') @@ -340,17 +340,17 @@ def check_subgraph_exe8(sym, subgraph_backend, op_names): exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null') exe2.forward() - + # compare outputs outputs1 = exe1.outputs outputs2 = exe2.outputs assert len(outputs1) == len(outputs2) for i in range(len(outputs1)): assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,))) - + def check_subgraph_exe9(sym, subgraph_backend, op_names): - """Call hybridize() to partition the graph, and then compare results of the partitioned - sym and the original sym. Here do an inference before hybridizing with the subgraph_backend + """Call hybridize() to partition the graph, and then compare results of the partitioned + sym and the original sym. 
Here do an inference before hybridizing with the subgraph_backend which means we'll pass shapes/types""" # create Gluon block for given symbol inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]] @@ -487,6 +487,3 @@ def hybrid_forward(self, F, x): for i in range(len(outputs1)): assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,))) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_symbol.py b/tests/python/unittest/test_symbol.py index c913f5c07c9f..d5437b994ee3 100644 --- a/tests/python/unittest/test_symbol.py +++ b/tests/python/unittest/test_symbol.py @@ -559,6 +559,3 @@ def test_infershape_happens_for_all_ops_in_graph(): assert False -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_test_utils.py b/tests/python/unittest/test_test_utils.py index 49f0b932fdd5..98e7d94ff793 100644 --- a/tests/python/unittest/test_test_utils.py +++ b/tests/python/unittest/test_test_utils.py @@ -19,10 +19,11 @@ import tempfile import mxnet as mx -from nose.tools import * +import pytest -@raises(Exception) + +@pytest.mark.xfail(raises=Exception) def test_download_retries(): mx.test_utils.download("http://doesnotexist.notfound") @@ -31,4 +32,4 @@ def test_download_successful(): tmpfile = os.path.join(tmp, 'README.md') mx.test_utils.download("https://raw.githubusercontent.com/apache/incubator-mxnet/master/README.md", fname=tmpfile) - assert os.path.getsize(tmpfile) > 100 \ No newline at end of file + assert os.path.getsize(tmpfile) > 100 diff --git a/tests/python/unittest/test_thread_local.py b/tests/python/unittest/test_thread_local.py index 63d97f17ebba..7e875c8fb835 100644 --- a/tests/python/unittest/test_thread_local.py +++ b/tests/python/unittest/test_thread_local.py @@ -221,7 +221,3 @@ def f(): finally: set_np_shape(0) - -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_tvm_op.py b/tests/python/unittest/test_tvm_op.py index e325edcf4d75..55bb7cc2bd92 100644 --- a/tests/python/unittest/test_tvm_op.py +++ b/tests/python/unittest/test_tvm_op.py @@ -67,6 +67,3 @@ def test_tvm_broadcast_add(): assert same(a.grad.asnumpy(), expected_grad_a) assert same(b.grad.asnumpy(), expected_grad_b) -if __name__ == '__main__': - import nose - nose.runmodule() diff --git a/tests/python/unittest/test_viz.py b/tests/python/unittest/test_viz.py index 13210993014a..5c9b78a017d2 100644 --- a/tests/python/unittest/test_viz.py +++ b/tests/python/unittest/test_viz.py @@ -62,6 +62,3 @@ def test_plot_network(): assert "There are multiple variables with the same name in your graph" in str(w[-1].message) assert "fc" in str(w[-1].message) -if __name__ == "__main__": - import nose - nose.runmodule() diff --git a/tests/requirements.txt b/tests/requirements.txt index e16b764d2746..f7ee514760b7 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -1,8 +1,10 @@ # Requirements for tests, those are installed before running on the virtualenv # Requirements for tests run within the qemu requirement see ci/qemu/test_requirements.txt mock -nose -nose-timer +pytest==5.3.2 +pytest-env==0.6.2 +pytest-cov==2.8.1 +pytest-xdist==1.31.0 ipython numpy scipy diff --git a/tools/caffe_converter/test_converter.py b/tools/caffe_converter/test_converter.py old mode 100755 new mode 100644 diff --git a/tools/dependencies/README.md b/tools/dependencies/README.md index ec1e80088895..ba92cc875c8f 100644 --- a/tools/dependencies/README.md +++ 
b/tools/dependencies/README.md @@ -204,7 +204,7 @@ pip install -e python export NCCL_DEBUG=VERSION vim tests/python/gpu/test_nccl.py # Remove @unittest.skip("Test requires NCCL library installed and enabled during build") then run -nosetests --verbose tests/python/gpu/test_nccl.py +pytest --verbose tests/python/gpu/test_nccl.py # test_nccl.test_nccl_pushpull ... NCCL version 2.4.2+cuda10.0 # ok # ---------------------------------------------------------------------- diff --git a/tools/diagnose.py b/tools/diagnose.py old mode 100755 new mode 100644 diff --git a/tools/flakiness_checker.py b/tools/flakiness_checker.py index 79fa3b1854f0..85eae14c9557 100644 --- a/tools/flakiness_checker.py +++ b/tools/flakiness_checker.py @@ -39,7 +39,7 @@ def run_test_trials(args): new_env = os.environ.copy() new_env["MXNET_TEST_COUNT"] = str(args.num_trials) - + if args.seed is None: logging.info("No test seed provided, using random seed") else: @@ -47,17 +47,17 @@ def run_test_trials(args): verbosity = "--verbosity=" + str(args.verbosity) - code = subprocess.call(["nosetests", verbosity, test_path], + code = subprocess.call(["pytest", verbosity, test_path], env = new_env) - - logging.info("Nosetests terminated with exit code %d", code) + + logging.info("Test terminated with exit code %d", code) def find_test_path(test_file): """Searches for the test file and returns the path if found By default, the current working directory is the top of the search. If a directory was provided as part of the argument, the directory will be joined with cwd unless it was an absolute path, in which case, the - absolute path will be used instead. + absolute path will be used instead. """ test_file += ".py" test_path = os.path.split(test_file) @@ -66,7 +66,7 @@ def find_test_path(test_file): for (path, dirs, files) in os.walk(top): if test_path[1] in files: return os.path.join(path, test_path[1]) - raise FileNotFoundError("Could not find " + test_path[1] + + raise FileNotFoundError("Could not find " + test_path[1] + " in directory: " + top) class NameAction(argparse.Action): @@ -82,12 +82,12 @@ def __call__(self, parser, namespace, values, option_string=None): def parse_args(): parser = argparse.ArgumentParser(description="Check test for flakiness") - + parser.add_argument("test", action=NameAction, help="file name and function name of test, " "provided in the format: <file-name>.<test-name> " "or <directory>/<file>:<test-name>") - + parser.add_argument("-n", "--num-trials", metavar="N", default=DEFAULT_NUM_TRIALS, type=int, help="number of test trials, passed as " @@ -95,11 +95,11 @@ def parse_args(): parser.add_argument("-s", "--seed", type=int, help="test seed, passed as MXNET_TEST_SEED, " - "defaults to random seed") + "defaults to random seed") parser.add_argument("-v", "--verbosity", default=DEFAULT_VERBOSITY, type=int, - help="logging level, passed to nosetests") + help="logging level, passed to pytest") args = parser.parse_args() diff --git a/tools/im2rec.py b/tools/im2rec.py old mode 100755 new mode 100644 diff --git a/tools/ipynb2md.py b/tools/ipynb2md.py old mode 100755 new mode 100644 diff --git a/tools/launch.py b/tools/launch.py old mode 100755 new mode 100644 diff --git a/tools/parse_log.py b/tools/parse_log.py old mode 100755 new mode 100644
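The hunks above apply the same small set of mechanical nose-to-pytest substitutions across every test module. A minimal sketch of those idioms, for reference only (the test names and literal values below are illustrative and not taken from this patch):

```python
# Illustrative sketch of the nose -> pytest idioms used throughout this patch.
import mxnet as mx
import pytest


# nose: @raises(ValueError); the replacement used in this patch:
@pytest.mark.xfail(raises=ValueError)
def test_decorator_idiom():
    # The truth value of a multi-element NDArray is ambiguous and raises.
    bool(mx.nd.ones((2, 3, 4)))


def test_context_manager_idiom():
    # nose: with assert_raises(ValueError): ...
    with pytest.raises(ValueError):
        raise ValueError("invalid input")


def test_bare_assert_idiom():
    # nose: ok_(expr) becomes a plain assert statement.
    ops = ['Activation']
    assert 'Activation' in ops
```

The fixture rename follows the same logic: pytest recognizes only the xunit-style names setup_module/teardown_module as module-level hooks, so the shared common.teardown helper becomes common.teardown_module, and the `if __name__ == '__main__': nose.runmodule()` blocks are dropped because pytest discovers tests by collection rather than by executing each module directly.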