diff --git a/docs/tutorials/gluon/datasets.md b/docs/tutorials/gluon/datasets.md
index c029124af8b6..6f645bb4384f 100644
--- a/docs/tutorials/gluon/datasets.md
+++ b/docs/tutorials/gluon/datasets.md
@@ -157,7 +157,7 @@ def construct_net():
     return net
 
 # construct and initialize network.
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 net = construct_net()
 net.hybridize()
 
diff --git a/docs/tutorials/gluon/info_gan.md b/docs/tutorials/gluon/info_gan.md
index 93fd6cb5e7fd..91adf6c75108 100644
--- a/docs/tutorials/gluon/info_gan.md
+++ b/docs/tutorials/gluon/info_gan.md
@@ -51,7 +51,7 @@ batch_size = 64
 z_dim = 100
 n_continuous = 2
 n_categories = 10
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 ```
 
 Some functions to load and normalize images.
diff --git a/docs/tutorials/gluon/learning_rate_finder.md b/docs/tutorials/gluon/learning_rate_finder.md
index 30c66e302766..b580bee7c5fb 100644
--- a/docs/tutorials/gluon/learning_rate_finder.md
+++ b/docs/tutorials/gluon/learning_rate_finder.md
@@ -231,7 +231,7 @@ Using a Pre-activation ResNet-18 from the Gluon model zoo, we instantiate our Le
 
 
 ```python
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 net = mx.gluon.model_zoo.vision.resnet18_v2(classes=10)
 learner = Learner(net=net, data_loader=data_loader, ctx=ctx)
 lr_finder = LRFinder(learner)
diff --git a/docs/tutorials/gluon/learning_rate_schedules.md b/docs/tutorials/gluon/learning_rate_schedules.md
index 46c79ebc249b..119677373577 100644
--- a/docs/tutorials/gluon/learning_rate_schedules.md
+++ b/docs/tutorials/gluon/learning_rate_schedules.md
@@ -140,7 +140,7 @@ As discussed above, the schedule should return a learning rate given an (1-based
 
 ```python
 # Use GPU if one exists, else use CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 
 # MNIST images are 28x28. Total pixels in input layer is 28x28 = 784
 num_inputs = 784
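All of the tutorial hunks above make the same one-line swap, so a self-contained sketch of the resulting idiom may help (illustrative only, not part of the patch): `mx.context.num_gpus()` returns a plain integer count of visible GPUs, whereas the removed `mx.test_utils.list_gpus()` parsed `nvidia-smi` output and returned an iterable of device indices.

```python
import mxnet as mx

# num_gpus() returns an int (0 on a CPU-only machine). An int of zero is
# falsy, so it can drive the conditional directly -- no len() call needed.
ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
print(ctx)
```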
diff --git a/docs/tutorials/gluon/save_load_params.md b/docs/tutorials/gluon/save_load_params.md
index 26d6b8924b3c..c82ec5ac35a7 100644
--- a/docs/tutorials/gluon/save_load_params.md
+++ b/docs/tutorials/gluon/save_load_params.md
@@ -50,7 +50,7 @@ Let's define a helper function to build a LeNet model and another helper to trai
 
 ```python
 # Use GPU if one exists, else use CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 
 # MNIST images are 28x28. Total pixels in input layer is 28x28 = 784
 num_inputs = 784
diff --git a/docs/tutorials/nlp/cnn.md b/docs/tutorials/nlp/cnn.md
index e671de3a1f57..105bf03f9e2b 100644
--- a/docs/tutorials/nlp/cnn.md
+++ b/docs/tutorials/nlp/cnn.md
@@ -300,7 +300,7 @@ import time
 
 CNNModel = namedtuple("CNNModel", ['cnn_exec', 'symbol', 'data', 'label', 'param_blocks'])
 
 # Define what device to train/test on, use GPU if available
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 
 arg_names = cnn.list_arguments()
diff --git a/docs/tutorials/python/kvstore.md b/docs/tutorials/python/kvstore.md
index 42debab9b83e..4807475faa12 100644
--- a/docs/tutorials/python/kvstore.md
+++ b/docs/tutorials/python/kvstore.md
@@ -57,9 +57,9 @@ values and then push the aggregated value:
 
 ```python
 # The numbers used below assume 4 GPUs
-gpus = mx.test_utils.list_gpus()
-if len(gpus) > 1:
-    contexts = [mx.gpu(i) for i in gpus]
+gpus = mx.context.num_gpus()
+if gpus > 0:
+    contexts = [mx.gpu(i) for i in range(gpus)]
 else:
     contexts = [mx.cpu(i) for i in range(4)]
 b = [mx.nd.ones(shape, ctx) for ctx in contexts]
@@ -173,4 +173,4 @@ When the distributed version is ready, we will update this section.
 
 ## Next Steps
 * [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
-
\ No newline at end of file
+
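The kvstore.md hunk above is the one multi-device case: because `num_gpus()` returns a count rather than a list of indices, the device list has to be rebuilt with `range`. Note the guard also changes from `> 1` to `> 0`, so a single-GPU machine now uses its GPU instead of the CPU fallback. A sketch of the updated pattern (illustrative; `shape` is a hypothetical stand-in for the value the tutorial defines earlier):

```python
import mxnet as mx

gpus = mx.context.num_gpus()
if gpus > 0:
    # One context per detected GPU.
    contexts = [mx.gpu(i) for i in range(gpus)]
else:
    # The tutorial's CPU fallback: four CPU contexts to mimic 4 GPUs.
    contexts = [mx.cpu(i) for i in range(4)]

shape = (2, 3)  # hypothetical shape for demonstration
b = [mx.nd.ones(shape, ctx) for ctx in contexts]
print(contexts)
```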
diff --git a/docs/tutorials/python/mnist.md b/docs/tutorials/python/mnist.md
index 9d641b36c202..ac965617e347 100644
--- a/docs/tutorials/python/mnist.md
+++ b/docs/tutorials/python/mnist.md
@@ -50,7 +50,7 @@ mnist = mx.test_utils.get_mnist()
 mx.random.seed(42)
 
 # Set the compute context, GPU is available otherwise CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 ```
 
 After running the above source code, the entire MNIST dataset should be fully loaded into memory. Note that for large datasets it is not feasible to pre-load the entire dataset first like we did here. What is needed is a mechanism by which we can quickly and efficiently stream data directly from the source. MXNet Data iterators come to the rescue here by providing exactly that. Data iterator is the mechanism by which we feed input data into an MXNet training algorithm and they are very simple to initialize and use and are optimized for speed. During training, we typically process training samples in small batches and over the entire training lifetime will end up processing each training example multiple times. In this tutorial, we'll configure the data iterator to feed examples in batches of 100. Keep in mind that each example is a 28x28 grayscale image and the corresponding label.
diff --git a/docs/tutorials/python/profiler.md b/docs/tutorials/python/profiler.md
index d3e3355b8f4a..808030949aee 100644
--- a/docs/tutorials/python/profiler.md
+++ b/docs/tutorials/python/profiler.md
@@ -111,7 +111,7 @@ Let's define a method that will run one training iteration given data and label.
 
 ```python
 # Use GPU if available
-if len(mx.test_utils.list_gpus())!=0:
+if mx.context.num_gpus():
     ctx=mx.gpu()
 else:
     ctx=mx.cpu()
diff --git a/docs/tutorials/unsupervised_learning/gan.md b/docs/tutorials/unsupervised_learning/gan.md
index ca0fb15e01c5..0416593b6c1e 100644
--- a/docs/tutorials/unsupervised_learning/gan.md
+++ b/docs/tutorials/unsupervised_learning/gan.md
@@ -240,7 +240,7 @@ sigma = 0.02
 lr = 0.0002
 beta1 = 0.5
 # Define the compute context, use GPU if available
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
 
 #=============Generator Module=============
 generator = mx.mod.Module(symbol=generatorSymbol, data_names=('rand',), label_names=None, context=ctx)
diff --git a/example/adversary/adversary_generation.ipynb b/example/adversary/adversary_generation.ipynb
index 0b45366504e3..76c5f4cff569 100644
--- a/example/adversary/adversary_generation.ipynb
+++ b/example/adversary/adversary_generation.ipynb
@@ -45,7 +45,7 @@
    },
    "outputs": [],
    "source": [
-    "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()\n",
+    "ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()\n",
     "batch_size = 128"
    ]
   },
diff --git a/example/autoencoder/convolutional_autoencoder.ipynb b/example/autoencoder/convolutional_autoencoder.ipynb
index c42ad900ec98..a49eba0fcc10 100644
--- a/example/autoencoder/convolutional_autoencoder.ipynb
+++ b/example/autoencoder/convolutional_autoencoder.ipynb
@@ -50,7 +50,7 @@
    "outputs": [],
    "source": [
     "batch_size = 512\n",
-    "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+    "ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()"
    ]
   },
   {
diff --git a/example/bi-lstm-sort/bi-lstm-sort.ipynb b/example/bi-lstm-sort/bi-lstm-sort.ipynb
index 085117674b58..5d18be35e079 100644
--- a/example/bi-lstm-sort/bi-lstm-sort.ipynb
+++ b/example/bi-lstm-sort/bi-lstm-sort.ipynb
@@ -39,7 +39,7 @@
     "seq_len = 5\n",
     "split = 0.8\n",
     "batch_size = 512\n",
-    "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+    "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()"
    ]
   },
   {
diff --git a/example/distributed_training-horovod/gluon_mnist.py b/example/distributed_training-horovod/gluon_mnist.py
index 753758b234e7..7b39f5776a42 100644
--- a/example/distributed_training-horovod/gluon_mnist.py
+++ b/example/distributed_training-horovod/gluon_mnist.py
@@ -45,7 +45,7 @@
 
 if not args.no_cuda:
     # Disable CUDA if there are no GPUs.
-    if not mx.test_utils.list_gpus():
+    if mx.context.num_gpus() == 0:
        args.no_cuda = True
 
 logging.basicConfig(level=logging.INFO)
diff --git a/example/distributed_training-horovod/module_mnist.py b/example/distributed_training-horovod/module_mnist.py
index 86fbb0fc27d9..4fcb02a46996 100644
--- a/example/distributed_training-horovod/module_mnist.py
+++ b/example/distributed_training-horovod/module_mnist.py
@@ -42,7 +42,7 @@
 
 if not args.no_cuda:
     # Disable CUDA if there are no GPUs.
-    if not mx.test_utils.list_gpus():
+    if mx.context.num_gpus() == 0:
        args.no_cuda = True
 
 logging.basicConfig(level=logging.INFO)
diff --git a/example/image-classification/test_score.py b/example/image-classification/test_score.py
index 0789c9270fff..e41d4e62ea8e 100644
--- a/example/image-classification/test_score.py
+++ b/example/image-classification/test_score.py
@@ -51,11 +51,10 @@ def test_imagenet1k_inception_bn(**kwargs):
     assert r > g and r < g + .1
 
 if __name__ == '__main__':
-    gpus = mx.test_utils.list_gpus()
-    assert len(gpus) > 0
-    batch_size = 16 * len(gpus)
-    gpus = ','.join([str(i) for i in gpus])
-
+    num_gpus = mx.context.num_gpus()
+    assert num_gpus > 0
+    batch_size = 16 * num_gpus
+    gpus = ','.join(map(str, range(num_gpus)))
     kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
     download_data()
     test_imagenet1k_resnet(**kwargs)
diff --git a/example/multi-task/multi-task-learning.ipynb b/example/multi-task/multi-task-learning.ipynb
index 6e03e2b61f8c..048d6d9862b8 100644
--- a/example/multi-task/multi-task-learning.ipynb
+++ b/example/multi-task/multi-task-learning.ipynb
@@ -58,7 +58,7 @@
    "source": [
     "batch_size = 128\n",
     "epochs = 5\n",
-    "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()\n",
+    "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()\n",
     "lr = 0.01"
    ]
   },
diff --git a/example/recommenders/demo2-dssm.ipynb b/example/recommenders/demo2-dssm.ipynb
index 49450c56ebc6..d0cd3ed65771 100644
--- a/example/recommenders/demo2-dssm.ipynb
+++ b/example/recommenders/demo2-dssm.ipynb
@@ -41,7 +41,7 @@
     "hidden_units = 128\n",
     "epsilon_proj = 0.25\n",
     "\n",
-    "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+    "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()"
    ]
   },
   {
diff --git a/example/svm_mnist/svm_mnist.py b/example/svm_mnist/svm_mnist.py
index 3fc0362f6b01..e166cb6ac707 100644
--- a/example/svm_mnist/svm_mnist.py
+++ b/example/svm_mnist/svm_mnist.py
@@ -82,7 +82,7 @@
 
 # Article's suggestion on batch size
 batch_size = 200
-ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()
 
 results = {}
 for output in [mlp_svm_l2, mlp_svm_l1, mlp_softmax]:
@@ -121,4 +121,4 @@
 
 #svm_l2 97.85 %s
 #svm_l1 98.15 %s
-#softmax 97.69 %s
\ No newline at end of file
+#softmax 97.69 %s
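In the test_score.py hunk above, the integer count also has to be turned back into the comma-separated device-list string that the image-classification scripts expect. A sketch with a hypothetical count of 4:

```python
# Rebuild the "0,1,2,3"-style GPU argument from an integer count.
num_gpus = 4  # hypothetical; the script obtains this from mx.context.num_gpus()
gpus = ','.join(map(str, range(num_gpus)))
batch_size = 16 * num_gpus  # batch size scales with the device count

assert gpus == '0,1,2,3'
assert batch_size == 64
```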
diff --git a/python/mxnet/gluon/contrib/nn/basic_layers.py b/python/mxnet/gluon/contrib/nn/basic_layers.py
index 6cbf988fc94a..706e5e4dfb12 100644
--- a/python/mxnet/gluon/contrib/nn/basic_layers.py
+++ b/python/mxnet/gluon/contrib/nn/basic_layers.py
@@ -24,7 +24,7 @@
            'PixelShuffle3D']
 
 import warnings
-from .... import nd, test_utils
+from .... import nd, context
 from ...block import HybridBlock, Block
 from ...nn import Sequential, HybridSequential, BatchNorm
 
@@ -233,7 +233,7 @@ def _get_num_devices(self):
         warnings.warn("Caution using SyncBatchNorm: "
                       "if not using all the GPUs, please mannually set num_devices",
                       UserWarning)
-        num_devices = len(test_utils.list_gpus())
+        num_devices = context.num_gpus()
         num_devices = num_devices if num_devices > 0 else 1
         return num_devices
 
diff --git a/tests/python/gpu/test_nccl.py b/tests/python/gpu/test_nccl.py
index 40ef6fdfd0af..275dae009a21 100644
--- a/tests/python/gpu/test_nccl.py
+++ b/tests/python/gpu/test_nccl.py
@@ -22,7 +22,7 @@
 
 shapes = [(10), (100), (1000), (10000), (100000), (2,2), (2,3,4,5,6,7,8)]
 keys = [1,2,3,4,5,6,7]
-num_gpus = len(mx.test_utils.list_gpus())
+num_gpus = mx.context.num_gpus()
 
 
 if num_gpus > 8 :
diff --git a/tests/python/profiling/test_nvtx.py b/tests/python/profiling/test_nvtx.py
index 35b209ebb6eb..507b438e300d 100644
--- a/tests/python/profiling/test_nvtx.py
+++ b/tests/python/profiling/test_nvtx.py
@@ -25,7 +25,7 @@
 
 
 def test_nvtx_ranges_present_in_profile():
-    if not mx.test_utils.list_gpus():
+    if not mx.context.num_gpus():
         unittest.skip('Test only applicable to machines with GPUs')
 
     # Build a system independent wrapper to execute simple_forward with nvprof
diff --git a/tools/caffe_converter/test_converter.py b/tools/caffe_converter/test_converter.py
index 3c325d6bdd63..a4f744556fed 100644
--- a/tools/caffe_converter/test_converter.py
+++ b/tools/caffe_converter/test_converter.py
@@ -90,9 +90,9 @@ def main():
         gpus = [-1]
         default_batch_size = 32
     else:
-        gpus = mx.test_utils.list_gpus()
-        assert gpus, 'At least one GPU is needed to run test_converter in GPU mode'
-        default_batch_size = 32 * len(gpus)
+        num_gpus = mx.context.num_gpus()
+        assert num_gpus, 'At least one GPU is needed to run test_converter in GPU mode'
+        default_batch_size = 32 * num_gpus
 
     models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']
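Finally, the basic_layers.py hunk preserves SyncBatchNorm's CPU fallback: `context.num_gpus()` can return 0, so `_get_num_devices` clamps the count to at least 1. A minimal sketch of the resulting behavior (illustrative; `SyncBatchNorm` takes the count through its `num_devices` argument):

```python
import mxnet as mx
from mxnet.gluon.contrib.nn import SyncBatchNorm

# Same clamp as the updated _get_num_devices: never report zero devices,
# so the layer still works on a CPU-only machine.
num_devices = mx.context.num_gpus()
num_devices = num_devices if num_devices > 0 else 1

# With num_devices == 1 this behaves like a regular BatchNorm on one device.
layer = SyncBatchNorm(num_devices=num_devices)
print(num_devices, layer)
```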