This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

[DOC][v2.0] Part3: Evaluate Notebooks #20490

Merged · 29 commits · Aug 17, 2021

3 changes: 2 additions & 1 deletion ci/docker/runtime_functions.sh
@@ -1143,8 +1143,9 @@ build_python_docs() {
export PATH=/home/jenkins_slave/.local/bin:$PATH

pushd python
cp tutorials/getting-started/crash-course/prepare_dataset.py .
make clean
make html EVAL=0
make html EVAL=1

GZIP=-9 tar zcvf python-artifacts.tgz -C build/_build/html .
popd
6 changes: 3 additions & 3 deletions ci/jenkins/Jenkins_steps.groovy
@@ -1070,11 +1070,11 @@ def should_pack_website() {
// Call this function from Jenkins to generate just the Python API microsite artifacts.
def docs_python(lib_name) {
return ['Python Docs': {
node(NODE_LINUX_CPU) {
node(NODE_LINUX_GPU_G4) {
ws('workspace/docs') {
timeout(time: max_time, unit: 'MINUTES') {
utils.unpack_and_init(lib_name, mx_lib, false)
utils.docker_run('ubuntu_cpu', 'build_python_docs', false)
utils.unpack_and_init(lib_name, mx_lib_cython)
utils.docker_run('ubuntu_gpu_cu111', 'build_python_docs', true)
if (should_pack_website()) {
utils.pack_lib('python-artifacts', 'docs/_build/python-artifacts.tgz', false)
}
6 changes: 4 additions & 2 deletions ci/jenkins/Jenkinsfile_website_beta
@@ -36,13 +36,15 @@ utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-m
utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_cpu_openblas('libmxnet'),
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Build Docs', [
// Only building a subset of the docs for previewing on staging
custom_steps.docs_jekyll(),
custom_steps.docs_python('libmxnet')
custom_steps.docs_c('libmxnet'),
custom_steps.docs_python('libmxnet_gpu')
])

utils.parallel_stage('Prepare', [
5 changes: 3 additions & 2 deletions ci/jenkins/Jenkinsfile_website_full
@@ -35,13 +35,14 @@ utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-m
utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_cpu_openblas('libmxnet'),
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Build Docs', [
custom_steps.docs_jekyll(),
custom_steps.docs_c('libmxnet'),
custom_steps.docs_python('libmxnet'),
custom_steps.docs_python('libmxnet_gpu'),
])

utils.parallel_stage('Prepare', [
7 changes: 4 additions & 3 deletions ci/jenkins/Jenkinsfile_website_full_pr
@@ -29,19 +29,20 @@ node('utility') {
utils = load('ci/Jenkinsfile_utils.groovy')
custom_steps = load('ci/jenkins/Jenkins_steps.groovy')
}
utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu')
utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_gpu_g4: 'mxnetlinux-gpu-g4', linux_gpu: 'mxnetlinux-gpu')

utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_cpu_openblas('libmxnet'),
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Build Docs', [
// Optimization would be to flag these not to stash if not previewing them
custom_steps.docs_jekyll(),
custom_steps.docs_c('libmxnet'),
custom_steps.docs_python('libmxnet'),
custom_steps.docs_python('libmxnet_gpu'),
])

// TODO: add a website preview function
7 changes: 4 additions & 3 deletions ci/jenkins/Jenkinsfile_website_nightly
@@ -30,18 +30,19 @@ node('restricted-utility') {
custom_steps = load('ci/jenkins/Jenkins_steps.groovy')
}

utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-mxnetlinux-cpu')
utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-mxnetlinux-cpu', linux_gpu: 'restricted-mxnetlinux-gpu')

utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_cpu_openblas('libmxnet'),
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Build Docs', [
custom_steps.docs_jekyll(),
custom_steps.docs_c('libmxnet'),
custom_steps.docs_python('libmxnet'),
custom_steps.docs_python('libmxnet_gpu'),
])

utils.parallel_stage('Prepare', [
6 changes: 3 additions & 3 deletions ci/jenkins/Jenkinsfile_website_python_docs
@@ -29,16 +29,16 @@ node('utility') {
utils = load('ci/Jenkinsfile_utils.groovy')
custom_steps = load('ci/jenkins/Jenkins_steps.groovy')
}
utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu')
utils.assign_node_labels(utility: 'utility', linux_cpu: 'mxnetlinux-cpu', linux_gpu: 'mxnetlinux-gpu', linux_gpu_g4: 'mxnetlinux-gpu-g4')

utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Python Docs', [
custom_steps.docs_python('libmxnet')
custom_steps.docs_python('libmxnet_gpu')
])

}
5 changes: 3 additions & 2 deletions ci/jenkins/Jenkinsfile_website_version_artifacts
@@ -35,13 +35,14 @@ utils.assign_node_labels(utility: 'restricted-utility', linux_cpu: 'restricted-m
utils.main_wrapper(
core_logic: {
utils.parallel_stage('Build', [
custom_steps.compile_unix_cpu_openblas('libmxnet')
custom_steps.compile_unix_cpu_openblas('libmxnet'),
custom_steps.compile_unix_full_gpu('libmxnet_gpu')
])

utils.parallel_stage('Build Docs', [
custom_steps.docs_jekyll(),
custom_steps.docs_c('libmxnet'),
custom_steps.docs_python('libmxnet'),
custom_steps.docs_python('libmxnet_gpu'),
])

utils.parallel_stage('Build Full Website', [
19 changes: 15 additions & 4 deletions docs/python_docs/python/scripts/md2ipynb.py
@@ -26,17 +26,28 @@ def md2ipynb():
    (src_fn, input_fn, output_fn) = sys.argv

    # timeout for each notebook, in sec
    timeout = 20 * 60
    timeout = 60 * 60
    # if enable evaluation
    do_eval = int(os.environ.get('EVAL', True))

    # Skip these notebooks as some APIs will no longer be used
    skip_list = ["pytorch.md", "mnist.md", "custom-loss.md", "fit_api_tutorial.md", \
                 "01-ndarray-intro.md", "02-ndarray-operations.md", "03-ndarray-contexts.md", \
                 "gotchas_numpy_in_mxnet.md", "csr.md", "row_sparse.md", "fine_tuning_gluon.md", \
                 "inference_on_onnx_model.md", "amp.md", "profiler.md"]

    require_gpu = []
    # the files will be ignored for execution
    ignore_execution = skip_list + require_gpu

    reader = notedown.MarkdownReader(match='strict')
    with open(input_fn, 'r', encoding="utf8") as f:
        notebook = reader.read(f)
    if do_eval:
        tic = time.time()
        notedown.run(notebook, timeout)
        print('%s: Evaluated %s in %f sec'%(src_fn, input_fn, time.time()-tic))
        if not any([i in input_fn for i in ignore_execution]):
            tic = time.time()
            notedown.run(notebook, timeout)
            print('%s: Evaluated %s in %f sec'%(src_fn, input_fn, time.time()-tic))
    # need to add language info to for syntax highlight
    notebook['metadata'].update({'language_info':{'name':'python'}})
    with open(output_fn, 'w', encoding='utf-8') as f:
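For context, here is a minimal sketch of how the gating above behaves (the variable names come from the script itself; the input path is hypothetical):

```python
import os

# EVAL=0 in the environment turns notebook execution off entirely
do_eval = int(os.environ.get('EVAL', True))

# abbreviated copy of the skip list above; GPU-only notebooks would go in require_gpu
skip_list = ["pytorch.md", "mnist.md"]
require_gpu = []
ignore_execution = skip_list + require_gpu

input_fn = "tutorials/packages/gluon/image/mnist.md"  # hypothetical path
should_run = do_eval and not any(name in input_fn for name in ignore_execution)
print(should_run)  # False: mnist.md is on the skip list
```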
48 changes: 27 additions & 21 deletions docs/python_docs/python/tutorials/deploy/export/onnx.md
@@ -28,41 +28,47 @@ In this tutorial, we will learn how to use MXNet to ONNX exporter on pre-trained
## Prerequisites

To run the tutorial you will need to have installed the following python modules:
- [MXNet >= 1.3.0](https://mxnet.apache.org/get_started)
- [onnx](https://github.com/onnx/onnx#user-content-installation) v1.2.1 (follow the install guide)
- [MXNet >= 2.0.0](https://mxnet.apache.org/get_started)
- [onnx](https://github.com/onnx/onnx#user-content-installation) v1.7 & v1.8 (follow the install guide)

*Note:* MXNet-ONNX importer and exporter follows version 7 of ONNX operator set which comes with ONNX v1.2.1.
*Note:* The MXNet-ONNX importer and exporter follow versions 12 and 13 of the ONNX operator set, which ship with ONNX v1.7 and v1.8.


```{.python .input}
import mxnet as mx
import numpy as np
from mxnet.contrib import onnx as onnx_mxnet
from mxnet import initializer as init, np, onnx as mxnet_onnx
from mxnet.gluon import nn
import logging
logging.basicConfig(level=logging.INFO)
```

## Downloading a model from the MXNet model zoo
## Create a model with MXNet Gluon

We download the pre-trained ResNet-18 [ImageNet](http://www.image-net.org/) model from the [MXNet Model Zoo](../../../api/gluon/model_zoo/index.rst).
We will also download synset file to match labels.
Let's build a concise model with the [MXNet Gluon](../../../api/gluon/index.rst) package. The model is a multilayer perceptron with two fully-connected layers: the first is a hidden layer with 256 units and a ReLU activation, and the second is the output layer.

```{.python .input}
# Download pre-trained resnet model - json and params by running following code.
path='http://data.mxnet.io/models/imagenet/'
[mx.test_utils.download(path+'resnet/18-layers/resnet-18-0000.params'),
mx.test_utils.download(path+'resnet/18-layers/resnet-18-symbol.json'),
mx.test_utils.download(path+'synset.txt')]
net = nn.HybridSequential()
net.add(nn.Dense(256, activation='relu'), nn.Dense(10))
```

Now, we have downloaded ResNet-18 symbol, params and synset file on the disk.
Then we initialize the model and export it into a symbol file and a parameter file.

```{.python .input}
net.initialize(init.Normal(sigma=0.01))
net.hybridize()
input = np.ones(shape=(50,), dtype=np.float32)
output = net(input)
net.export("mlp")
```

Now we have exported the model's symbol and parameter files to disk.
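As a quick sanity check (a sketch that relies only on the default `net.export` naming, which the later cells also assume), both files should now be present:

```python
import os

# net.export("mlp") writes mlp-symbol.json and mlp-0000.params to the working directory
print(os.path.exists("mlp-symbol.json"), os.path.exists("mlp-0000.params"))
```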

## MXNet to ONNX exporter API

Let us describe MXNet's `export_model` API.

```{.python .input}
help(onnx_mxnet.export_model)
help(mxnet_onnx.export_model)
```

Output:
@@ -110,22 +116,22 @@ Since we have downloaded pre-trained model files, we will use the `export_model`
We will use the downloaded pre-trained model files (sym, params) and define input variables.

```{.python .input}
# Downloaded input symbol and params files
sym = './resnet-18-symbol.json'
params = './resnet-18-0000.params'
# The input symbol and params files
sym = './mlp-symbol.json'
params = './mlp-0000.params'

# Standard Imagenet input - 3 channels, 224*224
input_shape = (1,3,224,224)
input_shape = (50,)

# Path of the output file
onnx_file = './mxnet_exported_resnet50.onnx'
onnx_file = './mxnet_exported_mlp.onnx'
```

We have defined the input parameters required for the `export_model` API. Now we are ready to convert the MXNet model into ONNX format.

```{.python .input}
# Invoke export model API. It returns path of the converted onnx model
converted_model_path = onnx_mxnet.export_model(sym, params, [input_shape], np.float32, onnx_file)
converted_model_path = mxnet_onnx.export_model(sym, params, [input_shape], [np.float32], onnx_file)
```

This API returns the path of the converted model, which you can later use to import the model into other frameworks.
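Before moving on, the exported graph can be validated with the `onnx` package itself (a sketch, assuming the prerequisites above are installed):

```python
import onnx

# load the file written by export_model and run ONNX's structural checks
model = onnx.load(converted_model_path)
onnx.checker.check_model(model)  # raises an exception if the graph is malformed
print(onnx.helper.printable_graph(model.graph))
```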
@@ -78,7 +78,8 @@ from mxnet import gluon
import mxnet as mx

# set context
ctx = mx.gpu()
gpus = mx.test_utils.list_gpus()
ctx = mx.gpu() if gpus else mx.cpu()

# load pre-trained model
net = gluon.model_zoo.vision.resnet50_v1(pretrained=True, ctx=ctx)
@@ -94,17 +95,17 @@ img_path = gluon.utils.download('https://github.com/dmlc/web-data/blob/master/mx
img = mx.image.imread(img_path)
img = mx.image.imresize(img, 224, 224) # resize
img = mx.image.color_normalize(img.astype(dtype='float32')/255,
mean=mx.nd.array([0.485, 0.456, 0.406]),
std=mx.nd.array([0.229, 0.224, 0.225])) # normalize
mean=mx.np.array([0.485, 0.456, 0.406]),
std=mx.np.array([0.229, 0.224, 0.225])) # normalize
img = img.transpose((2, 0, 1)) # channel first
img = img.expand_dims(axis=0) # batchify
img = img.as_in_context(ctx)
img = mx.np.expand_dims(img, axis=0) # batchify
img = img.as_in_ctx(ctx)

prob = net(img).softmax() # predict and normalize output
idx = prob.topk(k=5)[0] # get top 5 result
prob = mx.npx.softmax(net(img)) # predict and normalize output
idx = mx.npx.topk(prob, k=5)[0] # get top 5 result
for i in idx:
    i = int(i.asscalar())
    print('With prob = %.5f, it contains %s' % (prob[0,i].asscalar(), labels[i]))
    i = int(i.item())
    print('With prob = %.5f, it contains %s' % (prob[0,i].item(), labels[i]))
```

After running the above script, you should get the following output, showing the five classes the image most relates to along with their probabilities:
7 changes: 4 additions & 3 deletions docs/python_docs/python/tutorials/extend/customop.md
@@ -31,11 +31,12 @@ import numpy as np
import mxnet as mx
from mxnet import gluon, autograd
import os
mx.npx.reset_np()
```

## Parameter-less operators

This operator implements the standard sigmoid activation function. This is only for illustration purposes, in real life you would use the built-in operator `mx.nd.relu`.
This operator implements the standard sigmoid activation function. This is only for illustration purposes; in real life you would use the built-in operator `mx.npx.relu`.

### Forward & backward implementation

@@ -218,7 +219,7 @@ print(y)
## Using custom operators with fork
On Linux systems, the default method multiprocessing uses to create a process is fork. If there are unfinished asynchronous custom operations when forking, the program will block because of the Python GIL. Always use sync calls like `wait_to_read` or `waitall` before calling fork.

```{.python .input}
```{.python}
x = mx.nd.array([0, 1, 2, 3])
y = mx.nd.Custom(x, op_type='sigmoid')
# unfinished async sigmoid operation will cause blocking
@@ -227,7 +228,7 @@ os.fork()

Handling this correctly would make MXNet depend on libpython, so the workaround for now is to ensure that all custom operations are executed before forking the process.

```{.python .input}
```{.python}
x = mx.nd.array([0, 1, 2, 3])
y = mx.nd.Custom(x, op_type='sigmoid')
# force execution by reading y
@@ -181,8 +181,19 @@ class Net(nn.Block):

```{.python .input}
class MLP(nn.Block):
def __init__(self): super().__init__() self.dense1 = nn.Dense(5,activation='relu') self.dense2 = nn.Dense(25,activation='relu') self.dense3 = nn.Dense(2)
def forward(self, x): layer1 = self.dense1(x) layer2 = self.dense2(layer1) layer3 = self.dense3(layer2) return layer3 net = MLP()
    def __init__(self):
        super().__init__()
        self.dense1 = nn.Dense(5,activation='relu')
        self.dense2 = nn.Dense(25,activation='relu')
        self.dense3 = nn.Dense(2)

    def forward(self, x):
        layer1 = self.dense1(x)
        layer2 = self.dense2(layer1)
        layer3 = self.dense3(layer2)
        return layer3

net = MLP()
net
```
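For completeness, a quick forward pass through the freshly defined `MLP` (a sketch: `MLP` comes from the cell above, and the batch size and feature width are illustrative):

```python
from mxnet import np, npx
npx.set_np()  # make sure the NumPy-compatible array API is active

net = MLP()   # the Block defined in the cell above
net.initialize()
x = np.random.uniform(size=(4, 10))   # 4 samples, 10 features (illustrative)
y = net(x)
print(y.shape)  # (4, 2): two units in the final Dense layer
```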

@@ -273,7 +273,7 @@ print(curr_weight)

```{.python .input}
batch_size = len(nn_input)
trainer.step(batch_size)
trainer.step(batch_size, ignore_stale_grad=True)
print(net.weight.data())
```

@@ -364,7 +364,7 @@ p = precision()
And finally, call the `update` method to compute the results of `precision` for your data.

```{.python .input}
p.update(np.array(y_true), np.array(y_pred))
p.update(np.array(labels), np.array(preds))
```
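Assuming `precision` follows the usual `EvalMetric` interface (as the custom metrics defined earlier in this guide do), the accumulated result can then be read back with `get`:

```python
# assumes p is the metric updated above and exposes the EvalMetric-style get()
name, value = p.get()
print('%s: %.3f' % (name, value))
```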

## Next steps