From cd8bed2893ecfa1388ab4e8c31debdf9d7289628 Mon Sep 17 00:00:00 2001
From: Naoto Mizuno
Date: Thu, 15 Feb 2024 13:46:59 +0900
Subject: [PATCH] Remove MXNet examples

---
 .github/workflows/mxnet.yml |  45 ------------
 README.md                   |   3 -
 mxnet/gluon_simple.py       | 134 -----------------------------------
 mxnet/mxnet_integration.py  | 136 ------------------------------------
 mxnet/mxnet_simple.py       | 125 ---------------------------------
 mxnet/requirements.txt      |   3 -
 6 files changed, 446 deletions(-)
 delete mode 100644 .github/workflows/mxnet.yml
 delete mode 100644 mxnet/gluon_simple.py
 delete mode 100644 mxnet/mxnet_integration.py
 delete mode 100644 mxnet/mxnet_simple.py
 delete mode 100644 mxnet/requirements.txt

diff --git a/.github/workflows/mxnet.yml b/.github/workflows/mxnet.yml
deleted file mode 100644
index ba4c2f82..00000000
--- a/.github/workflows/mxnet.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: mxnet
-
-on:
-  schedule:
-    - cron: '0 15 * * *'
-  pull_request:
-    paths:
-      - 'mxnet/**'
-      - '.github/workflows/mxnet.yml'
-
-jobs:
-  examples:
-    if: (github.event_name == 'schedule' && github.repository == 'optuna/optuna-examples') || (github.event_name != 'schedule')
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
-
-    steps:
-    - uses: actions/checkout@v3
-    - name: setup-python${{ matrix.python-version }}
-      uses: actions/setup-python@v4
-      with:
-        python-version: ${{ matrix.python-version }}
-    - name: Install (apt)
-      run: |
-        sudo apt-get update
-        sudo apt-get -y install libopenblas-dev
-    - name: Install (Python)
-      run: |
-        python -m pip install --upgrade pip
-        pip install --progress-bar off -U setuptools
-        pip install git+https://github.com/optuna/optuna.git
-        python -c 'import optuna'
-        pip install git+https://github.com/optuna/optuna-integration.git
-        python -c 'import optuna_integration'
-
-        pip install -r mxnet/requirements.txt
-    - name: Run examples
-      run: |
-        python mxnet/gluon_simple.py
-        python mxnet/mxnet_simple.py
-        python mxnet/mxnet_integration.py
-      env:
-        OMP_NUM_THREADS: 1
diff --git a/README.md b/README.md
index 7cda1938..1e097f45 100644
--- a/README.md
+++ b/README.md
@@ -41,11 +41,9 @@ The examples below provide codeblocks similar to the example above for various d
 * [FastAI V1](./fastai/fastaiv1_simple.py)
 * [FastAI V2](./fastai/fastaiv2_simple.py)
 * [Haiku](./haiku/haiku_simple.py)
-* [Gluon](./mxnet/gluon_simple.py)
 * [Keras](./keras/keras_simple.py)
 * [LightGBM](./lightgbm/lightgbm_simple.py)
 * [LightGBM Tuner](./lightgbm/lightgbm_tuner_simple.py)
-* [MXNet](./mxnet/mxnet_simple.py)
 * [PyTorch](./pytorch/pytorch_simple.py)
 * [PyTorch Ignite](./pytorch/pytorch_ignite_simple.py)
 * [PyTorch Lightning](./pytorch/pytorch_lightning_simple.py)
@@ -87,7 +85,6 @@ In addition, integration modules are available for the following libraries, prov
 * [Pruning with FastAI V2 integration module](./fastai/fastaiv2_simple.py)
 * [Pruning with Keras integration module](./keras/keras_integration.py)
 * [Pruning with LightGBM integration module](./lightgbm/lightgbm_integration.py)
-* [Pruning with MXNet integration module](./mxnet/mxnet_integration.py)
 * [Pruning with PyTorch integration module](./pytorch/pytorch_simple.py)
 * [Pruning with PyTorch Ignite integration module](./pytorch/pytorch_ignite_simple.py)
 * [Pruning with PyTorch Lightning integration module](./pytorch/pytorch_lightning_simple.py)
diff --git a/mxnet/gluon_simple.py b/mxnet/gluon_simple.py
deleted file mode 100644
index 18b337cd..00000000
--- a/mxnet/gluon_simple.py
+++ /dev/null
@@ -1,134 +0,0 @@
-import urllib
-
-import numpy as np
-import optuna
-
-import mxnet as mx
-from mxnet import autograd
-from mxnet import gluon
-from mxnet.gluon import nn
-
-
-# TODO(crcrpar): Remove the below three lines once everything is ok.
-# Register a global custom opener to avoid HTTP Error 403: Forbidden when downloading MNIST.
-opener = urllib.request.build_opener()
-opener.addheaders = [("User-agent", "Mozilla/5.0")]
-urllib.request.install_opener(opener)
-
-
-CUDA = False
-EPOCHS = 10
-BATCHSIZE = 128
-LOG_INTERVAL = 100
-
-
-def define_model(trial):
-    net = nn.Sequential()
-    n_layers = trial.suggest_int("n_layers", 1, 3)
-    for i in range(n_layers):
-        nodes = trial.suggest_int("n_units_l{}".format(i), 4, 128)
-        net.add(nn.Dense(nodes, activation="relu"))
-    net.add(nn.Dense(10))
-    return net
-
-
-def transform(data, label):
-    data = data.reshape((-1,)).astype(np.float32) / 255
-    return data, label
-
-
-def validate(ctx, val_data, net):
-    metric = mx.metric.Accuracy()
-    for data, label in val_data:
-        data = data.as_in_context(ctx)
-        label = label.as_in_context(ctx)
-        output = net(data)
-        metric.update([label], [output])
-
-    return metric.get()
-
-
-def objective(trial):
-    if CUDA:
-        ctx = mx.gpu(0)
-    else:
-        ctx = mx.cpu()
-
-    train_data = gluon.data.DataLoader(
-        gluon.data.vision.MNIST("./data", train=True).transform(transform),
-        shuffle=True,
-        batch_size=BATCHSIZE,
-        last_batch="discard",
-    )
-
-    val_data = gluon.data.DataLoader(
-        gluon.data.vision.MNIST("./data", train=False).transform(transform),
-        batch_size=BATCHSIZE,
-        shuffle=False,
-    )
-
-    net = define_model(trial)
-
-    # Collect all parameters from net and its children, then initialize them.
-    net.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
-    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "RMSprop", "SGD"])
-    # Trainer is for updating parameters with gradient.
-    lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
-    trainer = gluon.Trainer(net.collect_params(), optimizer_name, {"learning_rate": lr})
-    metric = mx.metric.Accuracy()
-    loss = gluon.loss.SoftmaxCrossEntropyLoss()
-    val_acc = 0
-
-    for epoch in range(EPOCHS):
-        # Reset data iterator and metric at beginning of epoch.
-        metric.reset()
-        for i, (data, label) in enumerate(train_data):
-            # Copy data to ctx if necessary.
-            data = data.as_in_context(ctx)
-            label = label.as_in_context(ctx)
-            # Start recording computation graph with record() section.
-            # Recorded graphs can then be differentiated with backward.
-            with autograd.record():
-                output = net(data)
-                L = loss(output, label)
-                L.backward()
-            # Take a gradient step with batch_size equal to data.shape[0].
-            trainer.step(data.shape[0])
-            # Update metric at last.
-            metric.update([label], [output])
-
-            if i % LOG_INTERVAL == 0 and i > 0:
-                name, acc = metric.get()
-                print(f"[Epoch {epoch} Batch {i}] Training: {name}={acc}")
-
-        name, acc = metric.get()
-        print(f"[Epoch {epoch}] Training: {name}={acc}")
-
-        name, val_acc = validate(ctx, val_data, net)
-        print(f"[Epoch {epoch}] Validation: {name}={val_acc}")
-
-        trial.report(val_acc, epoch)
-
-        # Handle pruning based on the intermediate value.
-        if trial.should_prune():
-            raise optuna.exceptions.TrialPruned()
-
-    net.save_parameters("mnist.params")
-
-    return val_acc
-
-
-if __name__ == "__main__":
-    study = optuna.create_study(direction="maximize")
-    study.optimize(objective, n_trials=100, timeout=600)
-
-    print("Number of finished trials: ", len(study.trials))
-
-    print("Best trial:")
-    trial = study.best_trial
-
-    print("  Value: ", trial.value)
-
-    print("  Params: ")
-    for key, value in trial.params.items():
-        print("    {}: {}".format(key, value))
diff --git a/mxnet/mxnet_integration.py b/mxnet/mxnet_integration.py
deleted file mode 100644
index 9b95a4c0..00000000
--- a/mxnet/mxnet_integration.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""
-Optuna example that demonstrates a pruner for MXNet.
-
-In this example, we optimize the validation accuracy of hand-written digit recognition using
-MXNet and MNIST, where the architecture of the neural network and the learning rate of optimizer
-is optimized. Throughout the training of neural networks, a pruner observes intermediate
-results and stops unpromising trials.
-
-You can run this example as follows:
-    $ python mxnet_integration.py
-
-"""
-
-import logging
-import urllib
-
-import numpy as np
-import optuna
-from optuna.integration import MXNetPruningCallback
-from optuna.trial import TrialState
-
-import mxnet as mx
-
-
-# TODO(crcrpar): Remove the below three lines once everything is ok.
-# Register a global custom opener to avoid HTTP Error 403: Forbidden when downloading MNIST.
-opener = urllib.request.build_opener()
-opener.addheaders = [("User-agent", "Mozilla/5.0")]
-urllib.request.install_opener(opener)
-
-
-N_TRAIN_EXAMPLES = 3000
-N_TEST_EXAMPLES = 1000
-BATCHSIZE = 128
-EPOCH = 10
-
-# Set log level for MXNet.
-logger = logging.getLogger()
-logger.setLevel(logging.INFO)
-
-
-def create_model(trial):
-    # We optimize the number of layers and hidden units in each layer.
-    n_layers = trial.suggest_int("n_layers", 1, 3)
-
-    data = mx.symbol.Variable("data")
-    data = mx.sym.flatten(data=data)
-    for i in range(n_layers):
-        num_hidden = trial.suggest_int("n_units_1{}".format(i), 4, 128, log=True)
-        data = mx.symbol.FullyConnected(data=data, num_hidden=num_hidden)
-        data = mx.symbol.Activation(data=data, act_type="relu")
-
-    data = mx.symbol.FullyConnected(data=data, num_hidden=10)
-    mlp = mx.symbol.SoftmaxOutput(data=data, name="softmax")
-
-    return mlp
-
-
-def create_optimizer(trial):
-    # We optimize over the type of optimizer to use (Adam or SGD with momentum).
-    # We also optimize over the learning rate and weight decay of the selected optimizer.
-    weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
-    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "MomentumSGD"])
-
-    if optimizer_name == "Adam":
-        adam_lr = trial.suggest_float("adam_lr", 1e-5, 1e-1, log=True)
-        optimizer = mx.optimizer.Adam(learning_rate=adam_lr, wd=weight_decay)
-    else:
-        momentum_sgd_lr = trial.suggest_float("momentum_sgd_lr", 1e-5, 1e-1, log=True)
-        optimizer = mx.optimizer.SGD(momentum=momentum_sgd_lr, wd=weight_decay)
-
-    return optimizer
-
-
-def objective(trial):
-    # Generate trial model and trial optimizer.
-    mlp = create_model(trial)
-    optimizer = create_optimizer(trial)
-
-    # Load the test and train MNIST dataset.
-    # Use test data as a validation set.
-    mnist = mx.test_utils.get_mnist()
-    rng = np.random.RandomState(0)
-    permute_train = rng.permutation(len(mnist["train_data"]))
-    train = mx.io.NDArrayIter(
-        data=mnist["train_data"][permute_train][:N_TRAIN_EXAMPLES],
-        label=mnist["train_label"][permute_train][:N_TRAIN_EXAMPLES],
-        batch_size=BATCHSIZE,
-        shuffle=True,
-    )
-    permute_valid = rng.permutation(len(mnist["test_data"]))
-    val = mx.io.NDArrayIter(
-        data=mnist["test_data"][permute_valid][:N_TEST_EXAMPLES],
-        label=mnist["test_label"][permute_valid][:N_TEST_EXAMPLES],
-        batch_size=BATCHSIZE,
-    )
-
-    # Create our MXNet trainable model and fit it on MNIST data.
-    model = mx.mod.Module(symbol=mlp)
-    model.fit(
-        train_data=train,
-        eval_data=val,
-        eval_end_callback=MXNetPruningCallback(trial, eval_metric="accuracy"),
-        optimizer=optimizer,
-        optimizer_params={"rescale_grad": 1.0 / BATCHSIZE},
-        num_epoch=EPOCH,
-    )
-
-    # Compute the accuracy on the entire validation set.
-    valid = mx.io.NDArrayIter(
-        data=mnist["test_data"], label=mnist["test_label"], batch_size=BATCHSIZE
-    )
-    accuracy = model.score(eval_data=valid, eval_metric="acc")[0]
-
-    return accuracy[1]
-
-
-if __name__ == "__main__":
-    study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
-    study.optimize(objective, n_trials=100, timeout=600)
-    pruned_trials = study.get_trials(deepcopy=False, states=[TrialState.PRUNED])
-    complete_trials = study.get_trials(deepcopy=False, states=[TrialState.COMPLETE])
-
-    print("Study statistics: ")
-    print("  Number of finished trials: ", len(study.trials))
-    print("  Number of pruned trials: ", len(pruned_trials))
-    print("  Number of complete trials: ", len(complete_trials))
-
-    print("Best trial:")
-    trial = study.best_trial
-
-    print("  Value: ", trial.value)
-
-    print("  Params: ")
-    for key, value in trial.params.items():
-        print("    {}: {}".format(key, value))
diff --git a/mxnet/mxnet_simple.py b/mxnet/mxnet_simple.py
deleted file mode 100644
index 47ba52d6..00000000
--- a/mxnet/mxnet_simple.py
+++ /dev/null
@@ -1,125 +0,0 @@
-"""
-Optuna example that optimizes multi-layer perceptrons using MXNet.
-
-In this example, we optimize the validation accuracy of hand-written digit recognition using
-MXNet and MNIST. We optimize the neural network architecture as well as the optimizer
-configuration. As it is too time consuming to use the whole MNIST dataset, we here use a small
-subset of it.
-
-"""
-
-import logging
-import urllib
-
-import numpy as np
-import optuna
-
-import mxnet as mx
-
-
-# TODO(crcrpar): Remove the below three lines once everything is ok.
-# Register a global custom opener to avoid HTTP Error 403: Forbidden when downloading MNIST.
-opener = urllib.request.build_opener()
-opener.addheaders = [("User-agent", "Mozilla/5.0")]
-urllib.request.install_opener(opener)
-
-
-N_TRAIN_EXAMPLES = 3000
-N_VALID_EXAMPLES = 1000
-BATCHSIZE = 128
-EPOCH = 10
-
-# Set log level for MXNet.
-logger = logging.getLogger()
-logger.setLevel(logging.INFO)
-
-
-def create_model(trial):
-    # We optimize the number of layers and hidden units in each layer.
-    n_layers = trial.suggest_int("n_layers", 1, 3)
-
-    data = mx.symbol.Variable("data")
-    data = mx.sym.flatten(data=data)
-    for i in range(n_layers):
-        num_hidden = trial.suggest_int("n_units_l{}".format(i), 4, 128, log=True)
-        data = mx.symbol.FullyConnected(data=data, num_hidden=num_hidden)
-        data = mx.symbol.Activation(data=data, act_type="relu")
-
-    data = mx.symbol.FullyConnected(data=data, num_hidden=10)
-    mlp = mx.symbol.SoftmaxOutput(data=data, name="softmax")
-
-    return mlp
-
-
-def create_optimizer(trial):
-    # We optimize over the type of optimizer to use (Adam or SGD with momentum).
-    # We also optimize over the learning rate and weight decay of the selected optimizer.
-    weight_decay = trial.suggest_float("weight_decay", 1e-10, 1e-3, log=True)
-    optimizer_name = trial.suggest_categorical("optimizer", ["Adam", "MomentumSGD"])
-
-    if optimizer_name == "Adam":
-        adam_lr = trial.suggest_float("adam_lr", 1e-5, 1e-1, log=True)
-        optimizer = mx.optimizer.Adam(learning_rate=adam_lr, wd=weight_decay)
-    else:
-        momentum_sgd_lr = trial.suggest_float("momentum_sgd_lr", 1e-5, 1e-1, log=True)
-        optimizer = mx.optimizer.SGD(momentum=momentum_sgd_lr, wd=weight_decay)
-
-    return optimizer
-
-
-def objective(trial):
-    # Generate trial model and trial optimizer.
-    mlp = create_model(trial)
-    optimizer = create_optimizer(trial)
-
-    # Load the test and train MNIST dataset.
-    # Use test as a validation set.
-    mnist = mx.test_utils.get_mnist()
-    rng = np.random.RandomState(0)
-    permute_train = rng.permutation(len(mnist["train_data"]))
-    train = mx.io.NDArrayIter(
-        data=mnist["train_data"][permute_train][:N_TRAIN_EXAMPLES],
-        label=mnist["train_label"][permute_train][:N_TRAIN_EXAMPLES],
-        batch_size=BATCHSIZE,
-        shuffle=True,
-    )
-    permute_valid = rng.permutation(len(mnist["test_data"]))
-    val = mx.io.NDArrayIter(
-        data=mnist["test_data"][permute_valid][:N_VALID_EXAMPLES],
-        label=mnist["test_label"][permute_valid][:N_VALID_EXAMPLES],
-        batch_size=BATCHSIZE,
-    )
-
-    # Create our MXNet trainable model and fit it on MNIST data.
-    model = mx.mod.Module(symbol=mlp)
-    model.fit(
-        train_data=train,
-        eval_data=val,
-        optimizer=optimizer,
-        optimizer_params={"rescale_grad": 1.0 / BATCHSIZE},
-        num_epoch=EPOCH,
-    )
-
-    # Compute the accuracy on the entire validation set.
-    valid = mx.io.NDArrayIter(
-        data=mnist["test_data"], label=mnist["test_label"], batch_size=BATCHSIZE
-    )
-    accuracy = model.score(eval_data=valid, eval_metric="acc")[0]
-
-    return accuracy[1]
-
-
-if __name__ == "__main__":
-    study = optuna.create_study(direction="maximize")
-    study.optimize(objective, n_trials=100, timeout=600)
-
-    print("Number of finished trials: ", len(study.trials))
-
-    print("Best trial:")
-    trial = study.best_trial
-
-    print("  Value: ", trial.value)
-
-    print("  Params: ")
-    for key, value in trial.params.items():
-        print("    {}: {}".format(key, value))
diff --git a/mxnet/requirements.txt b/mxnet/requirements.txt
deleted file mode 100644
index 16110a7f..00000000
--- a/mxnet/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-numpy<1.24.0
-mxnet
-optuna