Skip to content

pull code #114

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 19 commits into from
Sep 16, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 5 additions & 3 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,12 @@ _END := $(shell echo -e '\033[0m')
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S), Linux)
OS_SPEC := linux
NODE_URL := https://nodejs.org/dist/v10.22.1/node-v10.22.1-linux-x64.tar.xz
else ifeq ($(UNAME_S), Darwin)
OS_SPEC := darwin
NODE_URL := https://nodejs.org/dist/v10.22.1/node-v10.22.1-darwin-x64.tar.xz
else
$(error platform $(UNAME_S) not supported)
$(error platform $(UNAME_S) not supported)
endif

## Install directories
Expand Down Expand Up @@ -143,11 +145,11 @@ clean:

$(NNI_NODE_TARBALL):
#$(_INFO) Downloading Node.js $(_END)
wget https://aka.ms/nni/nodejs-download/$(OS_SPEC) -O $(NNI_NODE_TARBALL)
wget $(NODE_URL) -O $(NNI_NODE_TARBALL)

$(NNI_YARN_TARBALL):
#$(_INFO) Downloading Yarn $(_END)
wget https://aka.ms/yarn-download -O $(NNI_YARN_TARBALL)
wget https://github.com/yarnpkg/yarn/releases/download/v1.22.5/yarn-v1.22.5.tar.gz -O $(NNI_YARN_TARBALL)

.PHONY: install-dependencies
install-dependencies: $(NNI_NODE_TARBALL) $(NNI_YARN_TARBALL)
Expand Down
3 changes: 2 additions & 1 deletion azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ jobs:
- script: |
set -e
sudo apt-get install -y pandoc
python3 -m pip install pygments --user --upgrade
python3 -m pip install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html --user
python3 -m pip install tensorflow==2.2.0 --user
python3 -m pip install keras==2.4.2 --user
Expand Down Expand Up @@ -57,7 +58,7 @@ jobs:
- script: |
set -e
python3 -m pip install --upgrade pip setuptools --user
python3 -m pip install pylint==2.3.1 astroid==2.2.5 --user
python3 -m pip install pylint==2.6.0 astroid==2.4.2 --user
python3 -m pip install coverage --user
python3 -m pip install thop --user
echo "##vso[task.setvariable variable=PATH]${HOME}/.local/bin:${PATH}"
Expand Down
6 changes: 4 additions & 2 deletions deployment/pypi/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,11 @@ UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S), Linux)
OS_SPEC := linux
WHEEL_SPEC := manylinux1_x86_64
NODE_URL := https://nodejs.org/dist/v10.22.1/node-v10.22.1-linux-x64.tar.xz
else ifeq ($(UNAME_S), Darwin)
OS_SPEC := darwin
WHEEL_SPEC := macosx_10_9_x86_64
NODE_URL := https://nodejs.org/dist/v10.22.1/node-v10.22.1-darwin-x64.tar.xz
else
$(error platform $(UNAME_S) not supported)
endif
Expand All @@ -28,11 +30,11 @@ NNI_YARN := PATH=$(CWD)node-$(OS_SPEC)-x64/bin:$${PATH} $(NNI_YARN_FOLDER)/bin/y
build:
# Building version $(NNI_VERSION_VALUE)
python3 -m pip install --user --upgrade setuptools wheel
wget -q https://aka.ms/nni/nodejs-download/$(OS_SPEC) -O $(CWD)node-$(OS_SPEC)-x64.tar.xz
wget -q $(NODE_URL) -O $(CWD)node-$(OS_SPEC)-x64.tar.xz
rm -rf $(CWD)node-$(OS_SPEC)-x64
mkdir $(CWD)node-$(OS_SPEC)-x64
tar xf $(CWD)node-$(OS_SPEC)-x64.tar.xz -C node-$(OS_SPEC)-x64 --strip-components 1
wget -q https://aka.ms/yarn-download -O $(NNI_YARN_TARBALL)
wget -q https://github.com/yarnpkg/yarn/releases/download/v1.22.5/yarn-v1.22.5.tar.gz -O $(NNI_YARN_TARBALL)
rm -rf $(NNI_YARN_FOLDER)
mkdir $(NNI_YARN_FOLDER)
tar -xf $(NNI_YARN_TARBALL) -C $(NNI_YARN_FOLDER) --strip-components 1
Expand Down
4 changes: 2 additions & 2 deletions deployment/pypi/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ This is the PyPI build and upload tool for NNI project.
Ubuntu 16.04 LTS
make
wget
Python >= 3.5
Python >= 3.6
Pip
Node.js
Yarn
Expand Down Expand Up @@ -45,7 +45,7 @@ This is the PyPI build and upload tool for NNI project.
```
Windows 10
powershell
Python >= 3.5
Python >= 3.6
Pip
Yarn
```
Expand Down
5 changes: 3 additions & 2 deletions deployment/pypi/install.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,12 @@ $OS_SPEC = "windows"
if($version_os -eq 64){
$OS_VERSION = 'win64'
$WHEEL_SPEC = 'win_amd64'
$NODE_URL = 'https://nodejs.org/download/release/v10.22.1/node-v10.22.1-win-x64.zip'
}
else{
$OS_VERSION = 'win32'
$WHEEL_SPEC = 'win32'
$NODE_URL = 'https://nodejs.org/download/release/v10.22.1/node-v10.22.1-win-x86.zip'
}

$TIME_STAMP = date -u "+%y%m%d%H%M"
Expand All @@ -28,11 +30,10 @@ $NNI_VERSION_TEMPLATE = "999.0.0-developing"

python -m pip install --upgrade setuptools wheel

$nodeUrl = "https://aka.ms/nni/nodejs-download/" + $OS_VERSION
$NNI_NODE_ZIP = "$CWD\node-$OS_SPEC.zip"
$NNI_NODE_FOLDER = "$CWD\node-$OS_SPEC"
$unzipNodeDir = "node-v*"
(New-Object Net.WebClient).DownloadFile($nodeUrl, $NNI_NODE_ZIP)
(New-Object Net.WebClient).DownloadFile($NODE_URL, $NNI_NODE_ZIP)
if(Test-Path $NNI_NODE_FOLDER){
Remove-Item $NNI_NODE_FOLDER -Recurse -Force
}
Expand Down
39 changes: 39 additions & 0 deletions docs/en_US/CommunitySharings/AutoCompletion.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Auto Completion for nnictl Commands

NNI's command line tool __nnictl__ supports auto-completion, i.e., you can complete an nnictl command by pressing the `tab` key.

For example, if the current command is
```
nnictl cre
```
By pressing the `tab` key, it will be completed to
```
nnictl create
```

For now, auto-completion will not be enabled by default if you install NNI through `pip`, and it only works on Linux with bash shell. If you want to enable this feature on your computer, please refer to the following steps:

### Step 1. Download `bash-completion`
```
cd ~
wget https://raw.githubusercontent.com/microsoft/nni/{nni-version}/tools/bash-completion
```
Here, {nni-version} should be replaced by the version of NNI, e.g., `master`, `v1.9`. You can also check the latest `bash-completion` script [here](https://github.com/microsoft/nni/blob/master/tools/bash-completion).

### Step 2. Install the script
If you are running a root account and want to install this script for all the users
```
install -m644 ~/bash-completion /usr/share/bash-completion/completions/nnictl
```
If you just want to install this script for yourself
```
mkdir -p ~/.bash_completion.d
install -m644 ~/bash-completion ~/.bash_completion.d/nnictl
echo '[[ -f ~/.bash_completion.d/nnictl ]] && source ~/.bash_completion.d/nnictl' >> ~/.bash_completion
```

### Step 3. Reopen your terminal
Reopen your terminal and you should be able to use the auto-completion feature. Enjoy!

### Step 4. Uninstall
If you want to uninstall this feature, just revert the changes in the steps above.
1 change: 1 addition & 0 deletions docs/en_US/CommunitySharings/community_sharings.rst
Original file line number Diff line number Diff line change
Expand Up @@ -13,3 +13,4 @@ Different from the tutorials and examples in the rest of the document which show
Feature Engineering <feature_engineering>
Performance measurement, comparison and analysis <perf_compare>
Use NNI on Google Colab <NNI_colab_support>
Auto Completion for nnictl Commands <AutoCompletion>
29 changes: 27 additions & 2 deletions docs/en_US/Compressor/QuickStart.md
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ It means following the algorithm's default setting for compressed operations wit

#### Quantization specific keys

**If you use quantization algorithms, you need to specify more keys. If you use pruning algorithms, you can safely skip these keys**
Besides the keys explained above, if you use quantization algorithms you need to specify more keys in `config_list`, which are explained below.

* __quant_types__ : list of string.

Expand All @@ -148,6 +148,31 @@ when the value is int type, all quantization types share same bits length. eg.
}
```

The following example shows a more complete `config_list`, it uses `op_names` (or `op_types`) to specify the target layers along with the quantization bits for those layers.
```
configure_list = [{
'quant_types': ['weight'],
'quant_bits': 8,
'op_names': ['conv1']
}, {
'quant_types': ['weight'],
'quant_bits': 4,
'quant_start_step': 0,
'op_names': ['conv2']
}, {
'quant_types': ['weight'],
'quant_bits': 3,
'op_names': ['fc1']
},
{
'quant_types': ['weight'],
'quant_bits': 2,
'op_names': ['fc2']
}
]
```
In this example, `op_names` specifies the layer names, and the four listed layers will be quantized with different `quant_bits`.

### APIs for Updating Fine Tuning Status

Some compression algorithms use epochs to control the progress of compression (e.g. [AGP](https://nni.readthedocs.io/en/latest/Compressor/Pruner.html#agp-pruner)), and some algorithms need to do something after every minibatch. Therefore, we provide another two APIs for users to invoke: `pruner.update_epoch(epoch)` and `pruner.step()`.
Expand All @@ -168,4 +193,4 @@ pruner.export_model(model_path='model.pth')
pruner.export_model(model_path='model.pth', mask_path='mask.pth', onnx_path='model.onnx', input_shape=[1, 1, 28, 28])
```

If you want to really speed up the compressed model, please refer to [NNI model speedup](./ModelSpeedup.md) for details.
If you want to really speed up the compressed model, please refer to [NNI model speedup](./ModelSpeedup.md) for details.
4 changes: 2 additions & 2 deletions docs/en_US/Overview.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ NNI (Neural Network Intelligence) is a toolkit to help users design and tune mac
The figure below shows high-level architecture of NNI.

<p align="center">
<img src="https://user-images.githubusercontent.com/23273522/51816536-ed055580-2301-11e9-8ad8-605a79ee1b9a.png" alt="drawing" width="700"/>
<img src="https://user-images.githubusercontent.com/16907603/92089316-94147200-ee00-11ea-9944-bf3c4544257f.png" alt="drawing" width="700"/>
</p>

## Key Concepts
Expand Down Expand Up @@ -86,4 +86,4 @@ The auto-feature-engineering algorithms usually have a bunch of hyperparameters
* [Examples](TrialExample/MnistExamples.md)
* [Neural Architecture Search on NNI](NAS/Overview.md)
* [Automatic model compression on NNI](Compressor/Overview.md)
* [Automatic feature engineering on NNI](FeatureEngineering/Overview.md)
* [Automatic feature engineering on NNI](FeatureEngineering/Overview.md)
4 changes: 2 additions & 2 deletions docs/en_US/Tuner/BuiltinTuner.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# Built-in Tuners for Hyperparameter Tuning
# HyperParameter Tuning with NNI Built-in Tuners

NNI provides state-of-the-art tuning algorithms as part of our built-in tuners and makes them easy to use. Below is the brief summary of NNI's current built-in tuners:
To fit a machine/deep learning model into different tasks/problems, hyperparameters always need to be tuned. Automating the process of hyperparameter tuning always requires a good tuning algorithm. NNI has provided state-of-the-art tuning algorithms as part of our built-in tuners and makes them easy to use. Below is the brief summary of NNI's current built-in tuners:

Note: Click the **Tuner's name** to get the Tuner's installation requirements, suggested scenario, and an example configuration. A link for a detailed description of each algorithm is located at the end of the suggested scenario for each tuner. Here is an [article](../CommunitySharings/HpoComparison.md) comparing different Tuners on several problems.

Expand Down
2 changes: 1 addition & 1 deletion docs/en_US/Tutorial/ExperimentConfig.md
Original file line number Diff line number Diff line change
Expand Up @@ -573,7 +573,7 @@ Used to specify designated GPU devices for NNI, if it is set, only the specified

#### maxTrialNumPerGpu

Optional. Integer. Default: 99999.
Optional. Integer. Default: 1.

Used to specify the max concurrency trial number on a GPU device.

Expand Down
19 changes: 17 additions & 2 deletions docs/en_US/Tutorial/Nnictl.md
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ Debug mode will disable version check function in Trialkeeper.
> update experiment's trial num

```bash
nnictl update trialnum --id [experiment_id] --value [trial_num]
nnictl update trialnum [experiment_id] --value [trial_num]
```

<a name="trial"></a>
Expand Down Expand Up @@ -347,7 +347,7 @@ Debug mode will disable version check function in Trialkeeper.
> kill trial job

```bash
nnictl trial [trial_id] --experiment [experiment_id]
nnictl trial kill [experiment_id] --trial_id [trial_id]
```

<a name="top"></a>
Expand Down Expand Up @@ -704,6 +704,21 @@ Debug mode will disable version check function in Trialkeeper.
### Manage webui

* __nnictl webui url__
* Description

Show an experiment's webui url

* Usage

```bash
nnictl webui url [options]
```

* Options

|Name, shorthand|Required|Default|Description|
|------|------|------ |------|
|id| False| |Experiment ID|

<a name="tensorboard"></a>
### Manage tensorboard
Expand Down
3 changes: 3 additions & 0 deletions docs/en_US/Tutorial/QuickStart.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,9 @@ python -m pip install --upgrade nni
```eval_rst
.. Note:: For the system requirements of NNI, please refer to :doc:`Install NNI on Linux & Mac <InstallationLinux>` or :doc:`Windows <InstallationWin>`.
```
### Enable NNI Command-line Auto-Completion (Optional)

After the installation, you may want to enable the auto-completion feature for __nnictl__ commands. Please refer to this [tutorial](../CommunitySharings/AutoCompletion.md).

## "Hello World" example on MNIST

Expand Down
40 changes: 15 additions & 25 deletions examples/model_compress/model_prune_tf.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,31 +28,21 @@ def get_dataset(dataset_name='mnist'):

def create_model(model_name='naive'):
assert model_name == 'naive'
return NaiveModel()

class NaiveModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.seq_layers = [
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=500),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(units=10),
tf.keras.layers.Softmax()
]

def call(self, x):
for layer in self.seq_layers:
x = layer(x)
return x
return tf.keras.Sequential([
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Conv2D(filters=20, kernel_size=5),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.MaxPool2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=500),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(units=10),
tf.keras.layers.Softmax()
])


def create_pruner(model, pruner_name):
Expand Down
5 changes: 3 additions & 2 deletions install.ps1
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,14 @@ $install_yarn = $true

if ([Environment]::Is64BitOperatingSystem) {
$OS_VERSION = 'win64'
$nodeUrl = "https://nodejs.org/download/release/v10.22.1/node-v10.22.1-win-x64.zip"
}
else {
$OS_VERSION = 'win32'
$nodeUrl = "https://nodejs.org/download/release/v10.22.1/node-v10.22.1-win-x86.zip"
}
# nodejs
$nodeUrl = "https://aka.ms/nni/nodejs-download/" + $OS_VERSION
$yarnUrl = "https://yarnpkg.com/latest.tar.gz"
$yarnUrl = "https://github.com/yarnpkg/yarn/releases/download/v1.22.5/yarn-v1.22.5.tar.gz"
$unzipNodeDir = "node-v*"
$unzipYarnDir = "yarn-v*"

Expand Down
2 changes: 1 addition & 1 deletion src/nni_manager/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
"stream-buffers": "^3.0.2",
"tail-stream": "^0.3.4",
"tar": "^6.0.2",
"tree-kill": "^1.2.0",
"tree-kill": "^1.2.2",
"ts-deferred": "^1.0.4",
"typescript-ioc": "^1.2.4",
"typescript-string-operations": "^1.3.1",
Expand Down
1 change: 1 addition & 0 deletions src/nni_manager/training_service/reusable/aml/amlClient.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ export class AMLClient {
const deferred: Deferred<string> = new Deferred<string>();
this.pythonShellClient = new PythonShell('amlUtil.py', {
scriptPath: './config/aml',
pythonPath: process.platform === 'win32' ? 'python' : 'python3',
pythonOptions: ['-u'], // get print results in real-time
args: [
'--subscription_id', this.subscriptionId,
Expand Down
4 changes: 2 additions & 2 deletions src/nni_manager/training_service/reusable/commandChannel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,8 @@ export abstract class CommandChannel {
protected abstract sendCommandInternal(environment: EnvironmentInformation, message: string): Promise<void>;
protected abstract createRunnerConnection(environment: EnvironmentInformation): RunnerConnection;

public async sendCommand(environment: EnvironmentInformation, commantType: string, data: any): Promise<void> {
const command = encodeCommand(commantType, JSON.stringify(data));
public async sendCommand(environment: EnvironmentInformation, commandType: string, data: any): Promise<void> {
const command = encodeCommand(commandType, JSON.stringify(data));
this.log.debug(`CommandChannel: env ${environment.id} sending command: ${command}`);
await this.sendCommandInternal(environment, command.toString("utf8"));
}
Expand Down
Loading