1 change: 1 addition & 0 deletions azdev/commands.py
@@ -43,6 +43,7 @@ def operation_group(name):

with CommandGroup(self, 'perf', operation_group('performance')) as g:
g.command('load-times', 'check_load_time')
g.command('benchmark', 'benchmark', is_preview=True)

with CommandGroup(self, 'extension', operation_group('extensions')) as g:
g.command('add', 'add_extension')
6 changes: 6 additions & 0 deletions azdev/help.py
@@ -144,6 +144,12 @@
short-summary: Verify that all modules load within an acceptable timeframe.
"""

helps['perf benchmark'] = """
short-summary: Display benchmark statistics for Azure CLI (and extension) commands by executing each with "python -m azure.cli {COMMAND}" in a separate process.
examples:
- name: Run benchmark on "network application-gateway" and "storage account"
text: azdev perf benchmark "network application-gateway -h" "storage account" "version" "group list"
"""

helps['extension'] = """
short-summary: Control which CLI extensions are visible in the development environment.
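For readers who want to see what the new command measures, here is a minimal standalone sketch of the same idea, outside of azdev: time "python -m azure.cli <command>" end to end in a separate process and summarize the runs. It uses only the standard library, assumes the azure.cli package is importable (i.e. azdev setup has been run), and the function name and run count are illustrative, not part of this PR.

import statistics
import subprocess
import sys
import timeit


def time_cli_command(command, runs=5):
    # Each run launches a fresh process so interpreter startup and import cost are included.
    durations = []
    for _ in range(runs):
        start = timeit.default_timer()
        subprocess.run(
            [sys.executable, "-m", "azure.cli"] + command.split(),
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            check=False,
        )
        durations.append(timeit.default_timer() - start)
    return {
        "min": round(min(durations), 4),
        "max": round(max(durations), 4),
        "mean": round(statistics.mean(durations), 4),
        "median": round(statistics.median(durations), 4),
        "std": round(statistics.pstdev(durations), 4),  # population std, matching the PR's helper
    }


print(time_cli_command("version"))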
114 changes: 109 additions & 5 deletions azdev/operations/performance.py
@@ -5,12 +5,13 @@
# -----------------------------------------------------------------------------

import re
import timeit

from knack.log import get_logger
from knack.util import CLIError

from azdev.utilities import (
display, heading, subheading, cmd, require_azure_cli)
display, heading, subheading, cmd, py_cmd, require_azure_cli)

logger = get_logger(__name__)

@@ -106,17 +107,20 @@ def _claim_higher_threshold(val):
in any modified files.
""")

display('== PASSED MODULES ==')
display("== PASSED MODULES ==")
display_table(passed_mods)
display('\nPASSED: Average load time all modules: {} ms'.format(
int(passed_mods[TOTAL]['average'])))
display(
"\nPASSED: Average load time all modules: {} ms".format(
int(passed_mods[TOTAL]["average"])
)
)


def mean(data):
"""Return the sample arithmetic mean of data."""
n = len(data)
if n < 1:
raise ValueError('len < 1')
raise ValueError("len < 1")
return sum(data) / float(n)


@@ -140,3 +144,103 @@ def display_table(data):
for key, val in data.items():
display('{:<20} {:>12.0f} {:>12.0f} {:>12.0f} {:>25}'.format(
key, val['average'], val['threshold'], val['stdev'], str(val['values'])))


# Note: requires "azdev setup" to have been run so that "python -m azure.cli" is available.
def benchmark(commands, runs=20):
if runs <= 0:
raise CLIError("Number of runs must be greater than 0.")

# Size the table template to the longest command so the columns line up.
longest_cmd = max(commands, key=len)

line_tmpl = "| {" + "cmd:" + "<" + str(len(longest_cmd)) + "s} |"
line_tmpl = line_tmpl + " {min:10s} | {max:10s} | {avg:10s} | {mid:10s} | {std:10s} | {runs:10s} |"

line_head = line_tmpl.format(
cmd="Command",
min="Min",
max="Max",
avg="Mean",
mid="Median",
std="Std",
runs="Runs",
)

logger.warning(line_head)
logger.warning("-" * (85 + len(max_len_cmd)))

import multiprocessing

# Measure each requested command
for raw_command in commands:
logger.info("Measuring %s...", raw_command)

pool = multiprocessing.Pool(multiprocessing.cpu_count(), _benchmark_process_pool_init)

# The try/except is structured this way because of a bug in Python's multiprocessing.Pool (https://bugs.python.org/issue8296)
# Discussion on StackOverflow:
# https://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool/1408476
try:
time_series = pool.map_async(_benchmark_cmd_timer, [raw_command] * runs).get(1000)
except multiprocessing.TimeoutError:
pool.terminate()
break
else:
pool.close()
pool.join()

stats = _benchmark_cmd_statistics(time_series)

line_body = line_tmpl.format(
cmd=raw_command,
min=str(stats["min"]),
max=str(stats["max"]),
avg=str(stats["avg"]),
mid=str(stats["median"]),
std=str(stats["std"]),
runs=str(runs),
)
logger.warning(line_body)

logger.warning("-" * (85 + len(max_len_cmd)))


def _benchmark_process_pool_init():
import signal
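# Ignore SIGINT in the worker processes so Ctrl-C is handled only by the parent (see the multiprocessing discussion linked in benchmark above).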
signal.signal(signal.SIGINT, signal.SIG_IGN)


def _benchmark_cmd_timer(raw_command):
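# Time one end-to-end run of "python -m azure.cli <raw_command>" in a separate process.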
s = timeit.default_timer()
py_cmd("azure.cli {}".format(raw_command), is_module=True)
e = timeit.default_timer()
return round(e - s, 4)


def _benchmark_cmd_statistics(time_series: list):
from math import sqrt

time_series.sort()

size = len(time_series)

# Median: average the two middle values when the sample size is even.
if size % 2 == 0:
mid_time = (time_series[size // 2 - 1] + time_series[size // 2]) / 2
else:
mid_time = time_series[(size - 1) // 2]

min_time = time_series[0]
max_time = time_series[-1]
avg_time = sum(time_series) / size

# Population standard deviation (divide by N, not N - 1).
std_deviation = sqrt(
sum([(t - avg_time) * (t - avg_time) for t in time_series]) / size
)

return {
"min": round(min_time, 4),
"max": round(max_time, 4),
"median": round(mid_time, 4),
"avg": round(avg_time, 4),
"std": round(std_deviation, 4),
}
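For reference, a quick worked example of the statistics helper above, with made-up timings:

_benchmark_cmd_statistics([1.2, 1.5, 1.1, 1.4])
# -> {"min": 1.1, "max": 1.5, "median": 1.3, "avg": 1.3, "std": 0.1581}
# (the median averages the two middle values for an even-sized sample; std is the population form)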
4 changes: 4 additions & 0 deletions azdev/params.py
@@ -101,6 +101,10 @@ def load_arguments(self, _):
with ArgumentsContext(self, 'perf') as c:
c.argument('runs', type=int, help='Number of runs to average performance over.')

with ArgumentsContext(self, 'perf benchmark') as c:
c.positional('commands', nargs="+", help="One or more commands to benchmark; quote each command so it is passed as a single argument.")
c.argument('top', type=int, help='Show N slowest commands. 0 for all.')

with ArgumentsContext(self, 'extension') as c:
c.argument('dist_dir', help='Name of a directory in which to save the resulting WHL files.')

80 changes: 79 additions & 1 deletion azure-pipelines.yml
@@ -41,7 +41,7 @@ jobs:
inputs:
targetType: 'filePath'
filePath: scripts/ci/run_tox.sh

- job: Tox38
displayName: 'Tox: Python 3.8'
condition: succeeded()
@@ -267,3 +267,81 @@ jobs:
# verify azdev style works
azdev style redis
displayName: 'Test azdev style'

- job: PerformanceCheckOnLinux
displayName: "Performance Check on Linux"
dependsOn: BuildPythonWheel
pool:
vmImage: 'ubuntu-16.04'
strategy:
matrix:
Python36:
python.version: '3.6'
Python38:
python.version: '3.8'
steps:
- task: DownloadPipelineArtifact@1
displayName: 'Download Build'
inputs:
TargetPath: '$(Build.ArtifactStagingDirectory)/pypi'
artifactName: pypi
- task: UsePythonVersion@0
displayName: 'Use Python $(python.version)'
inputs:
versionSpec: '$(python.version)'
- bash: |
set -ev
. scripts/ci/install.sh
azdev --version
displayName: 'Azdev Setup'
- bash: |
set -ev
. env/bin/activate
azdev perf load-times
displayName: "Load Performance"
- bash: |
set -ev
. env/bin/activate
azdev perf benchmark "version" "network vnet -h" "rest -h" "storage account"
displayName: "Execution Performance"

# - job: PerformanceCheckOnWindows
# displayName: "Performance Check on Windows"
# dependsOn: BuildPythonWheel
# pool:
# vmImage: 'vs2017-win2016'
# strategy:
# matrix:
# Python36:
# python.version: '3.6'
# Python38:
# python.version: '3.8'
# steps:
# - task: DownloadPipelineArtifact@1
# displayName: 'Download Build'
# inputs:
# TargetPath: '$(Build.ArtifactStagingDirectory)/pypi'
# artifactName: pypi
# - task: UsePythonVersion@0
# displayName: 'Use Python $(python.version)'
# inputs:
# versionSpec: '$(python.version)'
# - powershell: |
# python -m venv env
# .\env\Scripts\Activate.ps1
# pip install --user -U pip setuptools wheel -q
# pip install --user $(find ${BUILD_ARTIFACTSTAGINGDIRECTORY}/pypi -name *.tar.gz) -q
# git clone https://github.com/Azure/azure-cli.git
# git clone https://github.com/Azure/azure-cli-extensions.git
# azdev setup -c -r azure-cli-extensions

# azdev --version
# displayName: 'Azdev Setup'
# - powershell: |
# .\env\Scripts\Activate.ps1
# azdev perf load-times
# displayName: "Load Performance"
# - powershell: |
# .\env\Scripts\Activate.ps1
# azdev perf benchmark "version" "network vnet -h" "rest -h" "storage account"
# displayName: "Execution Performance"