1 change: 1 addition & 0 deletions azdev/commands.py
@@ -43,6 +43,7 @@ def operation_group(name):

    with CommandGroup(self, 'perf', operation_group('performance')) as g:
        g.command('load-times', 'check_load_time')
        g.command('benchmark', 'benchmark', is_preview=True)

    with CommandGroup(self, 'extension', operation_group('extensions')) as g:
        g.command('add', 'add_extension')
6 changes: 6 additions & 0 deletions azdev/help.py
@@ -144,6 +144,12 @@
    short-summary: Verify that all modules load within an acceptable timeframe.
"""

helps['perf benchmark'] = """
    short-summary: Display benchmark statistics for Azure CLI (and extension) commands by executing each with `-h` in a separate process.
    examples:
        - name: Run the benchmark on "network application-gateway" and "storage account"
          text: azdev perf benchmark --prefix "network application-gateway" --prefix "storage account"
"""

helps['extension'] = """
    short-summary: Control which CLI extensions are visible in the development environment.
15 changes: 15 additions & 0 deletions azdev/operations/actions.py
@@ -0,0 +1,15 @@
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------

from argparse import Action


class PerfBenchmarkCommandPrefixAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        if not namespace.command_prefixes:
            namespace.command_prefixes = []

        namespace.command_prefixes.append(' '.join(values))
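
# For illustration: with nargs='+', an invocation such as
#   azdev perf benchmark --prefix network vnet --prefix storage account
# joins each --prefix's tokens, leaving
#   namespace.command_prefixes == ['network vnet', 'storage account']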
128 changes: 124 additions & 4 deletions azdev/operations/performance.py
@@ -5,6 +5,7 @@
# -----------------------------------------------------------------------------

import re
import timeit

from knack.log import get_logger
from knack.util import CLIError
@@ -106,17 +107,20 @@ def _claim_higher_threshold(val):
in any modified files.
""")

    display('== PASSED MODULES ==')
    display("== PASSED MODULES ==")
    display_table(passed_mods)
    display('\nPASSED: Average load time all modules: {} ms'.format(
        int(passed_mods[TOTAL]['average'])))
    display(
        "\nPASSED: Average load time all modules: {} ms".format(
            int(passed_mods[TOTAL]["average"])
        )
    )


def mean(data):
    """Return the sample arithmetic mean of data."""
    n = len(data)
    if n < 1:
        raise ValueError('len < 1')
        raise ValueError("len < 1")
    return sum(data) / float(n)


@@ -140,3 +144,119 @@ def display_table(data):
    for key, val in data.items():
        display('{:<20} {:>12.0f} {:>12.0f} {:>12.0f} {:>25}'.format(
            key, val['average'], val['threshold'], val['stdev'], str(val['values'])))


# Requires a dev environment created via `azdev setup`.
def benchmark(command_prefixes=None, runs=20):
    """Time `az <command> -h` over multiple runs for every matching command."""
    if runs <= 0:
        raise CLIError("Number of runs must be greater than 0.")

    import multiprocessing
    from azure.cli.core import get_default_cli
    from azure.cli.core.file_util import create_invoker_and_load_cmds_and_args

    def _process_pool_init():
        import signal

        # Ignore SIGINT in worker processes so that a Ctrl+C in the parent
        # terminates the pool cleanly instead of interrupting each worker.
        def sigint_dummy_pass(signal_num, frame):  # pylint: disable=unused-argument
            pass

        signal.signal(signal.SIGINT, sigint_dummy_pass)

    # load command table
    az_cli = get_default_cli()
    create_invoker_and_load_cmds_and_args(az_cli)
    raw_command_table = az_cli.invocation.commands_loader.command_table
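    # raw_command_table maps full command names (e.g. 'network vnet create')
    # to loaded command objects; only the names are needed below.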

    command_table = []
    if command_prefixes:
        for name in raw_command_table:
            if any(name.startswith(prefix) for prefix in command_prefixes):
                command_table.append(name)
    else:
        command_table = list(raw_command_table.keys())

    # Longest command name, used to size the first column of the output table.
    max_len_cmd = max(command_table, key=len)

    line_tmpl = ("| {cmd:<" + str(len(max_len_cmd)) + "s} |"
                 " {min:10s} | {max:10s} | {avg:10s} | {mid:10s} | {std:10s} | {runs:10s} |")

    line_head = line_tmpl.format(
        cmd="Command",
        min="Min",
        max="Max",
        avg="Mean",
        mid="Median",
        std="Std",
        runs="Runs",
    )

    # Results are logged at WARNING level so they are visible without --verbose.
    logger.warning(line_head)
    logger.warning("-" * (85 + len(max_len_cmd)))

    # Measure each requested command.
    for raw_command in command_table:
        cmd_tpl = "az {} -h".format(raw_command)

        logger.info("Measuring %s...", raw_command)

        pool = multiprocessing.Pool(multiprocessing.cpu_count(), _process_pool_init)
        try:
            # Run the command `runs` times in parallel; give up after 1000 seconds.
            time_series = pool.map_async(_benchmark_cmd_timer, [cmd_tpl] * runs).get(1000)
        except multiprocessing.TimeoutError:
            pool.terminate()
            break
        else:
            pool.close()
            pool.join()

        stats = _benchmark_cmd_statistics(time_series)

        line_body = line_tmpl.format(
            cmd=raw_command,
            min=str(stats["min"]),
            max=str(stats["max"]),
            avg=str(stats["avg"]),
            mid=str(stats["median"]),
            std=str(stats["std"]),
            runs=str(runs),
        )
        logger.warning(line_body)

    logger.warning("-" * (85 + len(max_len_cmd)))


def _benchmark_cmd_timer(cmd_tpl):
    # Wall-clock time for a single run of the command.
    start = timeit.default_timer()
    cmd(cmd_tpl)
    end = timeit.default_timer()
    return round(end - start, 4)
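
# `cmd` shells out, so each sample covers a full subprocess run (interpreter
# start-up, CLI bootstrap, then `-h` rendering): end-to-end latency by design.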


def _benchmark_cmd_statistics(time_series: list):
    from math import sqrt

    time_series.sort()

    size = len(time_series)

    # Median of the sorted series.
    if size % 2 == 0:
        mid_time = (time_series[size // 2 - 1] + time_series[size // 2]) / 2
    else:
        mid_time = time_series[(size - 1) // 2]

    min_time = time_series[0]
    max_time = time_series[-1]
    avg_time = sum(time_series) / size

    # Population standard deviation (divide by N, not N - 1).
    std_deviation = sqrt(sum((t - avg_time) ** 2 for t in time_series) / size)

    return {
        "min": round(min_time, 4),
        "max": round(max_time, 4),
        "median": round(mid_time, 4),
        "avg": round(avg_time, 4),
        "std": round(std_deviation, 4),
    }
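
# Worked example: for time_series = [0.8, 1.0, 1.2, 1.4] the function returns
# {'min': 0.8, 'max': 1.4, 'median': 1.1, 'avg': 1.1, 'std': 0.2236}.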
7 changes: 7 additions & 0 deletions azdev/params.py
@@ -11,6 +11,7 @@

from azdev.completer import get_test_completion
from azdev.operations.linter import linter_severity_choices
from azdev.operations.actions import PerfBenchmarkCommandPrefixAction


class Flag(object):
@@ -100,6 +101,12 @@ def load_arguments(self, _):

    with ArgumentsContext(self, 'perf') as c:
        c.argument('runs', type=int, help='Number of runs to average performance over.')
        # nargs='+' allows a multi-word prefix without quoting; the custom action
        # joins the tokens and accumulates repeated --prefix flags.
        c.argument('command_prefixes',
                   nargs='+',
                   action=PerfBenchmarkCommandPrefixAction,
                   options_list='--prefix',
                   help='Command prefix to benchmark; may be given multiple times.')
        c.argument('top', type=int, help='Show N slowest commands. 0 for all.')

    with ArgumentsContext(self, 'extension') as c:
        c.argument('dist_dir', help='Name of a directory in which to save the resulting WHL files.')
41 changes: 40 additions & 1 deletion azure-pipelines.yml
@@ -41,7 +41,7 @@ jobs:
    inputs:
      targetType: 'filePath'
      filePath: scripts/ci/run_tox.sh

- job: Tox38
  displayName: 'Tox: Python 3.8'
  condition: succeeded()
@@ -267,3 +267,42 @@ jobs:
      # verify azdev style works
      azdev style redis
    displayName: 'Test azdev style'

- job: PerformanceCheck
  displayName: "PerformanceCheck"
  pool:
    vmImage: 'ubuntu-16.04'
  strategy:
    matrix:
      Python36:
        python.version: '3.6'
      Python38:
        python.version: '3.8'
  steps:
  - task: UsePythonVersion@0
    displayName: 'Use Python $(python.version)'
    inputs:
      versionSpec: '$(python.version)'
  - bash: |
      python -m venv env
      chmod +x env/bin/activate
      . env/bin/activate

      pip install -e .
      azdev --version

      git clone https://github.com/Azure/azure-cli.git
      git clone https://github.com/Azure/azure-cli-extensions.git

      azdev setup -c ./azure-cli -r ./azure-cli-extensions
    displayName: 'Azdev Setup'
  - bash: |
      set -ev
      . env/bin/activate
      azdev perf load-times
    displayName: "Load Performance"
  - bash: |
      set -ev
      . env/bin/activate
      azdev perf benchmark --prefix "version" --prefix "network vnet "
    displayName: "Execution Performance"