Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
67 changes: 65 additions & 2 deletions benchmarks/bm_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ def add_arguments(self) -> None:
def add_asv_arguments(self) -> None:
self.subparser.add_argument(
"asv_args",
nargs=argparse.REMAINDER,
nargs="*",
help="Any number of arguments to pass down to the ASV benchmark command.",
)

Expand Down Expand Up @@ -537,6 +537,69 @@ def func(args: argparse.Namespace) -> None:
_subprocess_runner([args.asv_sub_command, *args.asv_args], asv=True)


class TrialRun(_SubParserGenerator):
    """'trialrun' sub-command: a quick, single-shot run of one benchmark."""

    name = "trialrun"
    description = (
        "Fast trial-run a given benchmark, to check it works : "
        "in a provided or latest-lockfile environment, "
        "with no repeats for accuracy of measurement."
    )
    epilog = (
        "e.g. python bm_runner.py trialrun "
        "MyBenchmarks.time_calc /tmp/testpython/bin/python"
        "\n\n NOTE#1: setting $DATA_GEN_PYTHON is equivalent to the "
        "'runpath' argument."
        "\n NOTE#2: setting $OVERRIDE_TEST_DATA_REPOSITORY avoids the runner "
        "installing iris-test-data."
        "\n NOTE#3: setting $BENCHMARK_DATA may be desirable to specify "
        "where is safe to create potentially large (Gb) test data."
    )

    def add_arguments(self) -> None:
        """Register the sub-command's two positional arguments."""
        # Which benchmark(s) to run - forwarded to ASV's '--bench' option.
        self.subparser.add_argument(
            "benchmark",
            type=str,
            help=(
                "A benchmark name, possibly including wildcards, "
                "as supported by the ASV '--bench' argument."
            ),
        )
        # Optional: an existing interpreter to reuse, skipping env building.
        self.subparser.add_argument(
            "runpath",
            type=str,
            nargs="?",
            help=(
                "A path to an existing python executable, "
                "to completely bypass environment building."
            ),
        )

    @staticmethod
    def func(args: argparse.Namespace) -> None:
        """Run the requested benchmark once, inside the data-gen environment."""
        if args.runpath:
            # Point $DATA_GEN_PYTHON at the supplied interpreter so that the
            # common setup treats it as the ready-made data-gen environment
            # (which doubles as the trial-run environment).
            environ["DATA_GEN_PYTHON"] = str(Path(args.runpath).resolve())
        _setup_common()
        # _setup_common() populates this variable when it was not pre-set.
        trial_python = environ["DATA_GEN_PYTHON"]
        # Enable benchmarks gated behind the on-demand flag.
        environ["ON_DEMAND_BENCHMARKS"] = "1"
        command = [
            "run",
            "--bench",
            args.benchmark,
            # '--quick' = a single sample : speed over timing accuracy.
            "--quick",
            "--show-stderr",
            # Reuse the existing interpreter instead of building a fresh env.
            "--environment",
            f"existing:{trial_python}",
        ]
        # NOTE(review): asv_args appears to be registered by shared parser
        # setup outside this class - pass any extras straight through to ASV.
        command.extend(args.asv_args)
        _subprocess_runner(command, asv=True)


class GhPost(_SubParserGenerator):
name = "_gh_post"
description = (
Expand Down Expand Up @@ -566,7 +629,7 @@ def main():
)
subparsers = parser.add_subparsers(required=True)

for gen in (Overnight, Branch, CPerf, SPerf, Custom, GhPost):
for gen in (Overnight, Branch, CPerf, SPerf, Custom, TrialRun, GhPost):
_ = gen(subparsers).subparser

parsed = parser.parse_args()
Expand Down
4 changes: 2 additions & 2 deletions noxfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -313,5 +313,5 @@ def benchmarks(session: nox.sessions.Session):
)
session.error(message)
session.install("asv", "nox")
with session.chdir(Path(__file__).parent / "benchmarks"):
session.run("python", "bm_runner.py", *session.posargs)
bm_runner_path = Path(__file__).parent / "benchmarks" / "bm_runner.py"
session.run("python", bm_runner_path, *session.posargs)