
Commit fb36b1b

Setup benchmark suite
Signed-off-by: Tim Paine <[email protected]>
1 parent 323122e · commit fb36b1b

File tree

11 files changed: +203 -0 lines changed


.gitignore

Lines changed: 5 additions & 0 deletions
@@ -104,6 +104,11 @@ csp/lib/
 *.so
 *.tsbuildinfo
 
+# Benchmarks
+.asv
+ci/benchmarks/*
+!ci/benchmarks/benchmarks.json
+
 # Jupyter / Editors
 .ipynb_checkpoints
 .autoversion

Makefile

Lines changed: 22 additions & 0 deletions
@@ -118,6 +118,28 @@ dockerps: ## spin up docker compose services for adapter testing
 dockerdown: ## spin up docker compose services for adapter testing
 	$(DOCKER) compose -f ci/$(ADAPTER)/docker-compose.yml down
 
+##############
+# BENCHMARKS #
+##############
+.PHONY: benchmark benchmarks benchmark-regen benchmark-view benchmarks-regen benchmarks-view
+benchmark: ## run benchmarks
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`^!
+
+# https://github.com/airspeed-velocity/asv/issues/1027
+# https://github.com/airspeed-velocity/asv/issues/488
+benchmark-regen:
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.4^!
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.5^!
+
+benchmark-view: ## generate viewable website of benchmark results
+	python -m asv publish --config csp/benchmarks/asv.conf.jsonc
+	python -m asv preview --config csp/benchmarks/asv.conf.jsonc
+
+# Alias
+benchmarks: benchmark
+benchmarks-regen: benchmark-regen
+benchmarks-view: benchmark-view
+
 ###########
 # VERSION #
 ###########
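The targets above shell out to the asv CLI. For scripting the same invocation from Python (not part of this commit; a minimal sketch that only assumes asv is installed and the config path added above exists), the equivalent is a plain subprocess call:

# Sketch only: mirrors the `make benchmark` target from Python.
import subprocess


def run_benchmarks(ref: str = "HEAD", config: str = "csp/benchmarks/asv.conf.jsonc") -> None:
    """Benchmark a single commit (`<ref>^!`), as the Makefile target does for the current branch."""
    subprocess.run(
        ["python", "-m", "asv", "run", "--config", config, "--verbose", f"{ref}^!"],
        check=True,
    )


if __name__ == "__main__":
    run_benchmarks()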

ci/benchmarks/benchmarks.json

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+{
+    "stats.basic.StatsBenchmarkSuite.time_stats": {
+        "code": "class StatsBenchmarkSuite:\n def time_stats(self, function):\n def g():\n data = csp.curve(typ=np.ndarray, data=self.data)\n value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))\n csp.add_graph_output(\"final_value\", value, tick_count=1)\n \n timer = Timer(\n lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))\n )\n elapsed = timer.timeit(1)\n return elapsed\n\n def setup(self, _):\n self.start_date = datetime(2020, 1, 1)\n self.num_rows = 1_000\n self.array_size = 100\n self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]\n self.random_values = [\n np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)\n ] # 100 element np array\n self.data = list(zip(self.test_times, self.random_values))\n self.interval = 500",
+        "min_run_count": 2,
+        "name": "stats.basic.StatsBenchmarkSuite.time_stats",
+        "number": 0,
+        "param_names": [
+            "function"
+        ],
+        "params": [
+            [
+                "'median'",
+                "'quantile'",
+                "'rank'"
+            ]
+        ],
+        "rounds": 2,
+        "sample_time": 0.01,
+        "type": "time",
+        "unit": "seconds",
+        "version": "f57f3ee288b0805597f9edee91b4d1dddf41046d34fbd46cfbd7135f459e62e3",
+        "warmup_time": -1
+    },
+    "version": 2
+}
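This file is asv's benchmark metadata (the one exception carved out of .gitignore above): it records the discovered benchmark, its parameter grid, and its timing settings. A small sketch, not part of the commit, for inspecting it with the standard library:

# Sketch: list the benchmarks and their parameter grids recorded in ci/benchmarks/benchmarks.json.
import json

with open("ci/benchmarks/benchmarks.json") as f:
    meta = json.load(f)

for name, spec in meta.items():
    if isinstance(spec, dict):  # skip the top-level "version" integer
        print(name, dict(zip(spec["param_names"], spec["params"])))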

conda/dev-environment-unix.yml

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
+  - asv
   - bison
   - brotli
   - build

conda/dev-environment-win.yml

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
+  - asv
   - brotli
   - build
   - bump2version>=1

csp/benchmarks/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
+from .common import *

csp/benchmarks/asv.conf.jsonc

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+// https://asv.readthedocs.io/en/v0.6.3/asv.conf.json.html
+{
+    "version": 1,
+    "project": "csp",
+    "project_url": "https://github.com/Point72/csp",
+    "repo": "../..",
+    "branches": ["main", "tkp/bm"],
+    "dvcs": "git",
+
+    "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
+    "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
+    "build_command": [
+        "python -m pip install build",
+        "python -m build --wheel -o {build_cache_dir} {build_dir}"
+    ],
+    "environment_type": "virtualenv",
+    "install_timeout": 600,
+    "show_commit_url": "https://github.com/point72/csp/commit/",
+
+    "pythons": ["3.11"],
+
+    // "environment_type": "mamba",
+    // "conda_channels": ["conda-forge"],
+    // "conda_environment_file": "conda/dev-environment-unix.yml",
+
+    "benchmark_dir": "../../csp/benchmarks",
+    "env_dir": "../../.asv/env",
+    "results_dir": "../../ci/benchmarks",
+    "html_dir": "../../.asv/html",
+
+    "hash_length": 8,
+    "build_cache_size": 2
+}
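Paths in this config resolve relative to the config file's directory (csp/benchmarks/), so results land in ci/benchmarks and environments/HTML under .asv, matching the .gitignore change above. asv handles the // comments itself; for ad-hoc inspection with the standard library you would need to strip them first, e.g. (a sketch, not part of the commit):

# Sketch: load the JSONC config with the stdlib by dropping full-line // comments.
import json
import re
from pathlib import Path

text = Path("csp/benchmarks/asv.conf.jsonc").read_text()
config = json.loads(re.sub(r"^\s*//.*$", "", text, flags=re.MULTILINE))
print(config["benchmark_dir"], config["results_dir"], config["html_dir"])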

csp/benchmarks/common.py

Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
+from asv_runner.benchmarks import benchmark_types
+from asv_runner.benchmarks.mark import SkipNotImplemented
+from logging import getLogger
+
+__all__ = ("ASVBenchmarkHelper",)
+
+
+class ASVBenchmarkHelper:
+    """A helper base class to mimic some of what ASV does when running benchmarks, to
+    test them outside of ASV.
+
+    NOTE: should be removed in favor of calling ASV itself from python, if possible.
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.log = getLogger(self.__class__.__name__)
+
+    def run_all(self):
+        # https://asv.readthedocs.io/en/v0.6.3/writing_benchmarks.html#benchmark-types
+        benchmarks = {}
+
+        for method in dir(self):
+            for cls in benchmark_types:
+                if cls.name_regex.match(method):
+                    benchmark_type = cls.__name__.replace("Benchmark", "")
+                    if benchmark_type not in benchmarks:
+                        benchmarks[benchmark_type] = []
+
+                    name = f"{self.__class__.__qualname__}.{method}"
+                    func = getattr(self, method)
+                    benchmarks[benchmark_type].append(cls(name, func, (func, self)))
+
+        def run_benchmark(benchmark):
+            skip = benchmark.do_setup()
+            try:
+                if skip:
+                    return
+                try:
+                    benchmark.do_run()
+                except SkipNotImplemented:
+                    pass
+            finally:
+                benchmark.do_teardown()
+
+        for type, benchmarks_to_run in benchmarks.items():
+            if benchmarks_to_run:
+                self.log.warn(f"Running benchmarks for {type}")
+                for benchmark in benchmarks_to_run:
+                    if len(getattr(self, "params", [])):
+                        # TODO: cleaner
+                        param_count = 0
+                        while param_count < 100:
+                            try:
+                                benchmark.set_param_idx(param_count)
+                                params = benchmark._current_params
+                                self.log.warn(f"[{type}][{benchmark.name}][{'.'.join(str(_) for _ in params)}]")
+                                run_benchmark(benchmark=benchmark)
+                                param_count += 1
+                            except ValueError:
+                                break
+                    else:
+                        self.log.warn(f"Running [{type}][{benchmark.func.__name__}]")
+                        run_benchmark(benchmark=benchmark)
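ASVBenchmarkHelper discovers time_*/mem_*/etc. methods via asv_runner's benchmark types and runs them outside of asv, stepping through parameter indices until set_param_idx raises ValueError. A hypothetical minimal suite (not part of this commit) that uses it the same way stats/basic.py does:

# Hypothetical example: a parametrized suite runnable both by asv and standalone via run_all().
from csp.benchmarks import ASVBenchmarkHelper


class ExampleSuite(ASVBenchmarkHelper):
    params = ((1_000, 10_000),)
    param_names = ("n",)

    def setup(self, n):
        self.values = list(range(n))

    def time_sum(self, n):
        # asv times this body; standalone, run_all() drives it through do_run()
        sum(self.values)


if __name__ == "__main__":
    ExampleSuite().run_all()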

csp/benchmarks/stats/__init__.py

Whitespace-only changes.

csp/benchmarks/stats/basic.py

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+import numpy as np
+from datetime import datetime, timedelta
+from timeit import Timer
+
+import csp
+from csp.benchmarks import ASVBenchmarkHelper
+
+__all__ = ("StatsBenchmarkSuite",)
+
+
+class StatsBenchmarkSuite(ASVBenchmarkHelper):
+    """
+    python -m csp.benchmarks.stats.basic
+    """
+
+    params = (("median", "quantile", "rank"),)
+    param_names = ("function",)
+
+    rounds = 5
+    repeat = (100, 200, 60.0)
+
+    function_args = {"quantile": {"quant": 0.95}}
+
+    def setup(self, _):
+        self.start_date = datetime(2020, 1, 1)
+        self.num_rows = 1_000
+        self.array_size = 100
+        self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]
+        self.random_values = [
+            np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)
+        ]  # 100 element np array
+        self.data = list(zip(self.test_times, self.random_values))
+        self.interval = 500
+
+    def time_stats(self, function):
+        def g():
+            data = csp.curve(typ=np.ndarray, data=self.data)
+            value = getattr(csp.stats, function)(data, interval=self.interval, **self.function_args.get(function, {}))
+            csp.add_graph_output("final_value", value, tick_count=1)
+
+        timer = Timer(
+            lambda: csp.run(g, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))
+        )
+        elapsed = timer.timeit(1)
+        return elapsed
+
+
+if __name__ == "__main__":
+    sbs = StatsBenchmarkSuite()
+    sbs.run_all()
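Under asv the suite runs once per entry in params; per its docstring it can also be run standalone with python -m csp.benchmarks.stats.basic. A quick manual spot-check of a single case (a sketch, not part of the commit, assuming a built csp is importable) looks like:

# Sketch: time one parametrized case directly, bypassing asv and run_all().
from csp.benchmarks.stats.basic import StatsBenchmarkSuite

suite = StatsBenchmarkSuite()
suite.setup("median")  # asv passes the current parameter to setup()
print("median:", suite.time_stats("median"), "seconds")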
