Skip to content

Commit

Permalink
Time tests improvements (#2642)
Browse files Browse the repository at this point in the history
* Remove extra functions from run_timetest.py

* Add `log.debug` of raw and aggregated statistics in run_timetest.py

* Implement storing of models locally for test_timetest.py
  • Loading branch information
vurusovs authored Oct 14, 2020
1 parent 4f80da4 commit 12b6909
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 18 deletions.
27 changes: 11 additions & 16 deletions tests/time_tests/scripts/run_timetest.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,27 +50,13 @@ def run_cmd(args: list, log=None, verbose=True):
return proc.returncode, ''.join(output)


def read_stats(stats_path, stats: dict):
    """Parse one statistics file and merge its values into accumulated stats.

    Returns a new dict mapping each step name to the previously collected
    duration list (empty if the step is new) extended with the duration
    read from `stats_path`.
    """
    with open(stats_path, "r") as stats_file:
        file_data = yaml.safe_load(stats_file)
    combined = {}
    for step, duration in file_data.items():
        combined[step] = stats.get(step, []) + [duration]
    return combined


def aggregate_stats(stats: dict):
    """Compute per-step aggregates (mean and sample standard deviation).

    For a step with a single measurement the deviation is reported as 0,
    because `statistics.stdev` requires at least two data points.
    """
    aggregated = {}
    for step, durations in stats.items():
        deviation = statistics.stdev(durations) if len(durations) > 1 else 0
        aggregated[step] = {"avg": statistics.mean(durations),
                            "stdev": deviation}
    return aggregated


def write_aggregated_stats(stats_path, stats: dict):
    """Serialize aggregated statistics to `stats_path` as YAML."""
    with open(stats_path, "w") as out_file:
        yaml.safe_dump(stats, out_file)


def prepare_executable_cmd(args: dict):
"""Generate common part of cmd from arguments to execute"""
return [str(args["executable"].resolve(strict=True)),
Expand All @@ -96,10 +82,18 @@ def run_timetest(args: dict, log=None):
"Statistics aggregation is skipped.".format(args["executable"], retcode, msg))
return retcode, {}

stats = read_stats(tmp_stats_path, stats)
# Read raw statistics
with open(tmp_stats_path, "r") as file:
raw_data = yaml.safe_load(file)
log.debug("Raw statistics after run of executable #{}: {}".format(run_iter, raw_data))

# Combine statistics from several runs
stats = dict((step_name, stats.get(step_name, []) + [duration])
for step_name, duration in raw_data.items())

# Aggregate results
aggregated_stats = aggregate_stats(stats)
log.debug("Aggregated statistics after full run: {}".format(aggregated_stats))

return 0, aggregated_stats

Expand Down Expand Up @@ -154,7 +148,8 @@ def cli_parser():

if args.stats_path:
# Save aggregated results to a file
write_aggregated_stats(args.stats_path, aggr_stats)
with open(args.stats_path, "w") as file:
yaml.safe_dump(aggr_stats, file)
logging.info("Aggregated statistics saved to a file: '{}'".format(
args.stats_path.resolve()))
else:
Expand Down
11 changes: 11 additions & 0 deletions tests/time_tests/test_runner/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import hashlib
import shutil
import logging
import tempfile

from test_runner.utils import upload_timetest_data, \
DATABASE, DB_COLLECTIONS
Expand Down Expand Up @@ -107,6 +108,16 @@ def niter(request):
# -------------------- CLI options --------------------


@pytest.fixture(scope="function")
def temp_dir(pytestconfig):
    """Create a temporary directory for test purposes.

    Yields a `Path` to a fresh directory that is removed after every test
    run. Using `TemporaryDirectory` as a context manager guarantees cleanup
    even when the test fails: pytest re-raises the test's exception through
    the `yield`, and the `with` block still runs its teardown — whereas an
    explicit `cleanup()` call placed after the `yield` would be skipped.
    """
    with tempfile.TemporaryDirectory() as tmp_dir_name:
        yield Path(tmp_dir_name)


@pytest.fixture(scope="function")
def cl_cache_dir(pytestconfig):
"""Generate directory to save OpenCL cache before test run and clean up after run.
Expand Down
13 changes: 11 additions & 2 deletions tests/time_tests/test_runner/test_timetest.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,29 +17,38 @@
from pathlib import Path
import logging
import os
import shutil

from scripts.run_timetest import run_timetest
from test_runner.utils import expand_env_vars

REFS_FACTOR = 1.2 # 120%


def test_timetest(instance, executable, niter, cl_cache_dir, test_info):
def test_timetest(instance, executable, niter, cl_cache_dir, test_info, temp_dir):
"""Parameterized test.
:param instance: test instance. Should not be changed during test run
:param executable: timetest executable to run
:param niter: number of times to run executable
:param cl_cache_dir: directory to store OpenCL cache
:param test_info: custom `test_info` field of built-in `request` pytest fixture
:param temp_dir: path to a temporary directory. Will be cleaned up after test run
"""
# Prepare model to get model_path
model_path = instance["model"].get("path")
assert model_path, "Model path is empty"
model_path = Path(expand_env_vars(model_path))

# Copy model to a local temporary directory
model_dir = temp_dir / "model"
shutil.copytree(model_path.parent, model_dir)
model_path = model_dir / model_path.name

# Run executable
exe_args = {
"executable": Path(executable),
"model": Path(expand_env_vars(model_path)),
"model": Path(model_path),
"device": instance["device"]["name"],
"niter": niter
}
Expand Down

0 comments on commit 12b6909

Please sign in to comment.