devops/scripts/benchmarks/benches/compute.py (10 changes: 6 additions & 4 deletions)
@@ -1,4 +1,4 @@
-# Copyright (C) 2024-2025 Intel Corporation
+# Copyright (C) 2024-2026 Intel Corporation
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -1027,9 +1027,11 @@ def _supported_runtimes(self) -> list[RUNTIMES]:

     def _bin_args(self, run_trace: TracingType = TracingType.NONE) -> list[str]:
         iters = self._get_iters(run_trace)
-        return [f"--iterations={iters}"] + [
-            f"--{k}={v}" for k, v in self._torch_params.items()
-        ]
+        return (
+            [f"--iterations={iters}"]
+            + [f"--profilerType={self._profiler_type.value}"]
+            + [f"--{k}={v}" for k, v in self._torch_params.items()]
+        )


 class TorchSingleQueue(TorchBenchmark):
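For context, a minimal sketch (not the repository's actual code) of how the new flag is assembled. It assumes _profiler_type is an Enum whose .value carries the CLI spelling; the ProfilerType name here is hypothetical, while the "timer" and "cpuCounter" values are taken from the test expectations further down:

from enum import Enum

class ProfilerType(Enum):
    # Hypothetical enum; the two values appear in the integration tests below.
    TIMER = "timer"
    CPU_COUNTER = "cpuCounter"

def bin_args(iters: int, profiler_type: ProfilerType, torch_params: dict) -> list[str]:
    # Mirrors the shape of _bin_args above: iterations, profiler type, then torch params.
    return (
        [f"--iterations={iters}"]
        + [f"--profilerType={profiler_type.value}"]
        + [f"--{k}={v}" for k, v in torch_params.items()]
    )

# bin_args(100, ProfilerType.TIMER, {"test": "KernelSubmitSingleQueue"})
# -> ['--iterations=100', '--profilerType=timer', '--test=KernelSubmitSingleQueue']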
devops/scripts/benchmarks/main.py (4 changes: 2 additions & 2 deletions)
@@ -107,8 +107,8 @@ def run_iterations(
     Unless options.exit_on_failure is set, then exception is raised.
     """

+    log.info(f"Running '{benchmark.name()}' {iters}x iterations...")
     for iter in range(iters):
-        log.info(f"running {benchmark.name()}, iteration {iter}... ")
         try:
             bench_results = benchmark.run(
                 env_vars, run_trace=run_trace, force_trace=force_trace
@@ -145,7 +145,7 @@ def run_iterations(
             log.error(f"{failure_label}: verification failed: {str(e)}.")
             continue

-    # Iterations completed successfully
+    log.info(f"Completed '{benchmark.name()}' {iters}x iterations")
     return True


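Net effect of the main.py change: the per-iteration log line inside the loop gives way to a single start/finish pair around it. A simplified sketch of the new logging shape (an assumed simplification; the real function also handles tracing, retries, and result verification):

import logging

log = logging.getLogger(__name__)  # stand-in for the repo's logging helper

def run_iterations_sketch(benchmark, env_vars, iters: int) -> bool:
    # One summary line before the loop replaces the old per-iteration message.
    log.info(f"Running '{benchmark.name()}' {iters}x iterations...")
    for _ in range(iters):
        benchmark.run(env_vars)  # each iteration still runs and is validated
    log.info(f"Completed '{benchmark.name()}' {iters}x iterations")
    return True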
devops/scripts/benchmarks/tests/test_integration.py (37 changes: 29 additions & 8 deletions)
@@ -1,4 +1,4 @@
-# Copyright (C) 2025 Intel Corporation
+# Copyright (C) 2025-2026 Intel Corporation
 # Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -63,7 +63,7 @@ def remove_dirs(self):
             if d is not None:
                 shutil.rmtree(d, ignore_errors=True)

-    def run_main(self, *args):
+    def run_main(self, *args) -> subprocess.CompletedProcess:

         # TODO: not yet tested: "--detect-version", "sycl,compute_runtime"

@@ -101,9 +101,9 @@ def run_main(self, *args):
"MAIN_PY_STDERR:",
"\n" + proc.stderr.decode() if proc.stderr else " <empty>",
)
return proc.returncode
return proc

def get_output(self):
def get_benchmark_output_data(self):
with open(os.path.join(self.OUTPUT_DIR, "data.json")) as f:
out = json.load(f)
return DataJson(
@@ -169,11 +169,30 @@ def _checkGroup(
     def _checkResultsExist(self, caseName: str, out: DataJson):
         self.assertIn(caseName, [r.name for r in out.runs[0].results])

-    def _checkCase(self, caseName: str, groupName: str, tags: set[str]):
-        run_result = self.app.run_main("--filter", caseName + "$")
-        self.assertEqual(run_result, 0, "Subprocess did not exit cleanly")
+    def _checkExistsInProcessOutput(
+        self, proc: subprocess.CompletedProcess, expected: str
+    ):
+        """
+        Check that the expected regex matches somewhere in the process output.
+        Useful for verifying e.g. that the expected params are passed to the benchmark's binary invocation.
+        """
+        stdout = proc.stdout.decode()
+        self.assertRegex(stdout, expected, "Expected string not found in output")
+
+    def _checkCase(
+        self,
+        caseName: str,
+        groupName: str,
+        tags: set[str],
+        expected_in_output: str = None,
+    ):
+        return_proc = self.app.run_main("--filter", caseName + "$")
+        self.assertEqual(return_proc.returncode, 0, "Subprocess did not exit cleanly")
+
+        if expected_in_output:
+            self._checkExistsInProcessOutput(return_proc, expected_in_output)

-        out = self.app.get_output()
+        out = self.app.get_benchmark_output_data()
         self._checkResultsExist(caseName, out)

         metadata = out.metadata[caseName]
@@ -199,11 +218,13 @@ def test_torch_l0(self):
"torch_benchmark_l0 KernelSubmitSingleQueue KernelBatchSize 512, KernelDataType Int32, KernelName Add, KernelParamsNum 5, KernelSubmitPattern Single, KernelWGCount 4096, KernelWGSize 512",
"KernelSubmitSingleQueue Int32Large",
{"pytorch", "L0"},
"--test=KernelSubmitSingleQueue.*--profilerType=timer",
)
self._checkCase(
"torch_benchmark_l0 KernelSubmitSingleQueue KernelBatchSize 512, KernelDataType Int32, KernelName Add, KernelParamsNum 5, KernelSubmitPattern Single, KernelWGCount 4096, KernelWGSize 512 CPU count",
"KernelSubmitSingleQueue Int32Large, CPU count",
{"pytorch", "L0"},
"--test=KernelSubmitSingleQueue.*--profilerType=cpuCounter",
)
self._checkCase(
"torch_benchmark_l0 KernelSubmitMultiQueue kernelsPerQueue 20, workgroupCount 4096, workgroupSize 512",
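A self-contained illustration of the check that the new expected_in_output parameter enables. It assumes, as the utils.py change below suggests, that the runner logs the full benchmark command line at info level so the flags land in the captured stdout; the echo stand-in below is purely hypothetical:

import subprocess
import unittest

class ExampleOutputCheck(unittest.TestCase):
    def test_flag_reaches_binary(self):
        # Stand-in for run_main(): echo a command line like the one the runner logs.
        proc = subprocess.run(
            ["echo", "Running: --test=KernelSubmitSingleQueue --profilerType=timer"],
            capture_output=True,
        )
        # Same assertion style as _checkExistsInProcessOutput: regex must match stdout.
        self.assertRegex(
            proc.stdout.decode(),
            r"--test=KernelSubmitSingleQueue.*--profilerType=timer",
        )

if __name__ == "__main__":
    unittest.main()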
devops/scripts/benchmarks/utils/utils.py (2 changes: 1 addition & 1 deletion)
@@ -71,7 +71,7 @@ def run(
     command_str = " ".join(command)
     env_str = " ".join(f"{key}={value}" for key, value in env_vars.items())
     full_command_str = f"{env_str} {command_str}".strip()
-    log.debug(f"Running: {full_command_str}")
+    log.info(f"Running: {full_command_str}")

     for key, value in env_vars.items():
         # Only PATH and LD_LIBRARY_PATH should be prepended to existing values
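For context on the comment about PATH and LD_LIBRARY_PATH: a hedged sketch of the prepend-versus-overwrite logic it describes (assumed shape only; the real run() in utils.py takes more parameters and also executes the command):

import os

def build_env(env_vars: dict[str, str]) -> dict[str, str]:
    env = os.environ.copy()
    for key, value in env_vars.items():
        if key in ("PATH", "LD_LIBRARY_PATH") and env.get(key):
            # Prepend so caller-supplied paths take priority while existing entries survive.
            env[key] = f"{value}{os.pathsep}{env[key]}"
        else:
            env[key] = value
    return env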