add building compute-runtime UMD in benchmarks jobs
pbalcer committed Jan 17, 2025
1 parent 023a847 commit 9e06068
Showing 16 changed files with 182 additions and 46 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/benchmarks-reusable.yml
@@ -34,6 +34,10 @@ on:
         required: false
         type: boolean
         default: false
+      compute_runtime_commit:
+        required: false
+        type: string
+        default: ''
 
 permissions:
   contents: read
@@ -200,6 +204,7 @@ jobs:
           --ur ${{ github.workspace }}/ur_install
           --umf ${{ github.workspace }}/umf_build
           --adapter ${{ matrix.adapter.str_name }}
+          --compute-runtime ${{ inputs.compute_runtime_commit }}
           ${{ inputs.upload_report && '--output-html' || '' }}
           ${{ inputs.bench_script_params }}
6 changes: 6 additions & 0 deletions .github/workflows/benchmarks.yml
@@ -43,6 +43,11 @@ on:
         type: string
         required: false
         default: ''
+      compute_runtime_commit:
+        description: 'Compute Runtime commit'
+        type: string
+        required: false
+        default: ''
       upload_report:
         description: 'Upload HTML report'
         type: boolean
@@ -65,4 +70,5 @@ jobs:
       sycl_config_params: ${{ inputs.sycl_config_params }}
       sycl_repo: ${{ inputs.sycl_repo }}
       sycl_commit: ${{ inputs.sycl_commit }}
+      compute_runtime_commit: ${{ inputs.compute_runtime_commit }}
       upload_report: ${{ inputs.upload_report }}
15 changes: 10 additions & 5 deletions scripts/benchmarks/benches/base.py
@@ -7,7 +7,7 @@
 import shutil
 from pathlib import Path
 from .result import Result
-from .options import options
+from options import options
 from utils.utils import download, run
 import urllib.request
 import tarfile
@@ -28,17 +28,22 @@ def get_adapter_full_path():
             f"could not find adapter file {adapter_path} (and in similar lib paths)"

     def run_bench(self, command, env_vars, ld_library=[], add_sycl=True):
-        env_vars_with_forced_adapter = env_vars.copy()
+        env_vars = env_vars.copy()
         if options.ur is not None:
-            env_vars_with_forced_adapter.update(
+            env_vars.update(
                 {'UR_ADAPTERS_FORCE_LOAD': Benchmark.get_adapter_full_path()})
+
+        env_vars.update(options.extra_env_vars)
+
+        ld_libraries = options.extra_ld_libraries.copy()
+        ld_libraries.extend(ld_library)

         return run(
             command=command,
-            env_vars=env_vars_with_forced_adapter,
+            env_vars=env_vars,
             add_sycl=add_sycl,
             cwd=options.benchmark_cwd,
-            ld_library=ld_library
+            ld_library=ld_libraries
         ).stdout.decode()

     def create_data_path(self, name, skip_data_dir = False):
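A note on the run_bench() change above: the benchmark's environment now merges in options.extra_env_vars (so globally configured variables win on conflict), and options.extra_ld_libraries is placed ahead of the benchmark's own ld_library entries. A minimal standalone sketch of that merge order, with illustrative values (the variable name and paths below are assumptions, not from the repository):

    # Stand-ins for options.extra_env_vars / options.extra_ld_libraries,
    # e.g. as populated from a custom-built compute-runtime (illustrative):
    extra_env_vars = {'ZE_ENABLE_ALT_DRIVERS': '/opt/cr/libze_intel_gpu.so'}
    extra_ld_libraries = ['/opt/cr/lib']

    def merged_run_env(env_vars: dict, ld_library: list) -> tuple[dict, list]:
        env_vars = env_vars.copy()                # keep the caller's dict untouched
        env_vars.update(extra_env_vars)           # global extras override per-benchmark vars
        ld_libraries = extra_ld_libraries.copy()  # global library paths come first
        ld_libraries.extend(ld_library)           # then the benchmark's own paths
        return env_vars, ld_libraries

    print(merged_run_env({'UR_LOG_LEVEL': 'level:info'}, ['/tmp/bench/lib']))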
4 changes: 2 additions & 2 deletions scripts/benchmarks/benches/compute.py
@@ -9,7 +9,7 @@
 from utils.utils import run, git_clone, create_build_path
 from .base import Benchmark, Suite
 from .result import Result
-from .options import options
+from options import options

 class ComputeBench(Suite):
     def __init__(self, directory):
@@ -22,7 +22,7 @@ def setup(self):
         if options.sycl is None:
             return

-        repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "df38bc342641d7e83fbb4fe764a23d21d734e07b")
+        repo_path = git_clone(self.directory, "compute-benchmarks-repo", "https://github.com/intel/compute-benchmarks.git", "d13e5b4d8dd3d28926a74ab7f67f78c10f708a01")
         build_path = create_build_path(self.directory, 'compute-benchmarks-build')

         configure_command = [
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/llamacpp.py
@@ -10,7 +10,7 @@
 from .base import Benchmark, Suite
 from .result import Result
 from utils.utils import run, create_build_path
-from .options import options
+from options import options
 from .oneapi import get_oneapi
 import os

2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/oneapi.py
@@ -5,7 +5,7 @@

 from pathlib import Path
 from utils.utils import download, run
-from .options import options
+from options import options
 import os

 class OneAPI:
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/syclbench.py
@@ -9,7 +9,7 @@
 from utils.utils import run, git_clone, create_build_path
 from .base import Benchmark, Suite
 from .result import Result
-from .options import options
+from options import options

 class SyclBench(Suite):
     def __init__(self, directory):
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/test.py
@@ -8,7 +8,7 @@
 from .base import Benchmark, Suite
 from .result import Result
 from utils.utils import run, create_build_path
-from .options import options
+from options import options
 import os

 class TestSuite(Suite):
2 changes: 1 addition & 1 deletion scripts/benchmarks/benches/umf.py
@@ -8,7 +8,7 @@
 from .base import Benchmark, Suite
 from .result import Result
 from utils.utils import run, create_build_path
-from .options import options
+from options import options
 from .oneapi import get_oneapi
 import os
 import csv
28 changes: 15 additions & 13 deletions scripts/benchmarks/benches/velocity.py
@@ -9,7 +9,7 @@
 from .base import Benchmark, Suite
 from .result import Result
 from utils.utils import run, create_build_path
-from .options import options
+from options import options
 from .oneapi import get_oneapi
 import shutil

@@ -54,7 +54,6 @@ def __init__(self, name: str, bin_name: str, vb: VelocityBench, unit: str):
         self.bench_name = name
         self.bin_name = bin_name
         self.unit = unit
-        self.code_path = os.path.join(self.vb.repo_path, self.bench_name, 'SYCL')

     def download_deps(self):
         return
@@ -66,6 +65,7 @@ def ld_libraries(self) -> list[str]:
         return []

     def setup(self):
+        self.code_path = os.path.join(self.vb.repo_path, self.bench_name, 'SYCL')
         self.download_deps()
         self.benchmark_bin = os.path.join(self.directory, self.bench_name, self.bin_name)

@@ -130,12 +130,13 @@ def parse_output(self, stdout: str) -> float:
 class Bitcracker(VelocityBase):
     def __init__(self, vb: VelocityBench):
         super().__init__("bitcracker", "bitcracker", vb, "s")
-        self.data_path = os.path.join(vb.repo_path, "bitcracker", "hash_pass")

     def name(self):
         return "Velocity-Bench Bitcracker"

     def bin_args(self) -> list[str]:
+        self.data_path = os.path.join(self.vb.repo_path, "bitcracker", "hash_pass")
+
         return ["-f", f"{self.data_path}/img_win8_user_hash.txt",
                 "-d", f"{self.data_path}/user_passwords_60000.txt",
                 "-b", "60000"]
@@ -175,7 +176,6 @@ def parse_output(self, stdout: str) -> float:
 class QuickSilver(VelocityBase):
     def __init__(self, vb: VelocityBench):
         super().__init__("QuickSilver", "qs", vb, "MMS/CTT")
-        self.data_path = os.path.join(vb.repo_path, "QuickSilver", "Examples", "AllScattering")

     def run(self, env_vars) -> list[Result]:
         # TODO: fix the crash in QuickSilver when UR_L0_USE_IMMEDIATE_COMMANDLISTS=0
@@ -191,6 +191,8 @@ def lower_is_better(self):
         return False

     def bin_args(self) -> list[str]:
+        self.data_path = os.path.join(self.vb.repo_path, "QuickSilver", "Examples", "AllScattering")
+
         return ["-i", f"{self.data_path}/scatteringOnly.inp"]

     def extra_env_vars(self) -> dict:
@@ -266,20 +268,20 @@ def parse_output(self, stdout: str) -> float:

 class DLCifar(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        self.oneapi = get_oneapi()
         super().__init__("dl-cifar", "dl-cifar_sycl", vb, "s")

     def ld_libraries(self):
-        return self.oneapi.ld_libraries()
+        return get_oneapi().ld_libraries()

     def download_deps(self):
         # TODO: dl-cifar hardcodes the path to this dataset as "../../datasets/cifar-10-binary"...
         self.download("datasets", "https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz", "cifar-10-binary.tar.gz", untar=True, skip_data_dir=True)
         return

     def extra_cmake_args(self):
+        oneapi = get_oneapi()
         return [
-            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{self.oneapi.dnn_include()} -I{self.oneapi.mkl_include()} -L{self.oneapi.dnn_lib()} -L{self.oneapi.mkl_lib()}"
+            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{oneapi.dnn_include()} -I{oneapi.mkl_include()} -L{oneapi.dnn_lib()} -L{oneapi.mkl_lib()}"
         ]

     def name(self):
@@ -294,11 +296,10 @@ def parse_output(self, stdout: str) -> float:

 class DLMnist(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        self.oneapi = get_oneapi()
         super().__init__("dl-mnist", "dl-mnist-sycl", vb, "s")

     def ld_libraries(self):
-        return self.oneapi.ld_libraries()
+        return get_oneapi().ld_libraries()

     def download_deps(self):
         # TODO: dl-mnist hardcodes the path to this dataset as "../../datasets/"...
@@ -308,8 +309,9 @@ def download_deps(self):
         self.download("datasets", "https://raw.githubusercontent.com/fgnt/mnist/master/t10k-labels-idx1-ubyte.gz", "t10k-labels.idx1-ubyte.gz", unzip=True, skip_data_dir=True)

     def extra_cmake_args(self):
+        oneapi = get_oneapi()
         return [
-            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{self.oneapi.dnn_include()} -I{self.oneapi.mkl_include()} -L{self.oneapi.dnn_lib()} -L{self.oneapi.mkl_lib()}"
+            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{oneapi.dnn_include()} -I{oneapi.mkl_include()} -L{oneapi.dnn_lib()} -L{oneapi.mkl_lib()}"
         ]

     def name(self):
@@ -337,15 +339,15 @@ def parse_output(self, stdout: str) -> float:

 class SVM(VelocityBase):
     def __init__(self, vb: VelocityBench):
-        self.oneapi = get_oneapi()
         super().__init__("svm", "svm_sycl", vb, "s")

     def ld_libraries(self):
-        return self.oneapi.ld_libraries()
+        return get_oneapi().ld_libraries()

     def extra_cmake_args(self):
+        oneapi = get_oneapi()
         return [
-            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{self.oneapi.dnn_include()} -I{self.oneapi.mkl_include()} -L{self.oneapi.dnn_lib()} -L{self.oneapi.mkl_lib()}"
+            f"-DCMAKE_CXX_FLAGS=-O3 -fsycl -ffast-math -I{oneapi.dnn_include()} -I{oneapi.mkl_include()} -L{oneapi.dnn_lib()} -L{oneapi.mkl_lib()}"
         ]

     def name(self):
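The recurring pattern in these velocity.py changes is deferred initialization: paths derived from the cloned repository (code_path, data_path) and the oneAPI handle are no longer computed in __init__, so benchmark objects can be constructed and filtered before anything is cloned or downloaded. A minimal sketch of the pattern (class and attribute names are illustrative, not from the repository):

    import os

    class LazyBench:
        def __init__(self, repo_path: str):
            self.repo_path = repo_path      # cheap: the repo may not exist on disk yet

        def setup(self):
            # derived paths and expensive work happen only for benchmarks
            # that survived filtering and will actually run
            self.code_path = os.path.join(self.repo_path, 'SYCL')

    b = LazyBench('/tmp/velocity-bench')
    # b can be listed and filtered here without touching the filesystem
    b.setup()
    print(b.code_path)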
2 changes: 1 addition & 1 deletion scripts/benchmarks/history.py
@@ -7,7 +7,7 @@
 import json
 from pathlib import Path
 from benches.result import Result, BenchmarkRun
-from benches.options import Compare, options
+from options import Compare, options
 from datetime import datetime, timezone
 from utils.utils import run;

41 changes: 24 additions & 17 deletions scripts/benchmarks/main.py
@@ -11,11 +11,12 @@
 from benches.llamacpp import *
 from benches.umf import *
 from benches.test import TestSuite
-from benches.options import Compare, options
+from options import Compare, options
 from output_markdown import generate_markdown
 from output_html import generate_html
 from history import BenchmarkHistory
-from utils.utils import prepare_workdir;
+from utils.utils import prepare_workdir
+from utils.compute_runtime import *

 import argparse
 import re
@@ -117,6 +118,13 @@ def process_results(results: dict[str, list[Result]], stddev_threshold_override)
 def main(directory, additional_env_vars, save_name, compare_names, filter):
     prepare_workdir(directory, INTERNAL_WORKDIR_VERSION)

+    if options.build_compute_runtime:
+        print(f"Setting up Compute Runtime {options.compute_runtime_tag}")
+        cr = get_compute_runtime()
+        print("Compute Runtime setup complete.")
+        options.extra_ld_libraries.extend(cr.ld_libraries())
+        options.extra_env_vars.update(cr.env_vars())
+
     suites = [
         ComputeBench(directory),
         VelocityBench(directory),
@@ -129,15 +137,15 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
     benchmarks = []

     for s in suites:
-        print(f"Setting up {type(s).__name__}")
-        s.setup()
-        print(f"{type(s).__name__} setup complete.")
-
-    for s in suites:
-        benchmarks += s.benchmarks()
+        suite_benchmarks = s.benchmarks()
+        if filter:
+            suite_benchmarks = [benchmark for benchmark in suite_benchmarks if filter.search(benchmark.name())]

-    if filter:
-        benchmarks = [benchmark for benchmark in benchmarks if filter.search(benchmark.name())]
+        if suite_benchmarks:
+            print(f"Setting up {type(s).__name__}")
+            s.setup()
+            print(f"{type(s).__name__} setup complete.")
+            benchmarks += suite_benchmarks

     for b in benchmarks:
         print(b.name())
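The reworked loop above filters each suite's benchmark list first, then runs the potentially expensive setup() only for suites that still have benchmarks left. A compact, self-contained sketch of that control flow with stub classes (all names and the filter pattern are illustrative):

    import re

    class Suite:
        def __init__(self, names): self.names = names
        def benchmarks(self): return self.names
        def setup(self): print(f"setting up {self.names}")

    suites = [Suite(['a_fast', 'a_slow']), Suite(['b_other'])]
    flt = re.compile('a_')
    benchmarks = []
    for s in suites:
        selected = [n for n in s.benchmarks() if flt.search(n)]
        if selected:          # setup is skipped entirely for filtered-out suites
            s.setup()
            benchmarks += selected
    print(benchmarks)         # ['a_fast', 'a_slow']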
@@ -241,7 +249,7 @@ def validate_and_parse_env_args(env_args):
     parser.add_argument("--save", type=str, help='Save the results for comparison under a specified name.')
     parser.add_argument("--compare", type=str, help='Compare results against previously saved data.', action="append", default=["baseline"])
     parser.add_argument("--iterations", type=int, help='Number of times to run each benchmark to select a median value.', default=options.iterations)
-    parser.add_argument("--stddev-threshold", type=float, help='If stddev % is above this threshold, rerun all iterations', default=options.stddev_threshold)
+    parser.add_argument("--stddev-threshold", type=float, help='If stddev pct is above this threshold, rerun all iterations', default=options.stddev_threshold)
     parser.add_argument("--timeout", type=int, help='Timeout for individual benchmarks in seconds.', default=options.timeout)
     parser.add_argument("--filter", type=str, help='Regex pattern to filter benchmarks by name.', default=None)
     parser.add_argument("--epsilon", type=float, help='Threshold to consider change of performance significant', default=options.epsilon)
@@ -252,12 +260,8 @@ def validate_and_parse_env_args(env_args):
     parser.add_argument("--output-html", help='Create HTML output', action="store_true", default=False)
     parser.add_argument("--output-markdown", help='Create Markdown output', action="store_true", default=True)
     parser.add_argument("--dry-run", help='Do not run any actual benchmarks', action="store_true", default=False)
-    parser.add_argument(
-        "--iterations-stddev",
-        type=int,
-        help="Max number of iterations of the loop calculating stddev after completed benchmark runs",
-        default=options.iterations_stddev,
-    )
+    parser.add_argument("--compute-runtime", nargs='?', const=options.compute_runtime_tag, help="Fetch and build compute runtime")
+    parser.add_argument("--iterations-stddev", type=int, help="Max number of iterations of the loop calculating stddev after completed benchmark runs")

     args = parser.parse_args()
     additional_env_vars = validate_and_parse_env_args(args.env)
@@ -279,6 +283,9 @@ def validate_and_parse_env_args(env_args):
     options.dry_run = args.dry_run
     options.umf = args.umf
     options.iterations_stddev = args.iterations_stddev
+    if args.compute_runtime is not None:
+        options.build_compute_runtime = True
+        options.compute_runtime_tag = args.compute_runtime

     benchmark_filter = re.compile(args.filter) if args.filter else None

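The --compute-runtime flag above relies on argparse's nargs='?' with const: passing the bare flag selects the default tag, passing a value overrides it, and omitting it leaves the attribute None so build_compute_runtime stays False. A small self-contained sketch of that behavior (the tag string is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    # const is used when the flag is given without a value
    parser.add_argument("--compute-runtime", nargs='?', const='default-tag')

    print(parser.parse_args([]).compute_runtime)                             # None -> feature stays off
    print(parser.parse_args(['--compute-runtime']).compute_runtime)          # 'default-tag'
    print(parser.parse_args(['--compute-runtime', 'mytag']).compute_runtime) # 'mytag'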
6 changes: 5 additions & 1 deletion scripts/benchmarks/benches/options.py → scripts/benchmarks/options.py
@@ -1,4 +1,4 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum

 class Compare(Enum):
@@ -27,6 +27,10 @@ class Options:
     stddev_threshold: float = 0.02
     epsilon: float = 0.02
     iterations_stddev: int = 5
+    build_compute_runtime: bool = False
+    extra_ld_libraries: list[str] = field(default_factory=list)
+    extra_env_vars: dict = field(default_factory=dict)
+    compute_runtime_tag: str = 'c1ed0334d65f6ce86d7273fe4137d1d4a5b5fa7c'

 options = Options()
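One detail worth noting in the Options change: dataclasses disallow mutable defaults such as list or dict, since they would be shared across instances, which is why the new fields use field(default_factory=...) and why the field import was added above. A minimal sketch:

    from dataclasses import dataclass, field

    @dataclass
    class Options:
        extra_ld_libraries: list[str] = field(default_factory=list)  # fresh list per instance
        extra_env_vars: dict = field(default_factory=dict)           # fresh dict per instance

    a, b = Options(), Options()
    a.extra_ld_libraries.append('/opt/lib')
    print(b.extra_ld_libraries)  # [] -- instances do not share state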

2 changes: 1 addition & 1 deletion scripts/benchmarks/output_markdown.py
@@ -5,7 +5,7 @@

 import collections, re
 from benches.result import Result
-from benches.options import options
+from options import options
 import math

 class OutputLine:
(Diffs for the remaining 2 changed files were not loaded on the page.)
