Skip to content

Commit

Permalink
Start working on YAML test plans
Browse files Browse the repository at this point in the history
  • Loading branch information
EdmundGoodman committed Feb 20, 2024
1 parent a37f7a6 commit a75dcb5
Show file tree
Hide file tree
Showing 4 changed files with 611 additions and 17 deletions.
2 changes: 1 addition & 1 deletion src/hpc_multibench/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def get_parser() -> ArgumentParser:
mutex_group.add_argument(
"-m",
"--mode",
type=Mode,
type=Mode, # TODO: This needs fixing to actually work...
default=Mode.RUN,
help="the mode to run the tool in (default: run)",
)
Expand Down
190 changes: 174 additions & 16 deletions yaml_examples/kudu_plan.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -99,24 +99,143 @@ run_configurations:
run_command: "mpirun -n 2 ./target/release/hpccg-rs"

# Benchmark plans built from the run configurations defined above.
benches:
# NOTE(review): this key has no value and no visible nested content, so it
# parses as null. Either it is a stale leftover and should be removed, or the
# benches that follow ("strong-scaling", "weak-scaling") were meant to be
# nested under it — indentation was stripped in this diff view, so confirm
# the intended nesting against the tool's plan schema.
"cpp-rust-comp":
# "serial":
# run_configurations:
# - "cpp-hybrid"
# - "rust-mpi" # TODO: Make rust hybrid version
# matrix:
# args:
# - "100 100 100"
# - "200 200 200"
# - "300 300 300"
# - "400 400 400"
# - "500 500 500"
# sbatch_config:
# - "nodes": 2
# "mem-per-cpu": 1000
# analysis:
# metrics:
# "Mesh x size": "nx: (\\d+)"
# "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# plot:
# x: "Mesh x size"
# y: "Total time (s)"


# "parallel":
# run_configurations:
# - "cpp-hybrid"
# - "rust-mpi" # TODO: Make rust hybrid version
# matrix:
# args:
# - "100 100 100"
# - "200 200 200"
# - "300 300 300"
# - "400 400 400"
# - "500 500 500"
# sbatch_config:
# - "nodes": 2
# "mem-per-cpu": 1000
# analysis:
# metrics:
# "Mesh x size": "nx: (\\d+)"
# "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# plot:
# x: "Mesh x size"
# y: "Total time (s)"


# "rust-techniques":
# run_configurations:
# - "cpp-hybrid"
# - "rust-mpi" # TODO: Make rust hybrid version
# matrix:
# args:
# - "100 100 100"
# - "200 200 200"
# - "300 300 300"
# - "400 400 400"
# - "500 500 500"
# sbatch_config:
# - "nodes": 2
# "mem-per-cpu": 1000
# analysis:
# metrics:
# "Mesh x size": "nx: (\\d+)"
# "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# plot:
# x: "Mesh x size"
# y: "Total time (s)"


# "mpi":
# run_configurations:
# - "cpp-hybrid"
# - "rust-mpi" # TODO: Make rust hybrid version
# matrix:
# args:
# - "100 100 100"
# - "200 200 200"
# - "300 300 300"
# - "400 400 400"
# - "500 500 500"
# sbatch_config:
# - "nodes": 2
# "mem-per-cpu": 1000
# analysis:
# metrics:
# "Mesh x size": "nx: (\\d+)"
# "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# plot:
# x: "Mesh x size"
# y: "Total time (s)"


# Strong-scaling study: the z-extent halves each row as the rank count
# doubles (nz * np = 1024 throughout), so — assuming HPCCG mesh arguments are
# per-rank sizes (TODO confirm) — the total problem size is held fixed and
# ideal scaling halves "Total time (s)" per row.
"strong-scaling":
  run_configurations:
    - "cpp-hybrid"
  matrix:
    # NOTE(review): the original wrote the flow sequence `[args, run_command]:`
    # as a mapping key. A sequence is not a valid plain key for most loaders —
    # PyYAML's safe_load fails with "found unhashable key" — so the file would
    # not load at all. Quoting it keeps the zipped-variables intent as a string
    # key for the tool to split itself; confirm against the plan-schema parser.
    "[args, run_command]":
      - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
      - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
      - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
      - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
      - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
      - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
      - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
  analysis:
    metrics:
      # Each regex captures one numeric value from a run's recorded output;
      # the [\s\S]* spans let a pattern reach across lines between a section
      # header and the labelled row it anchors on.
      "Mesh x size": "nx: (\\d+)"
      "Mesh y size": "ny: (\\d+)"
      "Mesh z size": "nz: (\\d+)"
      "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
      "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
      "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
      "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
      "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
      "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
      "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
      "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
      "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
      "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
      "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
      "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
    plot:
      x: "Mesh x size"
      y: "Total time (s)"

"weak-scaling":
run_configurations:
- "cpp-reference"
- "cpp-openmp"
- "cpp-mpi"
# - "cpp-hybrid"
- "rust-reference"
- "rust-rayon"
- "rust-mpi"
- "cpp-hybrid"
matrix:
args:
- "100 100 100"
- "200 200 200"
sbatch_config:
- "nodes": 2
"mem-per-cpu": 1000
- "nodes": 1
"mem-per-cpu": 2000
run_command:
- "mpirun -np 1 ./test_HPCCG"
- "mpirun -np 2 ./test_HPCCG"
- "mpirun -np 4 ./test_HPCCG"
- "mpirun -np 8 ./test_HPCCG"
- "mpirun -np 16 ./test_HPCCG"
- "mpirun -np 32 ./test_HPCCG"
- "mpirun -np 64 ./test_HPCCG"
analysis:
metrics:
"Mesh x size": "nx: (\\d+)"
Expand All @@ -137,3 +256,42 @@ benches:
plot:
x: "Mesh x size"
y: "Total time (s)"

# "all":
# run_configurations:
# - "cpp-reference"
# - "cpp-openmp"
# - "cpp-mpi"
# - "rust-reference"
# - "rust-rayon"
# - "rust-mpi"
# matrix:
# args:
# - "100 100 100"
# - "200 200 200"
# - "300 300 300"
# - "400 400 400"
# - "500 500 500"
# sbatch_config:
# - "nodes": 2
# "mem-per-cpu": 1000
# analysis:
# metrics:
# "Mesh x size": "nx: (\\d+)"
# "Mesh y size": "ny: (\\d+)"
# "Mesh z size": "nz: (\\d+)"
# "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
# "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
# "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
# "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
# "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
# "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
# "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
# "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
# "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
# plot:
# x: "Mesh x size"
# y: "Total time (s)"
139 changes: 139 additions & 0 deletions yaml_examples/kudu_single.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
#defaults:
# "default name":
# Run configurations: one entry per implementation variant. Each entry gives
# the Slurm sbatch resources, modules to load, environment variables, the
# source directory, build commands, and the command used to launch the run.
run_configurations:
# Baseline serial C++ implementation.
"cpp-reference":
sbatch_config:
"nodes": 1
"ntasks-per-node": 1
"cpus-per-task": 1
"mem-per-cpu": 3700
module_loads: []
environment_variables: {}
directory: "../0_cpp_versions/0_ref"
build_commands:
- "make -j 8"
run_command: "./test_HPCCG"

# C++ OpenMP build: one task, 16 CPUs, thread count matched via OMP_NUM_THREADS.
"cpp-openmp":
sbatch_config:
"nodes": 1
"ntasks-per-node": 1
"cpus-per-task": 16
"mem-per-cpu": 3700
module_loads: []
environment_variables:
# NOTE(review): numeric 16 — if the consumer exports environment variables
# verbatim it may expect a string ("16"); confirm how this is serialized.
"OMP_NUM_THREADS": 16
directory: "../0_cpp_versions/1_openmp"
build_commands:
- "make -j 8"
run_command: "./test_HPCCG"

# C++ MPI build.
"cpp-mpi":
sbatch_config:
# NOTE(review): 2 nodes x 8 tasks-per-node = 16 Slurm tasks requested, but
# run_command launches only 2 ranks ("mpirun -n 2") — confirm the
# over-allocation is intended rather than a leftover.
"nodes": 2
"ntasks-per-node": 8
"cpus-per-task": 1
"mem-per-cpu": 3700
module_loads:
- "cs402-mpi"
environment_variables: {}
directory: "../0_cpp_versions/2_mpi"
build_commands:
- "make -j 8"
run_command: "mpirun -n 2 ./test_HPCCG"

# C++ hybrid MPI + OpenMP build: 2 ranks with 2 OpenMP threads each.
"cpp-hybrid":
sbatch_config:
"nodes": 2
"ntasks-per-node": 4
"cpus-per-task": 2
"mem-per-cpu": 3700
module_loads:
- "cs402-mpi"
environment_variables:
"OMP_NUM_THREADS": 2
directory: "../0_cpp_versions/3_hybrid"
build_commands:
- "make -j 8"
run_command: "mpirun -n 2 ./test_HPCCG"

# Serial Rust translation.
"rust-reference":
sbatch_config:
"nodes": 1
"ntasks-per-node": 1
"cpus-per-task": 1
"mem-per-cpu": 3700
module_loads: []
environment_variables: {}
directory: "../5_iterators"
build_commands:
- "cargo build --release"
# NOTE(review): unlike rust-mpi (which runs the built binary directly), this
# launches via `cargo run`, whose own status output can interleave with the
# stdout the analysis regexes parse — confirm intended.
run_command: "cargo run --release"

# Rust data-parallel build.
"rust-rayon":
sbatch_config:
"nodes": 1
"ntasks-per-node": 1
"cpus-per-task": 16
"mem-per-cpu": 3700
module_loads: []
environment_variables:
# NOTE(review): Rayon reads RAYON_NUM_THREADS, not OMP_NUM_THREADS — unless
# the program translates this variable itself, the setting has no effect
# and Rayon will size its pool from the visible CPUs. TODO confirm.
"OMP_NUM_THREADS": 16
directory: "../6_parallel"
build_commands:
- "cargo build --release"
run_command: "cargo run --release"

# Rust MPI build; runs the release binary directly.
"rust-mpi":
sbatch_config:
# NOTE(review): same 16-tasks-requested vs 2-ranks-launched mismatch as
# cpp-mpi above — confirm intended.
"nodes": 2
"ntasks-per-node": 8
"cpus-per-task": 1
"mem-per-cpu": 3700
module_loads:
- "cs402-mpi"
environment_variables: {}
directory: "../7_mpi"
build_commands:
- "cargo build --release"
run_command: "mpirun -n 2 ./target/release/hpccg-rs"

# Benchmark plans built from the run configurations above.
benches:
# Head-to-head comparison of the C++ and Rust variants over a small matrix of
# mesh sizes and sbatch resource combinations.
"cpp-rust-comp":
run_configurations:
- "cpp-reference"
- "cpp-openmp"
- "cpp-mpi"
# - "cpp-hybrid"
- "rust-reference"
- "rust-rayon"
- "rust-mpi"
matrix:
args:
- "100 100 100"
- "200 200 200"
sbatch_config:
# NOTE(review): each `-` entry below is meant to be a single two-key
# mapping ("nodes" paired with its "mem-per-cpu"); that grouping depends on
# indentation, which this diff view has stripped — verify in the real file.
- "nodes": 2
"mem-per-cpu": 1000
- "nodes": 1
"mem-per-cpu": 2000
analysis:
metrics:
# Each regex captures one numeric value from a run's recorded output; the
# [\s\S]* spans let a pattern reach across lines between a section header
# and the labelled row it anchors on.
"Mesh x size": "nx: (\\d+)"
"Mesh y size": "ny: (\\d+)"
"Mesh z size": "nz: (\\d+)"
"Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
"ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
"waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
"sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
"Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
"ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
"waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
"sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
"Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
"ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
"waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
"sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
plot:
x: "Mesh x size"
y: "Total time (s)"
Loading

0 comments on commit a75dcb5

Please sign in to comment.