diff --git a/src/hpc_multibench/test_bench.py b/src/hpc_multibench/test_bench.py
index 04c76ab..d4b7949 100755
--- a/src/hpc_multibench/test_bench.py
+++ b/src/hpc_multibench/test_bench.py
@@ -263,7 +263,7 @@ def report(self) -> None:
 
                 split_data: list[str] = [
                     f"{split_metric}={metrics[split_metric]}"
-                    for split_metric in plot.split_by
+                    for split_metric in plot.split_metrics
                 ]
                 series_name = (run_configuration.name, *split_data)
 
diff --git a/src/hpc_multibench/yaml_model.py b/src/hpc_multibench/yaml_model.py
index 4f7cfaf..0d00ab4 100755
--- a/src/hpc_multibench/yaml_model.py
+++ b/src/hpc_multibench/yaml_model.py
@@ -54,7 +54,8 @@ class LinePlotModel(BaseModel):
     title: str
     x: str
     y: str
-    split_by: list[str] = []
+    split_metrics: list[str] = []
+    # fix_metrics: dict[str, Any] = {}
 
 
 # class RooflinePlotModel(BaseModel):
diff --git a/yaml_examples/kudu/simple_rust.yaml b/yaml_examples/kudu/simple_rust.yaml
index 826d71d..20e1085 100644
--- a/yaml_examples/kudu/simple_rust.yaml
+++ b/yaml_examples/kudu/simple_rust.yaml
@@ -195,7 +195,7 @@ benches:
       - title: "MPI Configuration Sweep"
         x: "Mesh x size"
         y: "Wall time (s)"
-        split_by:
+        split_metrics:
           - "Nodes"
           - "Tasks per Node"
 
@@ -229,6 +229,6 @@ benches:
       - title: "Parallel Implementation Comparison"
         x: "Mesh x size"
         y: "Wall time (s)"
-        split_by:
+        split_metrics:
          - "Tasks per Node"
          - "Threads"
diff --git a/yaml_examples/kudu/strong_weak_scaling.yaml b/yaml_examples/kudu/strong_weak_scaling.yaml
index a74a1e2..1f4cc5e 100644
--- a/yaml_examples/kudu/strong_weak_scaling.yaml
+++ b/yaml_examples/kudu/strong_weak_scaling.yaml
@@ -1,26 +1,10 @@
 run_configurations:
-  "cpp-mpi":
-    sbatch_config:
-      "nodes": 1
-      "ntasks-per-node": 16
-      "cpus-per-task": 2
-      "exclusive": "mcs"
-      "mem-per-cpu": 1875
-    module_loads:
-      - "cs402-mpi"
-    environment_variables: {}
-    directory: "../0_cpp_versions/2_mpi"
-    build_commands:
-      - "make -j 8"
-    run_command: "mpirun ./test_HPCCG"
-
   "cpp-hybrid":
     sbatch_config:
       "nodes": 1
-      "ntasks-per-node": 16
       "cpus-per-task": 2
       "exclusive": "mcs"
-      "mem-per-cpu": 1875
+      "mem": 60000
     module_loads:
       - "cs402-mpi"
     environment_variables:
@@ -33,17 +17,16 @@ run_configurations:
 benches:
   "strong-scaling":
     run_configurations:
-      # - "cpp-mpi"
       - "cpp-hybrid"
     matrix:
-      [args, run_command]:
-        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
-        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
-        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
-        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
-        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
-        # - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]  # Exceeds Kudu's resource capacity...
-        # - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
+      [args, sbatch_config]:
+        - ["64 64 1024", {"ntasks-per-node": 1}]
+        - ["64 64 512", {"ntasks-per-node": 2}]
+        - ["64 64 256", {"ntasks-per-node": 4}]
+        - ["64 64 128", {"ntasks-per-node": 8}]
+        - ["64 64 64", {"ntasks-per-node": 16}]
+        - ["64 64 32", {"ntasks-per-node": 32}]
+        - ["64 64 16", {"ntasks-per-node": 64}]
     analysis:
       metrics:
         "Mesh z size": "nz: (\\d+)"
@@ -57,24 +40,22 @@ benches:
 
   "weak-scaling":
     run_configurations:
-      # - "cpp-mpi"
       - "cpp-hybrid"
     matrix:
       args:
        - "64 64 64"
-      run_command:
-        - "mpirun -np 1 ./test_HPCCG"
-        - "mpirun -np 2 ./test_HPCCG"
-        - "mpirun -np 4 ./test_HPCCG"
-        - "mpirun -np 8 ./test_HPCCG"
-        - "mpirun -np 16 ./test_HPCCG"
-        # - "mpirun -np 32 ./test_HPCCG"  # Exceeds Kudu's resource capacity...
- # - "mpirun -np 64 ./test_HPCCG" + sbatch_config: + - {"ntasks-per-node": 1} + - {"ntasks-per-node": 2} + - {"ntasks-per-node": 4} + - {"ntasks-per-node": 8} + - {"ntasks-per-node": 16} + - {"ntasks-per-node": 32} + - {"ntasks-per-node": 64} analysis: metrics: - "MPI Ranks": "=== RUN INSTANTIATION ===\n\\{.*run_command: mpirun -np (\\d+).*\\}" - "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" "Wall time (s)": "real\\s([\\d\\.]+)\nuser" + "MPI Ranks": "=== RUN INSTANTIATION ===\n\\{.*sbatch_config: \\{.*ntasks-per-node: (\\d+).*\\}" line_plots: - title: "Weak Scaling Plot" x: "MPI Ranks"