From f95d54a32fb841ece29f12e73ba13eaa875f0aa0 Mon Sep 17 00:00:00 2001
From: EdmundGoodman
Date: Sun, 25 Feb 2024 19:40:17 +0000
Subject: [PATCH] Fiddle with YAML files defining test benches

---
 yaml_examples/kudu/cpp_kokkos_rust.yaml |   5 +-
 .../{ => kudu/incomplete}/example.yaml  |   0
 .../kudu/parallelism_strategies.yaml    | 150 +++++++++---------
 3 files changed, 80 insertions(+), 75 deletions(-)
 rename yaml_examples/{ => kudu/incomplete}/example.yaml (100%)

diff --git a/yaml_examples/kudu/cpp_kokkos_rust.yaml b/yaml_examples/kudu/cpp_kokkos_rust.yaml
index 6a6686a..454eb62 100644
--- a/yaml_examples/kudu/cpp_kokkos_rust.yaml
+++ b/yaml_examples/kudu/cpp_kokkos_rust.yaml
@@ -104,8 +104,9 @@ benches:
     matrix:
       args:
         - "100 100 100"
-        - "300 300 300"
-        - "500 500 500"
+        - "150 150 150"
+        - "200 200 200"
+        - "250 250 250"
     analysis:
       metrics:
         "Mesh x size": "nx: (\\d+)"
diff --git a/yaml_examples/example.yaml b/yaml_examples/kudu/incomplete/example.yaml
similarity index 100%
rename from yaml_examples/example.yaml
rename to yaml_examples/kudu/incomplete/example.yaml
diff --git a/yaml_examples/kudu/parallelism_strategies.yaml b/yaml_examples/kudu/parallelism_strategies.yaml
index f0ce804..924e1eb 100644
--- a/yaml_examples/kudu/parallelism_strategies.yaml
+++ b/yaml_examples/kudu/parallelism_strategies.yaml
@@ -128,8 +128,9 @@ benches:
     matrix:
       args:
         - "100 100 100"
-        - "300 300 300"
-        - "500 500 500"
+        - "150 150 150"
+        - "200 200 200"
+        - "250 250 250"
     analysis:
       metrics:
         "Mesh x size": "nx: (\\d+)"
@@ -160,8 +161,9 @@ benches:
     matrix:
       args:
         - "100 100 100"
-        - "300 300 300"
-        - "500 500 500"
+        - "150 150 150"
+        - "200 200 200"
+        - "250 250 250"
       environment_variables:
         - {"OMP_NUM_THREADS": 1, "RAYON_NUM_THREADS": 1}
         - {"OMP_NUM_THREADS": 4, "RAYON_NUM_THREADS": 4}
@@ -191,73 +193,75 @@ benches:
           x: "Mesh x size"
           y: "Wall time (s)"
 
-  # "mpi":
-  #   run_configurations:
-  #     - "cpp-mpi"
-  #     - "rust-mpi"
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "300 300 300"
-  #       - "500 500 500" ## TODO: Figure out best for one MPI node, then duplicate across many
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Mesh y size": "ny: (\\d+)"
-  #       "Mesh z size": "nz: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
-  #       "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
-  #       "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
-  #       "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
-  #       "Wall time (s)": "real\\s([\\d\\.]+)\nuser"
-  #     line_plots:
-  #       - title: "MPI Implementation Comparison"
-  #         x: "Mesh x size"
-  #         y: "Wall time (s)"
+  "mpi":
+    run_configurations:
+      - "cpp-mpi"
+      - "rust-mpi"
+    matrix: ## TODO: Figure out best for one MPI node, then duplicate across many
+      args:
+        - "100 100 100"
+        - "150 150 150"
"150 150 150" + - "200 200 200" + - "250 250 250" + analysis: + metrics: + "Mesh x size": "nx: (\\d+)" + "Mesh y size": "ny: (\\d+)" + "Mesh z size": "nz: (\\d+)" + "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" + "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" + "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" + "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" + "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" + "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" + "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" + "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" + "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)" + "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)" + "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)" + "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)" + "Wall time (s)": "real\\s([\\d\\.]+)\nuser" + line_plots: + - title: "MPI Implementation Comparison" + x: "Mesh x size" + y: "Wall time (s)" - # "hybrid": - # run_configurations: - # - "cpp-hybrid" - # - "rust-hybrid" - # matrix: - # args: - # - "100 100 100" - # - "300 300 300" - # - "500 500 500" - # [sbatch_config, environment_variables]: - # - [{"ntasks-per-node": 1}, {"OMP_NUM_THREADS": 40}] ## TODO: Figure out best for one MPI node, then duplicate across many - # - [{"ntasks-per-node": 2}, {"OMP_NUM_THREADS": 20}] - # - [{"ntasks-per-node": 4}, {"OMP_NUM_THREADS": 10}] - # - [{"ntasks-per-node": 10}, {"OMP_NUM_THREADS": 4}] - # - [{"ntasks-per-node": 20}, {"OMP_NUM_THREADS": 2}] - # - [{"ntasks-per-node": 40}, {"OMP_NUM_THREADS": 1}] - # analysis: - # metrics: - # "Mesh x size": "nx: (\\d+)" - # "Mesh y size": "ny: (\\d+)" - # "Mesh z size": "nz: (\\d+)" - # "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" - # "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" - # "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" - # "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary" - # "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" - # "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" - # "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" - # "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary" - # "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)" - # "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)" - # "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)" - # "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)" - # "Wall time (s)": "real\\s([\\d\\.]+)\nuser" - # line_plots: - # - title: "MPI & Parallel Implementation Comparison" - # x: "Mesh x size" - # y: "Wall time (s)" + "hybrid": + run_configurations: + - "cpp-hybrid" + - "rust-hybrid" + matrix: + args: + - "100 100 100" + - "150 150 150" + - "200 200 200" + - "250 250 250" + [sbatch_config, environment_variables]: + - [{"ntasks-per-node": 1}, {"OMP_NUM_THREADS": 40}] ## TODO: Figure out best for one MPI node, then 
+        - [{"ntasks-per-node": 2}, {"OMP_NUM_THREADS": 20}]
+        - [{"ntasks-per-node": 4}, {"OMP_NUM_THREADS": 10}]
+        - [{"ntasks-per-node": 10}, {"OMP_NUM_THREADS": 4}]
+        - [{"ntasks-per-node": 20}, {"OMP_NUM_THREADS": 2}]
+        - [{"ntasks-per-node": 40}, {"OMP_NUM_THREADS": 1}]
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+        "Wall time (s)": "real\\s([\\d\\.]+)\nuser"
+      line_plots:
+        - title: "MPI & Parallel Implementation Comparison"
+          x: "Mesh x size"
+          y: "Wall time (s)"