diff --git a/src/hpc_multibench/main.py b/src/hpc_multibench/main.py
index cd3a8ab..776b338 100755
--- a/src/hpc_multibench/main.py
+++ b/src/hpc_multibench/main.py
@@ -28,7 +28,7 @@ def get_parser() -> ArgumentParser:
     mutex_group.add_argument(
         "-m",
         "--mode",
-        type=Mode,
+        type=Mode,  # TODO: This needs fixing to actually work...
         default=Mode.RUN,
         help="the mode to run the tool in (default: run)",
     )
diff --git a/yaml_examples/kudu_plan.yaml b/yaml_examples/kudu_plan.yaml
index 9bc4028..42cb350 100644
--- a/yaml_examples/kudu_plan.yaml
+++ b/yaml_examples/kudu_plan.yaml
@@ -99,24 +99,143 @@ run_configurations:
     run_command: "mpirun -n 2 ./target/release/hpccg-rs"
 
 benches:
-  "cpp-rust-comp":
+  # "serial":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "parallel":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "rust-techniques":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "mpi":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  "strong-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      [args, run_command]:
+        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
+        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
+        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
+        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
+        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
+        - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
+        - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
+
+  "weak-scaling":
     run_configurations:
-      - "cpp-reference"
-      - "cpp-openmp"
-      - "cpp-mpi"
-      # - "cpp-hybrid"
-      - "rust-reference"
-      - "rust-rayon"
-      - "rust-mpi"
+      - "cpp-hybrid"
     matrix:
-      args:
-        - "100 100 100"
-        - "200 200 200"
-      sbatch_config:
-        - "nodes": 2
-          "mem-per-cpu": 1000
-        - "nodes": 1
-          "mem-per-cpu": 2000
+      run_command:
+        - "mpirun -np 1 ./test_HPCCG"
+        - "mpirun -np 2 ./test_HPCCG"
+        - "mpirun -np 4 ./test_HPCCG"
+        - "mpirun -np 8 ./test_HPCCG"
+        - "mpirun -np 16 ./test_HPCCG"
+        - "mpirun -np 32 ./test_HPCCG"
+        - "mpirun -np 64 ./test_HPCCG"
     analysis:
       metrics:
         "Mesh x size": "nx: (\\d+)"
@@ -137,3 +256,42 @@ benches:
       plot:
         x: "Mesh x size"
         y: "Total time (s)"
+
+  # "all":
+  #   run_configurations:
+  #     - "cpp-reference"
+  #     - "cpp-openmp"
+  #     - "cpp-mpi"
+  #     - "rust-reference"
+  #     - "rust-rayon"
+  #     - "rust-mpi"
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Mesh y size": "ny: (\\d+)"
+  #       "Mesh z size": "nz: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+  #       "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+  #       "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+  #       "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
diff --git a/yaml_examples/kudu_single.yaml b/yaml_examples/kudu_single.yaml
new file mode 100644
index 0000000..9bc4028
--- /dev/null
+++ b/yaml_examples/kudu_single.yaml
@@ -0,0 +1,139 @@
+#defaults:
+#  "default name":
+run_configurations:
+  "cpp-reference":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables: {}
+    directory: "../0_cpp_versions/0_ref"
+    build_commands:
+      - "make -j 8"
+    run_command: "./test_HPCCG"
+
+  "cpp-openmp":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 16
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables:
+      "OMP_NUM_THREADS": 16
+    directory: "../0_cpp_versions/1_openmp"
+    build_commands:
+      - "make -j 8"
+    run_command: "./test_HPCCG"
+
+  "cpp-mpi":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 8
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables: {}
+    directory: "../0_cpp_versions/2_mpi"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun -n 2 ./test_HPCCG"
+
+  "cpp-hybrid":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 4
+      "cpus-per-task": 2
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables:
+      "OMP_NUM_THREADS": 2
+    directory: "../0_cpp_versions/3_hybrid"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun -n 2 ./test_HPCCG"
+
+  "rust-reference":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables: {}
+    directory: "../5_iterators"
+    build_commands:
+      - "cargo build --release"
+    run_command: "cargo run --release"
+
+  "rust-rayon":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 16
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables:
+      "OMP_NUM_THREADS": 16
+    directory: "../6_parallel"
+    build_commands:
+      - "cargo build --release"
+    run_command: "cargo run --release"
+
+  "rust-mpi":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 8
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables: {}
+    directory: "../7_mpi"
+    build_commands:
+      - "cargo build --release"
+    run_command: "mpirun -n 2 ./target/release/hpccg-rs"
+
+benches:
+  "cpp-rust-comp":
+    run_configurations:
+      - "cpp-reference"
+      - "cpp-openmp"
+      - "cpp-mpi"
+      # - "cpp-hybrid"
+      - "rust-reference"
+      - "rust-rayon"
+      - "rust-mpi"
+    matrix:
+      args:
+        - "100 100 100"
+        - "200 200 200"
+      sbatch_config:
+        - "nodes": 2
+          "mem-per-cpu": 1000
+        - "nodes": 1
+          "mem-per-cpu": 2000
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
diff --git a/yaml_examples/kudu_strong_weak_scaling.yaml b/yaml_examples/kudu_strong_weak_scaling.yaml
new file mode 100644
index 0000000..42cb350
--- /dev/null
+++ b/yaml_examples/kudu_strong_weak_scaling.yaml
@@ -0,0 +1,297 @@
+#defaults:
+#  "default name":
+run_configurations:
+  "cpp-reference":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables: {}
+    directory: "../0_cpp_versions/0_ref"
+    build_commands:
+      - "make -j 8"
+    run_command: "./test_HPCCG"
+
+  "cpp-openmp":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 16
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables:
+      "OMP_NUM_THREADS": 16
+    directory: "../0_cpp_versions/1_openmp"
+    build_commands:
+      - "make -j 8"
+    run_command: "./test_HPCCG"
+
+  "cpp-mpi":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 8
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables: {}
+    directory: "../0_cpp_versions/2_mpi"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun -n 2 ./test_HPCCG"
+
+  "cpp-hybrid":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 4
+      "cpus-per-task": 2
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables:
+      "OMP_NUM_THREADS": 2
+    directory: "../0_cpp_versions/3_hybrid"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun -n 2 ./test_HPCCG"
+
+  "rust-reference":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables: {}
+    directory: "../5_iterators"
+    build_commands:
+      - "cargo build --release"
+    run_command: "cargo run --release"
+
+  "rust-rayon":
+    sbatch_config:
+      "nodes": 1
+      "ntasks-per-node": 1
+      "cpus-per-task": 16
+      "mem-per-cpu": 3700
+    module_loads: []
+    environment_variables:
+      "OMP_NUM_THREADS": 16
+    directory: "../6_parallel"
+    build_commands:
+      - "cargo build --release"
+    run_command: "cargo run --release"
+
+  "rust-mpi":
+    sbatch_config:
+      "nodes": 2
+      "ntasks-per-node": 8
+      "cpus-per-task": 1
+      "mem-per-cpu": 3700
+    module_loads:
+      - "cs402-mpi"
+    environment_variables: {}
+    directory: "../7_mpi"
+    build_commands:
+      - "cargo build --release"
+    run_command: "mpirun -n 2 ./target/release/hpccg-rs"
+
+benches:
+  # "serial":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "parallel":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "rust-techniques":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  # "mpi":
+  #   run_configurations:
+  #     - "cpp-hybrid"
+  #     - "rust-mpi" # TODO: Make rust hybrid version
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"
+
+
+  "strong-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      [args, run_command]:
+        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
+        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
+        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
+        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
+        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
+        - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
+        - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
+
+  "weak-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      run_command:
+        - "mpirun -np 1 ./test_HPCCG"
+        - "mpirun -np 2 ./test_HPCCG"
+        - "mpirun -np 4 ./test_HPCCG"
+        - "mpirun -np 8 ./test_HPCCG"
+        - "mpirun -np 16 ./test_HPCCG"
+        - "mpirun -np 32 ./test_HPCCG"
+        - "mpirun -np 64 ./test_HPCCG"
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
+
+  # "all":
+  #   run_configurations:
+  #     - "cpp-reference"
+  #     - "cpp-openmp"
+  #     - "cpp-mpi"
+  #     - "rust-reference"
+  #     - "rust-rayon"
+  #     - "rust-mpi"
+  #   matrix:
+  #     args:
+  #       - "100 100 100"
+  #       - "200 200 200"
+  #       - "300 300 300"
+  #       - "400 400 400"
+  #       - "500 500 500"
+  #     sbatch_config:
+  #       - "nodes": 2
+  #         "mem-per-cpu": 1000
+  #   analysis:
+  #     metrics:
+  #       "Mesh x size": "nx: (\\d+)"
+  #       "Mesh y size": "ny: (\\d+)"
+  #       "Mesh z size": "nz: (\\d+)"
+  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+  #       "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+  #       "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+  #       "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+  #       "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+  #       "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+  #     plot:
+  #       x: "Mesh x size"
+  #       y: "Total time (s)"