diff --git a/yaml_examples/avon/strong_weak_scaling.yaml b/yaml_examples/avon/strong_weak_scaling.yaml
new file mode 100644
index 0000000..b02a83c
--- /dev/null
+++ b/yaml_examples/avon/strong_weak_scaling.yaml
@@ -0,0 +1,96 @@
+run_configurations:
+  "cpp-mpi":
+    sbatch_config:
+      "nodes": 1
+      "cpus-per-task": 2
+      "mem-per-cpu": 3700
+    module_loads:
+      - "GCC/11.3.0"
+      - "OpenMPI/4.1.4"
+    environment_variables: {}
+    directory: "../0_cpp_versions/2_mpi"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun ./test_HPCCG"
+
+  "cpp-hybrid":
+    sbatch_config:
+      "nodes": 1
+      "cpus-per-task": 2
+      "mem-per-cpu": 3700
+    module_loads:
+      - "GCC/11.3.0"
+      - "OpenMPI/4.1.4"
+    environment_variables:
+      "OMP_NUM_THREADS": 2
+    directory: "../0_cpp_versions/3_hybrid"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun ./test_HPCCG"
+
+benches:
+  "strong-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      [args, run_command]:
+        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
+        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
+        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
+        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
+        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
+        - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
+        - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
+
+  "weak-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      run_command:
+        - "mpirun -np 1 ./test_HPCCG"
+        - "mpirun -np 2 ./test_HPCCG"
+        - "mpirun -np 4 ./test_HPCCG"
+        - "mpirun -np 8 ./test_HPCCG"
+        - "mpirun -np 16 ./test_HPCCG"
+        - "mpirun -np 32 ./test_HPCCG"
+        - "mpirun -np 64 ./test_HPCCG"
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
diff --git a/yaml_examples/avon_plan.yaml b/yaml_examples/bak/avon_plan.yaml
similarity index 100%
rename from yaml_examples/avon_plan.yaml
rename to yaml_examples/bak/avon_plan.yaml
diff --git a/yaml_examples/kudu_plan.yaml b/yaml_examples/bak/kudu_plan.yaml
similarity index 100%
rename from yaml_examples/kudu_plan.yaml
rename to yaml_examples/bak/kudu_plan.yaml
diff --git a/yaml_examples/kudu_single.yaml b/yaml_examples/bak/kudu_single.yaml
similarity index 100%
rename from yaml_examples/kudu_single.yaml
rename to yaml_examples/bak/kudu_single.yaml
diff --git a/yaml_examples/kudu/strong_weak_scaling.yaml b/yaml_examples/kudu/strong_weak_scaling.yaml
new file mode 100644
index 0000000..0b0c095
--- /dev/null
+++ b/yaml_examples/kudu/strong_weak_scaling.yaml
@@ -0,0 +1,94 @@
+run_configurations:
+  "cpp-mpi":
+    sbatch_config:
+      "nodes": 1
+      "cpus-per-task": 2
+      "mem-per-cpu": 1500
+    module_loads:
+      - "cs402-mpi"
+    environment_variables: {}
+    directory: "../0_cpp_versions/2_mpi"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun ./test_HPCCG"
+
+  "cpp-hybrid":
+    sbatch_config:
+      "nodes": 1
+      "cpus-per-task": 2
+      "mem-per-cpu": 1500
+    module_loads:
+      - "cs402-mpi"
+    environment_variables:
+      "OMP_NUM_THREADS": 2
+    directory: "../0_cpp_versions/3_hybrid"
+    build_commands:
+      - "make -j 8"
+    run_command: "mpirun ./test_HPCCG"
+
+benches:
+  "strong-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      [args, run_command]:
+        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
+        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
+        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
+        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
+        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
+        - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
+        - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
+
+  "weak-scaling":
+    run_configurations:
+      - "cpp-hybrid"
+    matrix:
+      run_command:
+        - "mpirun -np 1 ./test_HPCCG"
+        - "mpirun -np 2 ./test_HPCCG"
+        - "mpirun -np 4 ./test_HPCCG"
+        - "mpirun -np 8 ./test_HPCCG"
+        - "mpirun -np 16 ./test_HPCCG"
+        - "mpirun -np 32 ./test_HPCCG"
+        - "mpirun -np 64 ./test_HPCCG"
+    analysis:
+      metrics:
+        "Mesh x size": "nx: (\\d+)"
+        "Mesh y size": "ny: (\\d+)"
+        "Mesh z size": "nz: (\\d+)"
+        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
+        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
+        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
+        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
+        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
+        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
+      plot:
+        x: "Mesh x size"
+        y: "Total time (s)"
diff --git a/yaml_examples/kudu_strong_weak_scaling.yaml b/yaml_examples/kudu_strong_weak_scaling.yaml
deleted file mode 100644
index 42cb350..0000000
--- a/yaml_examples/kudu_strong_weak_scaling.yaml
+++ /dev/null
@@ -1,297 +0,0 @@
-#defaults:
-#  "default name":
-run_configurations:
-  "cpp-reference":
-    sbatch_config:
-      "nodes": 1
-      "ntasks-per-node": 1
-      "cpus-per-task": 1
-      "mem-per-cpu": 3700
-    module_loads: []
-    environment_variables: {}
-    directory: "../0_cpp_versions/0_ref"
-    build_commands:
-      - "make -j 8"
-    run_command: "./test_HPCCG"
-
-  "cpp-openmp":
-    sbatch_config:
-      "nodes": 1
-      "ntasks-per-node": 1
-      "cpus-per-task": 16
-      "mem-per-cpu": 3700
-    module_loads: []
-    environment_variables:
-      "OMP_NUM_THREADS": 16
-    directory: "../0_cpp_versions/1_openmp"
-    build_commands:
-      - "make -j 8"
-    run_command: "./test_HPCCG"
-
-  "cpp-mpi":
-    sbatch_config:
-      "nodes": 2
-      "ntasks-per-node": 8
-      "cpus-per-task": 1
-      "mem-per-cpu": 3700
-    module_loads:
-      - "cs402-mpi"
-    environment_variables: {}
-    directory: "../0_cpp_versions/2_mpi"
-    build_commands:
-      - "make -j 8"
-    run_command: "mpirun -n 2 ./test_HPCCG"
-
-  "cpp-hybrid":
-    sbatch_config:
-      "nodes": 2
-      "ntasks-per-node": 4
-      "cpus-per-task": 2
-      "mem-per-cpu": 3700
-    module_loads:
-      - "cs402-mpi"
-    environment_variables:
-      "OMP_NUM_THREADS": 2
-    directory: "../0_cpp_versions/3_hybrid"
-    build_commands:
-      - "make -j 8"
-    run_command: "mpirun -n 2 ./test_HPCCG"
-
-  "rust-reference":
-    sbatch_config:
-      "nodes": 1
-      "ntasks-per-node": 1
-      "cpus-per-task": 1
-      "mem-per-cpu": 3700
-    module_loads: []
-    environment_variables: {}
-    directory: "../5_iterators"
-    build_commands:
-      - "cargo build --release"
-    run_command: "cargo run --release"
-
-  "rust-rayon":
-    sbatch_config:
-      "nodes": 1
-      "ntasks-per-node": 1
-      "cpus-per-task": 16
-      "mem-per-cpu": 3700
-    module_loads: []
-    environment_variables:
-      "OMP_NUM_THREADS": 16
-    directory: "../6_parallel"
-    build_commands:
-      - "cargo build --release"
-    run_command: "cargo run --release"
-
-  "rust-mpi":
-    sbatch_config:
-      "nodes": 2
-      "ntasks-per-node": 8
-      "cpus-per-task": 1
-      "mem-per-cpu": 3700
-    module_loads:
-      - "cs402-mpi"
-    environment_variables: {}
-    directory: "../7_mpi"
-    build_commands:
-      - "cargo build --release"
-    run_command: "mpirun -n 2 ./target/release/hpccg-rs"
-
-benches:
-  # "serial":
-  #   run_configurations:
-  #     - "cpp-hybrid"
-  #     - "rust-mpi" # TODO: Make rust hybrid version
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "200 200 200"
-  #       - "300 300 300"
-  #       - "400 400 400"
-  #       - "500 500 500"
-  #     sbatch_config:
-  #       - "nodes": 2
-  #         "mem-per-cpu": 1000
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #     plot:
-  #       x: "Mesh x size"
-  #       y: "Total time (s)"
-
-
-  # "parallel":
-  #   run_configurations:
-  #     - "cpp-hybrid"
-  #     - "rust-mpi" # TODO: Make rust hybrid version
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "200 200 200"
-  #       - "300 300 300"
-  #       - "400 400 400"
-  #       - "500 500 500"
-  #     sbatch_config:
-  #       - "nodes": 2
-  #         "mem-per-cpu": 1000
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #     plot:
-  #       x: "Mesh x size"
-  #       y: "Total time (s)"
-
-
-  # "rust-techniques":
-  #   run_configurations:
-  #     - "cpp-hybrid"
-  #     - "rust-mpi" # TODO: Make rust hybrid version
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "200 200 200"
-  #       - "300 300 300"
-  #       - "400 400 400"
-  #       - "500 500 500"
-  #     sbatch_config:
-  #       - "nodes": 2
-  #         "mem-per-cpu": 1000
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #     plot:
-  #       x: "Mesh x size"
-  #       y: "Total time (s)"
-
-
-  # "mpi":
-  #   run_configurations:
-  #     - "cpp-hybrid"
-  #     - "rust-mpi" # TODO: Make rust hybrid version
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "200 200 200"
-  #       - "300 300 300"
-  #       - "400 400 400"
-  #       - "500 500 500"
-  #     sbatch_config:
-  #       - "nodes": 2
-  #         "mem-per-cpu": 1000
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #     plot:
-  #       x: "Mesh x size"
-  #       y: "Total time (s)"
-
-
-  "strong-scaling":
-    run_configurations:
-      - "cpp-hybrid"
-    matrix:
-      [args, run_command]:
-        - ["64 64 1024", "mpirun -np 1 ./test_HPCCG"]
-        - ["64 64 512", "mpirun -np 2 ./test_HPCCG"]
-        - ["64 64 256", "mpirun -np 4 ./test_HPCCG"]
-        - ["64 64 128", "mpirun -np 8 ./test_HPCCG"]
-        - ["64 64 64", "mpirun -np 16 ./test_HPCCG"]
-        - ["64 64 32", "mpirun -np 32 ./test_HPCCG"]
-        - ["64 64 16", "mpirun -np 64 ./test_HPCCG"]
-    analysis:
-      metrics:
-        "Mesh x size": "nx: (\\d+)"
-        "Mesh y size": "ny: (\\d+)"
-        "Mesh z size": "nz: (\\d+)"
-        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
-        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
-        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
-        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
-      plot:
-        x: "Mesh x size"
-        y: "Total time (s)"
-
-  "weak-scaling":
-    run_configurations:
-      - "cpp-hybrid"
-    matrix:
-      run_command:
-        - "mpirun -np 1 ./test_HPCCG"
-        - "mpirun -np 2 ./test_HPCCG"
-        - "mpirun -np 4 ./test_HPCCG"
-        - "mpirun -np 8 ./test_HPCCG"
-        - "mpirun -np 16 ./test_HPCCG"
-        - "mpirun -np 32 ./test_HPCCG"
-        - "mpirun -np 64 ./test_HPCCG"
-    analysis:
-      metrics:
-        "Mesh x size": "nx: (\\d+)"
-        "Mesh y size": "ny: (\\d+)"
-        "Mesh z size": "nz: (\\d+)"
-        "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-        "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-        "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
-        "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
-        "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
-        "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
-      plot:
-        x: "Mesh x size"
-        y: "Total time (s)"
-
-  # "all":
-  #   run_configurations:
-  #     - "cpp-reference"
-  #     - "cpp-openmp"
-  #     - "cpp-mpi"
-  #     - "rust-reference"
-  #     - "rust-rayon"
-  #     - "rust-mpi"
-  #   matrix:
-  #     args:
-  #       - "100 100 100"
-  #       - "200 200 200"
-  #       - "300 300 300"
-  #       - "400 400 400"
-  #       - "500 500 500"
-  #     sbatch_config:
-  #       - "nodes": 2
-  #         "mem-per-cpu": 1000
-  #   analysis:
-  #     metrics:
-  #       "Mesh x size": "nx: (\\d+)"
-  #       "Mesh y size": "ny: (\\d+)"
-  #       "Mesh z size": "nz: (\\d+)"
-  #       "Total time (s)": "Time Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "ddot time (s)": "Time Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "waxpby time (s)": "Time Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "sparsemv time (s)": "Time Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nFLOPS Summary"
-  #       "Total flops": "FLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "ddot flops": "FLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "waxpby flops": "FLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "sparsemv flops": "FLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)[\\s\\S]*\nMFLOPS Summary"
-  #       "Total mflops": "MFLOPS Summary:[\\s\\S]*Total\\s*: ([\\d\\.]+)"
-  #       "ddot mflops": "MFLOPS Summary:[\\s\\S]*DDOT\\s*: ([\\d\\.]+)"
-  #       "waxpby mflops": "MFLOPS Summary:[\\s\\S]*WAXPBY\\s*: ([\\d\\.]+)"
-  #       "sparsemv mflops": "MFLOPS Summary:[\\s\\S]*SPARSEMV\\s*: ([\\d\\.]+)"
-  #     plot:
-  #       x: "Mesh x size"
-  #       y: "Total time (s)"