From 74b1278bef9f31e1a72b5a1bca73e4cf33ef5a85 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 19:15:31 -0700 Subject: [PATCH 01/58] deprecate older file formats. --- src/outputs/restart.hpp | 29 ++--------------------------- src/parthenon_manager.cpp | 28 ++++------------------------ 2 files changed, 6 insertions(+), 51 deletions(-) diff --git a/src/outputs/restart.hpp b/src/outputs/restart.hpp index 69c301d30702..94a39d2264ee 100644 --- a/src/outputs/restart.hpp +++ b/src/outputs/restart.hpp @@ -158,32 +158,7 @@ class RestartReader { hsize_t offset[CHUNK_MAX_DIM] = {static_cast(range.s), 0, 0, 0, 0, 0, 0}; hsize_t count[CHUNK_MAX_DIM]; int total_dim = 0; - if (file_output_format_version == -1) { - size_t vlen = 1; - for (int i = 0; i < shape.size(); i++) { - vlen *= shape[i]; - } - count[0] = static_cast(range.e - range.s + 1); - count[1] = bsize[2]; - count[2] = bsize[1]; - count[3] = bsize[0]; - count[4] = vlen; - total_dim = 5; - } else if (file_output_format_version == 2) { - PARTHENON_REQUIRE( - shape.size() <= 1, - "Higher than vector datatypes are unstable in output versions < 3"); - size_t vlen = 1; - for (int i = 0; i < shape.size(); i++) { - vlen *= shape[i]; - } - count[0] = static_cast(range.e - range.s + 1); - count[1] = vlen; - count[2] = bsize[2]; - count[3] = bsize[1]; - count[4] = bsize[0]; - total_dim = 5; - } else if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { + if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { count[0] = static_cast(range.e - range.s + 1); const int ndim = shape.size(); if (where == MetadataFlag(Metadata::Cell)) { @@ -203,7 +178,7 @@ class RestartReader { PARTHENON_THROW("Only Cell and None locations supported!"); } } else { - PARTHENON_THROW("Unknown output format version in restart file.") + PARTHENON_THROW("Unsupported output format version in restart file.") } hsize_t total_count = 1; diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 814a8c6f094f..d1959964b375 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -233,17 +233,10 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { << std::endl; const auto file_output_format_ver = resfile.GetOutputFormatVersion(); - if (file_output_format_ver == -1) { + if (file_output_format_ver != HDF5::OUTPUT_VERSION_FORMAT) { // Being extra stringent here so that we don't forget to update the machinery when // another change happens. - PARTHENON_REQUIRE_THROWS( - HDF5::OUTPUT_VERSION_FORMAT == 2 || HDF5::OUTPUT_VERSION_FORMAT == 3, - "Auto conversion from original to format 2 or 3 not implemented yet.") - - if (Globals::my_rank == 0) { - PARTHENON_WARN("Restarting from a old output file format. New outputs written with " - "this binary will use new format.") - } + PARTHENON_THROW("Deprecated file format"); } // Get an iterator on block 0 for variable listing @@ -336,25 +329,12 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { // Double note that this also needs to be update in case // we update the HDF5 infrastructure! 
- if (file_output_format_ver == -1) { - PARTHENON_WARN("This file output format version is deprecrated and will be " - "removed in a future release."); - for (int k = out_kb.s; k <= out_kb.e; ++k) { - for (int j = out_jb.s; j <= out_jb.e; ++j) { - for (int i = out_ib.s; i <= out_ib.e; ++i) { - for (int l = 0; l < vlen; ++l) { - v_h(l, k, j, i) = tmp[index++]; - } - } - } - } - } else if (file_output_format_ver == 2 || - file_output_format_ver == HDF5::OUTPUT_VERSION_FORMAT) { + if (file_output_format_ver == HDF5::OUTPUT_VERSION_FORMAT) { OutputUtils::PackOrUnpackVar(pmb.get(), v.get(), resfile.hasGhost, index, tmp, [&](auto index, int t, int u, int v, int k, int j, int i) { v_h(t, u, v, k, j, i) = tmp[index]; }); } else { - PARTHENON_THROW("Unknown output format version in restart file.") + PARTHENON_THROW("Unsupported output format version in restart file.") } v->data.DeepCopy(v_h); From fd7ad48d7af5010b8738e13424f443a08753c8e0 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 19:31:57 -0700 Subject: [PATCH 02/58] get rid of some of those hardcoded arrays... --- src/outputs/output_utils.hpp | 10 ++++++++++ src/outputs/parthenon_hdf5.cpp | 25 ++++++++++--------------- 2 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index db6353090cbf..cc973d5d2edc 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -57,6 +57,16 @@ struct VarInfo { std::vector component_labels; int Size() const { return nx6 * nx5 * nx4 * nx3 * nx2 * nx1; } + template + void FillShape(T *shape) const { + shape[0] = static_cast(nx6); + shape[1] = static_cast(nx5); + shape[2] = static_cast(nx4); + shape[3] = static_cast(nx3); + shape[4] = static_cast(nx2); + shape[5] = static_cast(nx1); + } + VarInfo() = delete; // TODO(JMM): Separate this into an implementation file again? 
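
[Editor's note — illustration only, not part of the patch] The parthenon_hdf5.cpp hunk that follows replaces the hand-written seven-element initializer lists with calls to the VarInfo::FillShape helper added above, which copies the six per-variable extents in slowest-to-fastest order (nx6 ... nx1); the writer passes it the tail of its HDF5 count arrays, whose leading entry holds the block count. A minimal, self-contained sketch of that ordering, using a simplified stand-in struct (MiniVarInfo is hypothetical; the real VarInfo lives in src/outputs/output_utils.hpp and carries more state):

#include <array>
#include <cstdio>

struct MiniVarInfo {               // simplified stand-in for OutputUtils::VarInfo
  int nx6, nx5, nx4, nx3, nx2, nx1;
  template <typename T>
  void FillShape(T *shape) const { // mirrors the helper introduced in this patch
    shape[0] = static_cast<T>(nx6);  // slowest-varying extent first ...
    shape[1] = static_cast<T>(nx5);
    shape[2] = static_cast<T>(nx4);
    shape[3] = static_cast<T>(nx3);
    shape[4] = static_cast<T>(nx2);
    shape[5] = static_cast<T>(nx1);  // ... fastest-varying (x1) extent last
  }
};

int main() {
  MiniVarInfo vinfo{1, 1, 3, 16, 16, 16};       // e.g. a length-3 vector on a 16^3 block
  std::array<unsigned long long, 7> count;      // plays the role of hsize_t count[H5_NDIM]
  count[0] = 4;                                 // leading entry: block count, set by the caller
  vinfo.FillShape(&count[1]);                   // helper fills count[1..6]
  for (auto c : count) std::printf("%llu ", c); // prints: 4 1 1 3 16 16 16
  std::printf("\n");
  return 0;
}
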
diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index d7677dfee4fd..fa75374ddd86 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -312,21 +312,16 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm const hsize_t nx5 = vinfo.nx5; const hsize_t nx4 = vinfo.nx4; - hsize_t local_offset[H5_NDIM] = {my_offset, 0, 0, 0, 0, 0, 0}; - hsize_t local_count[H5_NDIM] = {static_cast(num_blocks_local), - static_cast(nx6), - static_cast(nx5), - static_cast(nx4), - static_cast(nx3), - static_cast(nx2), - static_cast(nx1)}; - hsize_t global_count[H5_NDIM] = {static_cast(max_blocks_global), - static_cast(nx6), - static_cast(nx5), - static_cast(nx4), - static_cast(nx3), - static_cast(nx2), - static_cast(nx1)}; + hsize_t local_offset[H5_NDIM] = {0}; + local_offset[0] = my_offset; + + hsize_t local_count[H5_NDIM] = {1}; + local_count[0] = static_cast(num_blocks_local); + vinfo.FillShape(&(local_count[1])); + + hsize_t global_count[H5_NDIM] = {1}; + global_count[0] = static_cast(max_blocks_global); + vinfo.FillShape(&(global_count[1])); std::vector alldims({nx6, nx5, nx4, static_cast(vinfo.nx3), static_cast(vinfo.nx2), From 7bb8c0839c1d2eb6935f7e513ecc00844fea4966 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 19:44:36 -0700 Subject: [PATCH 03/58] remove more hardcoded initializer lists --- src/outputs/output_utils.hpp | 7 +++++++ src/outputs/parthenon_hdf5.cpp | 14 +++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index cc973d5d2edc..f7f2ffe82c59 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -67,6 +67,13 @@ struct VarInfo { shape[5] = static_cast(nx1); } + template + auto GetShape() const { + return std::vector({ + static_cast(nx6), static_cast(nx5), static_cast(nx4), + static_cast(nx3), static_cast(nx2), static_cast(nx1)}); + } + VarInfo() = delete; // TODO(JMM): Separate this into an implementation file again? 
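
[Editor's note — illustration only, not part of the patch] The parthenon_hdf5.cpp hunk that follows drops the brace initializers introduced in the previous patch in favor of explicit std::fill calls, and swaps the hand-built alldims vector for vinfo.GetShape(). One C++ detail worth keeping in mind while reading these initializer changes: for a C array, "= {1}" sets only the first element to 1 and zero-initializes the rest, while "= {0}" zero-initializes every element. A short, standalone check of that aggregate-initialization rule (general language behavior, not project code; unsigned long long stands in for hsize_t):

#include <cstdio>

int main() {
  unsigned long long count_one[7] = {1};  // first element 1, the remaining six become 0
  unsigned long long count_zero[7] = {0}; // every element 0
  for (int i = 0; i < 7; ++i)
    std::printf("count_one[%d]=%llu  count_zero[%d]=%llu\n", i, count_one[i], i, count_zero[i]);
  return 0; // count_one prints 1,0,0,0,0,0,0; count_zero prints all zeros
}
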
diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index fa75374ddd86..1ec66b68e452 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -312,25 +312,25 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm const hsize_t nx5 = vinfo.nx5; const hsize_t nx4 = vinfo.nx4; - hsize_t local_offset[H5_NDIM] = {0}; + hsize_t local_offset[H5_NDIM]; + std::fill(local_offset + 1, local_offset + H5_NDIM, 0); local_offset[0] = my_offset; - hsize_t local_count[H5_NDIM] = {1}; + hsize_t local_count[H5_NDIM];; local_count[0] = static_cast(num_blocks_local); vinfo.FillShape(&(local_count[1])); - hsize_t global_count[H5_NDIM] = {1}; + hsize_t global_count[H5_NDIM]; global_count[0] = static_cast(max_blocks_global); vinfo.FillShape(&(global_count[1])); - std::vector alldims({nx6, nx5, nx4, static_cast(vinfo.nx3), - static_cast(vinfo.nx2), - static_cast(vinfo.nx1)}); + auto alldims = vinfo.GetShape(); int ndim = -1; #ifndef PARTHENON_DISABLE_HDF5_COMPRESSION // we need chunks to enable compression - std::array chunk_size({1, 1, 1, 1, 1, 1, 1}); + std::array chunk_size; + std::fill(chunk_size.begin(), chunk_size.end(), 1); #endif if (vinfo.where == MetadataFlag(Metadata::Cell)) { ndim = 3 + vinfo.tensor_rank + 1; From 429260adfb2429af7f88e8d38ef58499ec301d05 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 21:03:29 -0700 Subject: [PATCH 04/58] update error messages --- src/interface/variable.hpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/interface/variable.hpp b/src/interface/variable.hpp index 6542d99e7e43..a627d352dce9 100644 --- a/src/interface/variable.hpp +++ b/src/interface/variable.hpp @@ -81,14 +81,16 @@ class Variable { KOKKOS_FORCEINLINE_FUNCTION auto GetDim(const int i) const { // we can't query data.GetDim() here because data may be unallocated - assert(0 < i && i <= MAX_VARIABLE_DIMENSION && "ParArrayNDs are max 6D"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, + "Index out of bounds"); return dims_[i - 1]; } KOKKOS_FORCEINLINE_FUNCTION auto GetCoarseDim(const int i) const { // we can't query coarse_s.GetDim() here because it may be unallocated - assert(0 < i && i <= MAX_VARIABLE_DIMENSION && "ParArrayNDs are max 6D"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, + "Index out of bounds"); return coarse_dims_[i - 1]; } @@ -212,7 +214,8 @@ class ParticleVariable { KOKKOS_FORCEINLINE_FUNCTION auto GetDim(const int i) const { - PARTHENON_DEBUG_REQUIRE(0 < i && i <= 6, "ParArrayNDGenerics are max 6D"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= 6, + "Index out of bounds"); return dims_[i - 1]; } From 061c0bf208c7f87671f316b31e85353942b91b98 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 21:14:35 -0700 Subject: [PATCH 05/58] some more careful initialization without hardcoding --- src/outputs/histogram.cpp | 13 +++++++------ src/outputs/parthenon_xdmf.cpp | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/outputs/histogram.cpp b/src/outputs/histogram.cpp index 88c6ced2ecd3..5944f6e8a889 100644 --- a/src/outputs/histogram.cpp +++ b/src/outputs/histogram.cpp @@ -472,12 +472,13 @@ void HistogramOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, SimTime *tm using namespace HDF5; H5P const pl_xfer = H5P::FromHIDCheck(H5Pcreate(H5P_DATASET_XFER)); - // As we're reusing the interface from the existing hdf5 output, we have to define - // everything as 7D arrays. 
- // Counts will be set for each histogram individually below. - const std::array local_offset({0, 0, 0, 0, 0, 0, 0}); - std::array local_count({0, 0, 0, 0, 0, 0, 0}); - std::array global_count({0, 0, 0, 0, 0, 0, 0}); + // As we're reusing the interface from the existing hdf5 output, + // we have to define everything as H5_NDIM arrays. Counts will be + // set for each histogram individually below. All + // zero-initialized + const std::array local_offset = {0}; + std::array local_count = {0}; + std::array global_count = {0}; // create/open HDF5 file const std::string filename = GenerateFilename_(pin, tm, signal); diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 08424440eeb1..bf5aea6a820a 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -85,7 +85,7 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, int nx1, int nx2, int n } std::string filename_aux = hdfFile + ".xdmf"; std::ofstream xdmf; - hsize_t dims[H5_NDIM] = {0, 0, 0, 0, 0, 0, 0}; + hsize_t dims[H5_NDIM] = {0}; // zero-initialized // open file xdmf = std::ofstream(filename_aux.c_str(), std::ofstream::trunc); From bfe54270050386bd50f731cd60439765a37e11d0 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 21:19:48 -0700 Subject: [PATCH 06/58] remove some unused vars --- src/outputs/parthenon_hdf5.cpp | 3 --- src/outputs/parthenon_xdmf.cpp | 5 +---- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 1ec66b68e452..4179494861ba 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -308,9 +308,6 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm memset(tmpData.data(), 0, tmpData.size() * sizeof(OutT)); const std::string var_name = vinfo.label; - const hsize_t nx6 = vinfo.nx6; - const hsize_t nx5 = vinfo.nx5; - const hsize_t nx4 = vinfo.nx4; hsize_t local_offset[H5_NDIM]; std::fill(local_offset + 1, local_offset + H5_NDIM, 0); diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index bf5aea6a820a..08e79050fac1 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -150,10 +150,7 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, int nx1, int nx2, int n // write graphics variables int ndim; for (const auto &vinfo : var_list) { - std::vector alldims( - {static_cast(vinfo.nx6), static_cast(vinfo.nx5), - static_cast(vinfo.nx4), static_cast(vinfo.nx3), - static_cast(vinfo.nx2), static_cast(vinfo.nx1)}); + std::vector alldims = vinfo.GetShape(); // Only cell-based data currently supported for visualization if (vinfo.where == MetadataFlag(Metadata::Cell)) { ndim = 3 + vinfo.tensor_rank + 1; From a3f346961e3201bf59c449cbaa466290fcc033b1 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 21:20:51 -0700 Subject: [PATCH 07/58] unused var --- src/parthenon_manager.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index d1959964b375..f9725b969e7c 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -293,9 +293,6 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { for (auto &v_info : indep_restart_vars) { const auto vlen = v_info->NumComponents(); const auto &label = v_info->label(); - const auto &Nv = v_info->GetDim(4); - const auto &Nu = v_info->GetDim(5); - const auto &Nt = v_info->GetDim(6); if (Globals::my_rank == 0) { std::cout << "Var: 
" << label << ":" << vlen << std::endl; From 243d94e4c0375ca56f422ba8e48ac186175f8b48 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 21:26:50 -0700 Subject: [PATCH 08/58] remove more hardcoded stuff --- src/outputs/parthenon_hdf5.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 4179494861ba..e7e456057355 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -412,8 +412,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm varSize = vinfo.nx6 * vinfo.nx5 * vinfo.nx4 * (out_kb.e - out_kb.s + 1) * (out_jb.e - out_jb.s + 1) * (out_ib.e - out_ib.s + 1); } else { - varSize = - vinfo.nx6 * vinfo.nx5 * vinfo.nx4 * vinfo.nx3 * vinfo.nx2 * vinfo.nx1; + varSize = vinfo.Size(); } auto fill_val = output_params.sparse_seed_nans ? std::numeric_limits::quiet_NaN() : 0; From 1864e7966728eaf116251a8da91ff34afe64b236 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 22:08:54 -0700 Subject: [PATCH 09/58] TensorSize --- src/outputs/output_utils.hpp | 1 + src/outputs/parthenon_hdf5.cpp | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index f7f2ffe82c59..915a6c5cf083 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -56,6 +56,7 @@ struct VarInfo { bool is_vector; std::vector component_labels; int Size() const { return nx6 * nx5 * nx4 * nx3 * nx2 * nx1; } + int TensorSize() const { return nx6 * nx5 * nx4; } template void FillShape(T *shape) const { diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index e7e456057355..c115112f3f06 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -409,7 +409,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (vinfo.is_sparse) { hsize_t varSize{}; if (vinfo.where == MetadataFlag(Metadata::Cell)) { - varSize = vinfo.nx6 * vinfo.nx5 * vinfo.nx4 * (out_kb.e - out_kb.s + 1) * + varSize = vinfo.TensorSize() * (out_kb.e - out_kb.s + 1) * (out_jb.e - out_jb.s + 1) * (out_ib.e - out_ib.s + 1); } else { varSize = vinfo.Size(); From 732500fd69eecf4f440a17ffa19d11d3993b38bb Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Sat, 9 Mar 2024 22:24:50 -0700 Subject: [PATCH 10/58] switch to using an array in VarInfo --- src/outputs/output_utils.hpp | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 915a6c5cf083..bda6d45c20b8 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -41,38 +42,33 @@ namespace parthenon { namespace OutputUtils { // Helper struct containing some information about a variable struct VarInfo { + static constexpr int VNDIM = 6; // soon to be 7 std::string label; int num_components; - int nx6; - int nx5; - int nx4; - int nx3; - int nx2; - int nx1; + std::array nx; int tensor_rank; // 0- to 3-D for cell-centered variables, 0- to 6-D for arbitrary shape // variables MetadataFlag where; bool is_sparse; bool is_vector; std::vector component_labels; - int Size() const { return nx6 * nx5 * nx4 * nx3 * nx2 * nx1; } - int TensorSize() const { return nx6 * nx5 * nx4; } + int Size() const { + return std::accumulate(nx.begin(), nx.end(), 1, std::multiplies()); + } + int 
TensorSize() const { + return nx[5]*nx[4]*nx[3];//std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); + } template void FillShape(T *shape) const { - shape[0] = static_cast(nx6); - shape[1] = static_cast(nx5); - shape[2] = static_cast(nx4); - shape[3] = static_cast(nx3); - shape[4] = static_cast(nx2); - shape[5] = static_cast(nx1); + for (int i = 0; i < VNDIM; ++i) { + shape[i] = static_cast(nx[VNDIM - i]); + } } template auto GetShape() const { - return std::vector({ - static_cast(nx6), static_cast(nx5), static_cast(nx4), - static_cast(nx3), static_cast(nx2), static_cast(nx1)}); + return std::vector(nx.rbegin(), nx.rend()); } VarInfo() = delete; @@ -81,8 +77,9 @@ struct VarInfo { VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, Metadata metadata, bool is_sparse, bool is_vector) - : label(label), num_components(num_components), nx6(nx6), nx5(nx5), nx4(nx4), - nx3(nx3), nx2(nx2), nx1(nx1), tensor_rank(metadata.Shape().size()), + : label(label), num_components(num_components), + nx({nx1,nx2,nx3,nx4,nx5,nx6}), + tensor_rank(metadata.Shape().size()), where(metadata.Where()), is_sparse(is_sparse), is_vector(is_vector) { if (num_components <= 0) { std::stringstream msg; From 534e38b0744ca28d2e5872483af3488ee1f8cc63 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Mon, 11 Mar 2024 13:41:28 -0600 Subject: [PATCH 11/58] formatting --- src/interface/variable.hpp | 9 +++------ src/outputs/output_utils.hpp | 14 +++++++------- src/outputs/parthenon_hdf5.cpp | 3 ++- 3 files changed, 12 insertions(+), 14 deletions(-) diff --git a/src/interface/variable.hpp b/src/interface/variable.hpp index a627d352dce9..5877cca0eb05 100644 --- a/src/interface/variable.hpp +++ b/src/interface/variable.hpp @@ -81,16 +81,14 @@ class Variable { KOKKOS_FORCEINLINE_FUNCTION auto GetDim(const int i) const { // we can't query data.GetDim() here because data may be unallocated - PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, - "Index out of bounds"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, "Index out of bounds"); return dims_[i - 1]; } KOKKOS_FORCEINLINE_FUNCTION auto GetCoarseDim(const int i) const { // we can't query coarse_s.GetDim() here because it may be unallocated - PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, - "Index out of bounds"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= MAX_VARIABLE_DIMENSION, "Index out of bounds"); return coarse_dims_[i - 1]; } @@ -214,8 +212,7 @@ class ParticleVariable { KOKKOS_FORCEINLINE_FUNCTION auto GetDim(const int i) const { - PARTHENON_DEBUG_REQUIRE(0 < i && i <= 6, - "Index out of bounds"); + PARTHENON_DEBUG_REQUIRE(0 < i && i <= 6, "Index out of bounds"); return dims_[i - 1]; } diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index bda6d45c20b8..b46a2e931fbe 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -56,17 +56,18 @@ struct VarInfo { return std::accumulate(nx.begin(), nx.end(), 1, std::multiplies()); } int TensorSize() const { - return nx[5]*nx[4]*nx[3];//std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); + return nx[5] * nx[4] * + nx[3]; // std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); } - template + template void FillShape(T *shape) const { for (int i = 0; i < VNDIM; ++i) { shape[i] = static_cast(nx[VNDIM - i]); } } - template + template auto GetShape() const { return std::vector(nx.rbegin(), nx.rend()); } @@ -77,10 +78,9 @@ 
struct VarInfo { VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, Metadata metadata, bool is_sparse, bool is_vector) - : label(label), num_components(num_components), - nx({nx1,nx2,nx3,nx4,nx5,nx6}), - tensor_rank(metadata.Shape().size()), - where(metadata.Where()), is_sparse(is_sparse), is_vector(is_vector) { + : label(label), num_components(num_components), nx({nx1, nx2, nx3, nx4, nx5, nx6}), + tensor_rank(metadata.Shape().size()), where(metadata.Where()), + is_sparse(is_sparse), is_vector(is_vector) { if (num_components <= 0) { std::stringstream msg; msg << "### ERROR: Got variable " << label << " with " << num_components diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index c115112f3f06..fddeeb7781d4 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -313,7 +313,8 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm std::fill(local_offset + 1, local_offset + H5_NDIM, 0); local_offset[0] = my_offset; - hsize_t local_count[H5_NDIM];; + hsize_t local_count[H5_NDIM]; + ; local_count[0] = static_cast(num_blocks_local); vinfo.FillShape(&(local_count[1])); From 41a9db234f1a782927e8554bbad6dfffbb8f7be6 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Mon, 11 Mar 2024 13:43:06 -0600 Subject: [PATCH 12/58] lint --- src/outputs/output_utils.hpp | 1 + src/outputs/parthenon_hdf5.cpp | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index b46a2e931fbe..87378f158aea 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -20,6 +20,7 @@ // C++ #include +#include #include #include #include diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index fddeeb7781d4..01279ac587f0 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -314,7 +314,6 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm local_offset[0] = my_offset; hsize_t local_count[H5_NDIM]; - ; local_count[0] = static_cast(num_blocks_local); vinfo.FillShape(&(local_count[1])); From 4f12e29d3aec8d20f6d1660f8f506f536d865000 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Wed, 13 Mar 2024 08:50:14 -0600 Subject: [PATCH 13/58] generalize how shapes are set --- src/outputs/output_utils.hpp | 5 +++-- src/outputs/parthenon_hdf5_types.hpp | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 87378f158aea..bc27b6ffb1e7 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -34,6 +34,7 @@ #include "basic_types.hpp" #include "interface/metadata.hpp" #include "interface/variable.hpp" +#include "kokkos_abstraction.hpp" #include "mesh/domain.hpp" #include "mesh/mesh.hpp" #include "mesh/meshblock.hpp" @@ -43,7 +44,7 @@ namespace parthenon { namespace OutputUtils { // Helper struct containing some information about a variable struct VarInfo { - static constexpr int VNDIM = 6; // soon to be 7 + static constexpr int VNDIM = 6; // MAX_VARIABLE_DIMENSION; std::string label; int num_components; std::array nx; @@ -79,7 +80,7 @@ struct VarInfo { VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, Metadata metadata, bool is_sparse, bool is_vector) - : label(label), 
num_components(num_components), nx({nx1, nx2, nx3, nx4, nx5, nx6}), + : label(label), num_components(num_components), nx({nx1, nx2, nx3, nx4, nx5, nx6}), tensor_rank(metadata.Shape().size()), where(metadata.Where()), is_sparse(is_sparse), is_vector(is_vector) { if (num_components <= 0) { diff --git a/src/outputs/parthenon_hdf5_types.hpp b/src/outputs/parthenon_hdf5_types.hpp index e9f771d50619..59f7fbdc9cb2 100644 --- a/src/outputs/parthenon_hdf5_types.hpp +++ b/src/outputs/parthenon_hdf5_types.hpp @@ -35,12 +35,13 @@ #include #include "utils/error_checking.hpp" +#include "kokkos_abstraction.hpp" namespace parthenon { namespace HDF5 { // Number of dimension of HDF5 field data sets (block x nv x nu x nt x nz x ny x nx) -static constexpr size_t H5_NDIM = 7; +static constexpr size_t H5_NDIM = 7; // MAX_VARIABLE_DIMENSION + 1; static constexpr int OUTPUT_VERSION_FORMAT = 3; From e87f598cbac805299bf381248636fbc39ff1d593 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Wed, 13 Mar 2024 16:19:43 -0600 Subject: [PATCH 14/58] remove prefilling --- src/outputs/parthenon_hdf5.cpp | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 01279ac587f0..41529f915a3e 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -315,11 +315,9 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm hsize_t local_count[H5_NDIM]; local_count[0] = static_cast(num_blocks_local); - vinfo.FillShape(&(local_count[1])); hsize_t global_count[H5_NDIM]; global_count[0] = static_cast(max_blocks_global); - vinfo.FillShape(&(global_count[1])); auto alldims = vinfo.GetShape(); @@ -332,7 +330,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (vinfo.where == MetadataFlag(Metadata::Cell)) { ndim = 3 + vinfo.tensor_rank + 1; for (int i = 0; i < vinfo.tensor_rank; i++) { - local_count[1 + i] = global_count[1 + i] = alldims[3 - vinfo.tensor_rank + i]; + local_count[1 + i] = global_count[1 + i] = alldims[alldims.size() - 3 - vinfo.tensor_rank + i]; } local_count[vinfo.tensor_rank + 1] = global_count[vinfo.tensor_rank + 1] = nx3; local_count[vinfo.tensor_rank + 2] = global_count[vinfo.tensor_rank + 2] = nx2; @@ -348,7 +346,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm } else if (vinfo.where == MetadataFlag(Metadata::None)) { ndim = vinfo.tensor_rank + 1; for (int i = 0; i < vinfo.tensor_rank; i++) { - local_count[1 + i] = global_count[1 + i] = alldims[6 - vinfo.tensor_rank + i]; + local_count[1 + i] = global_count[1 + i] = alldims[alldims.size() - vinfo.tensor_rank + i]; } #ifndef PARTHENON_DISABLE_HDF5_COMPRESSION From d802f4ba800f46e25ce4d0ab22706a94da6270e5 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 14 Mar 2024 09:58:15 -0600 Subject: [PATCH 15/58] move FillShape --- src/outputs/output_utils.hpp | 60 ++++++++++++++++++++++------ src/outputs/parthenon_hdf5.cpp | 35 ++++------------ src/outputs/parthenon_hdf5_types.hpp | 2 +- 3 files changed, 55 insertions(+), 42 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index bc27b6ffb1e7..5d9a510b62f7 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -28,6 +28,7 @@ #include #include #include +#include #include // Parthenon @@ -44,34 +45,61 @@ namespace parthenon { namespace OutputUtils { // Helper struct containing some information about a variable struct VarInfo { + public: static 
constexpr int VNDIM = 6; // MAX_VARIABLE_DIMENSION; std::string label; int num_components; - std::array nx; int tensor_rank; // 0- to 3-D for cell-centered variables, 0- to 6-D for arbitrary shape // variables MetadataFlag where; bool is_sparse; bool is_vector; + IndexShape cellbounds; std::vector component_labels; int Size() const { - return std::accumulate(nx.begin(), nx.end(), 1, std::multiplies()); + return std::accumulate(nx_.begin(), nx_.end(), 1, std::multiplies()); } int TensorSize() const { - return nx[5] * nx[4] * - nx[3]; // std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); + // std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); + return nx_[5] * nx_[4] * nx_[3]; } template - void FillShape(T *shape) const { - for (int i = 0; i < VNDIM; ++i) { - shape[i] = static_cast(nx[VNDIM - i]); + int FillShape(const IndexDomain domain, T *data) { + int ndim = -1; + int nx3 = cellbounds.ncellsk(domain); + int nx2 = cellbounds.ncellsj(domain); + int nx1 = cellbounds.ncellsi(domain); + if (where == MetadataFlag({Metadata::None})) { + ndim = tensor_rank + 1; + for (int i = 0; i < tensor_rank; ++i) { + data[i] = static_cast(rnx_[rnx_.size() - tensor_rank + i]); + } + } else if (where == MetadataFlag({Metadata::Cell})) { + ndim = 3 + tensor_rank + 1; + for (int i = 0; i < tensor_rank; ++i) { + data[i] = static_cast(rnx_[rnx_.size() - 3 - tensor_rank + i]); + } + data[tensor_rank] = static_cast(nx3); + data[tensor_rank + 1] = static_cast(nx2); + data[tensor_rank + 2] = static_cast(nx1); } + return ndim; + } + + template + int FillShape(const IndexDomain domain, T *head, Args... args) { + int ndim_head = FillShape(domain, head); + int ndim_tail = FillShape(domain, std::forward(args)...); + // this check should be impossible to trigger but... just to be safe + PARTHENON_DEBUG_REQUIRE(ndim_head == ndim_tail, + "Shape can't change for different arrays"); + return ndim_tail; } template auto GetShape() const { - return std::vector(nx.rbegin(), nx.rend()); + return std::vector(nx_.rbegin(), nx_.rend()); } VarInfo() = delete; @@ -79,10 +107,11 @@ struct VarInfo { // TODO(JMM): Separate this into an implementation file again? 
VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, - Metadata metadata, bool is_sparse, bool is_vector) - : label(label), num_components(num_components), nx({nx1, nx2, nx3, nx4, nx5, nx6}), + Metadata metadata, bool is_sparse, bool is_vector, const IndexShape &cellbounds) + : label(label), num_components(num_components), nx_({nx1, nx2, nx3, nx4, nx5, nx6}), tensor_rank(metadata.Shape().size()), where(metadata.Where()), - is_sparse(is_sparse), is_vector(is_vector) { + is_sparse(is_sparse), is_vector(is_vector), cellbounds(cellbounds), + rnx_(nx_.rbegin(), nx_.rend()) { if (num_components <= 0) { std::stringstream msg; msg << "### ERROR: Got variable " << label << " with " << num_components @@ -113,11 +142,16 @@ struct VarInfo { } } - explicit VarInfo(const std::shared_ptr> &var) + explicit VarInfo(const std::shared_ptr> &var, + const IndexShape &cellbounds) : VarInfo(var->label(), var->metadata().getComponentLabels(), var->NumComponents(), var->GetDim(6), var->GetDim(5), var->GetDim(4), var->GetDim(3), var->GetDim(2), var->GetDim(1), var->metadata(), var->IsSparse(), - var->IsSet(Metadata::Vector)) {} + var->IsSet(Metadata::Vector), cellbounds) {} + + private: + std::array nx_; + std::vector rnx_; }; struct SwarmVarInfo { diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 41529f915a3e..96aeaa63700c 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -82,9 +82,10 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm auto const &first_block = *(pm->block_list.front()); - const IndexRange out_ib = first_block.cellbounds.GetBoundsI(theDomain); - const IndexRange out_jb = first_block.cellbounds.GetBoundsJ(theDomain); - const IndexRange out_kb = first_block.cellbounds.GetBoundsK(theDomain); + const auto &cellbounds = first_block.cellbounds; + const IndexRange out_ib = cellbounds.GetBoundsI(theDomain); + const IndexRange out_jb = cellbounds.GetBoundsJ(theDomain); + const IndexRange out_kb = cellbounds.GetBoundsK(theDomain); auto const nx1 = out_ib.e - out_ib.s + 1; auto const nx2 = out_jb.e - out_jb.s + 1; @@ -255,7 +256,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm std::vector all_vars_info; const auto vars = get_vars(pm->block_list.front()); for (auto &v : vars) { - all_vars_info.emplace_back(v); + all_vars_info.emplace_back(v, cellbounds); } // sort alphabetically @@ -321,47 +322,25 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm auto alldims = vinfo.GetShape(); - int ndim = -1; + int ndim = vinfo.FillShape(theDomain, &(local_count[1]), &(global_count[1])); #ifndef PARTHENON_DISABLE_HDF5_COMPRESSION // we need chunks to enable compression std::array chunk_size; std::fill(chunk_size.begin(), chunk_size.end(), 1); -#endif if (vinfo.where == MetadataFlag(Metadata::Cell)) { - ndim = 3 + vinfo.tensor_rank + 1; - for (int i = 0; i < vinfo.tensor_rank; i++) { - local_count[1 + i] = global_count[1 + i] = alldims[alldims.size() - 3 - vinfo.tensor_rank + i]; - } - local_count[vinfo.tensor_rank + 1] = global_count[vinfo.tensor_rank + 1] = nx3; - local_count[vinfo.tensor_rank + 2] = global_count[vinfo.tensor_rank + 2] = nx2; - local_count[vinfo.tensor_rank + 3] = global_count[vinfo.tensor_rank + 3] = nx1; - -#ifndef PARTHENON_DISABLE_HDF5_COMPRESSION if (output_params.hdf5_compression_level > 0) { for (int i = ndim - 3; i < ndim; i++) { 
chunk_size[i] = local_count[i]; } } -#endif } else if (vinfo.where == MetadataFlag(Metadata::None)) { - ndim = vinfo.tensor_rank + 1; - for (int i = 0; i < vinfo.tensor_rank; i++) { - local_count[1 + i] = global_count[1 + i] = alldims[alldims.size() - vinfo.tensor_rank + i]; - } - -#ifndef PARTHENON_DISABLE_HDF5_COMPRESSION if (output_params.hdf5_compression_level > 0) { int nchunk_indices = std::min(vinfo.tensor_rank, 3); for (int i = ndim - nchunk_indices; i < ndim; i++) { - chunk_size[i] = alldims[6 - nchunk_indices + i]; + chunk_size[i] = alldims[alldims.size() - nchunk_indices + i]; } } -#endif - } else { - PARTHENON_THROW("Only Cell and None locations supported!"); } - -#ifndef PARTHENON_DISABLE_HDF5_COMPRESSION PARTHENON_HDF5_CHECK(H5Pset_chunk(pl_dcreate, ndim, chunk_size.data())); // Do not run the pipeline if compression is soft disabled. // By default data would still be passed, which may result in slower output. diff --git a/src/outputs/parthenon_hdf5_types.hpp b/src/outputs/parthenon_hdf5_types.hpp index 59f7fbdc9cb2..f0bb557edcfc 100644 --- a/src/outputs/parthenon_hdf5_types.hpp +++ b/src/outputs/parthenon_hdf5_types.hpp @@ -34,8 +34,8 @@ #include #include -#include "utils/error_checking.hpp" #include "kokkos_abstraction.hpp" +#include "utils/error_checking.hpp" namespace parthenon { namespace HDF5 { From 5d49b499089771f44a2e2b2e907a4703a931d87e Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 14 Mar 2024 10:39:46 -0600 Subject: [PATCH 16/58] clean up compression --- src/outputs/parthenon_hdf5.cpp | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 96aeaa63700c..ccd7e174e4f7 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -320,31 +320,22 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm hsize_t global_count[H5_NDIM]; global_count[0] = static_cast(max_blocks_global); - auto alldims = vinfo.GetShape(); - int ndim = vinfo.FillShape(theDomain, &(local_count[1]), &(global_count[1])); + #ifndef PARTHENON_DISABLE_HDF5_COMPRESSION - // we need chunks to enable compression - std::array chunk_size; - std::fill(chunk_size.begin(), chunk_size.end(), 1); - if (vinfo.where == MetadataFlag(Metadata::Cell)) { - if (output_params.hdf5_compression_level > 0) { - for (int i = ndim - 3; i < ndim; i++) { - chunk_size[i] = local_count[i]; - } + // we need chunks to enable compression. Do not run the pipeline + // if compression is soft disabled. By default data would still + // be passed, which may result in slower output. + if (output_params.hdf5_compression_level > 0) { + std::array chunk_size; + std::fill(chunk_size.begin(), chunk_size.end(), 1); + for (int i = 1; i < ndim; ++i) { + chunk_size[i] = local_count[i]; } - } else if (vinfo.where == MetadataFlag(Metadata::None)) { - if (output_params.hdf5_compression_level > 0) { - int nchunk_indices = std::min(vinfo.tensor_rank, 3); - for (int i = ndim - nchunk_indices; i < ndim; i++) { - chunk_size[i] = alldims[alldims.size() - nchunk_indices + i]; - } + if (vinfo.where != MetadataFlag(Metadata::None)) { + std::fill(&(chunk_size[0]), &(chunk_size[0]) + ndim - 3, 1); } - } - PARTHENON_HDF5_CHECK(H5Pset_chunk(pl_dcreate, ndim, chunk_size.data())); - // Do not run the pipeline if compression is soft disabled. - // By default data would still be passed, which may result in slower output. 
- if (output_params.hdf5_compression_level > 0) { + PARTHENON_HDF5_CHECK(H5Pset_chunk(pl_dcreate, ndim, chunk_size.data())); PARTHENON_HDF5_CHECK( H5Pset_deflate(pl_dcreate, std::min(9, output_params.hdf5_compression_level))); } From e994a3111dec6fec8a23719563e1135f24fc582c Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 14 Mar 2024 11:34:44 -0600 Subject: [PATCH 17/58] extend dim finally --- src/outputs/output_utils.hpp | 10 +++++----- src/outputs/parthenon_hdf5_types.hpp | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 5d9a510b62f7..d6781f0d693a 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -46,7 +46,7 @@ namespace OutputUtils { // Helper struct containing some information about a variable struct VarInfo { public: - static constexpr int VNDIM = 6; // MAX_VARIABLE_DIMENSION; + static constexpr int VNDIM = MAX_VARIABLE_DIMENSION; std::string label; int num_components; int tensor_rank; // 0- to 3-D for cell-centered variables, 0- to 6-D for arbitrary shape @@ -108,10 +108,10 @@ struct VarInfo { VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, Metadata metadata, bool is_sparse, bool is_vector, const IndexShape &cellbounds) - : label(label), num_components(num_components), nx_({nx1, nx2, nx3, nx4, nx5, nx6}), - tensor_rank(metadata.Shape().size()), where(metadata.Where()), - is_sparse(is_sparse), is_vector(is_vector), cellbounds(cellbounds), - rnx_(nx_.rbegin(), nx_.rend()) { + : label(label), num_components(num_components), + nx_({nx1, nx2, nx3, nx4, nx5, nx6, 1}), tensor_rank(metadata.Shape().size()), + where(metadata.Where()), is_sparse(is_sparse), is_vector(is_vector), + cellbounds(cellbounds), rnx_(nx_.rbegin(), nx_.rend()) { if (num_components <= 0) { std::stringstream msg; msg << "### ERROR: Got variable " << label << " with " << num_components diff --git a/src/outputs/parthenon_hdf5_types.hpp b/src/outputs/parthenon_hdf5_types.hpp index f0bb557edcfc..33ef9e15853c 100644 --- a/src/outputs/parthenon_hdf5_types.hpp +++ b/src/outputs/parthenon_hdf5_types.hpp @@ -41,7 +41,7 @@ namespace parthenon { namespace HDF5 { // Number of dimension of HDF5 field data sets (block x nv x nu x nt x nz x ny x nx) -static constexpr size_t H5_NDIM = 7; // MAX_VARIABLE_DIMENSION + 1; +static constexpr size_t H5_NDIM = MAX_VARIABLE_DIMENSION + 1; static constexpr int OUTPUT_VERSION_FORMAT = 3; From a45f5ef770ecb15bdd998becf24e2b9cae363061 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 18 Mar 2024 14:00:43 -0600 Subject: [PATCH 18/58] add unit tests... 
more to come --- tst/unit/CMakeLists.txt | 1 + tst/unit/test_output_utils.cpp | 97 ++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 tst/unit/test_output_utils.cpp diff --git a/tst/unit/CMakeLists.txt b/tst/unit/CMakeLists.txt index 05180d532340..aba3b83b478a 100644 --- a/tst/unit/CMakeLists.txt +++ b/tst/unit/CMakeLists.txt @@ -32,6 +32,7 @@ list(APPEND unit_tests_SOURCES test_meshblock_data_iterator.cpp test_mesh_data.cpp test_nan_tags.cpp + test_output_utils.cpp test_pararrays.cpp test_sparse_pack.cpp test_swarm.cpp diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp new file mode 100644 index 000000000000..50586eec7fa3 --- /dev/null +++ b/tst/unit/test_output_utils.cpp @@ -0,0 +1,97 @@ +//======================================================================================== +// Athena++ astrophysical MHD code +// Copyright(C) 2014 James M. Stone and other code contributors +// Licensed under the 3-clause BSD License, see LICENSE file for details +//======================================================================================== +// (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved. +// +// This program was produced under U.S. Government contract 89233218CNA000001 for Los +// Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +// for the U.S. Department of Energy/National Nuclear Security Administration. All rights +// in the program are reserved by Triad National Security, LLC, and the U.S. Department +// of Energy/National Nuclear Security Administration. The Government is granted for +// itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +// license in this material to reproduce, prepare derivative works, distribute copies to +// the public, perform publicly and display publicly, and to permit others to do so. 
+//======================================================================================== + +#include +#include +#include +#include + +#include + +#include "basic_types.hpp" +#include "globals.hpp" +#include "interface/mesh_data.hpp" +#include "interface/meshblock_data.hpp" +#include "interface/metadata.hpp" +#include "interface/state_descriptor.hpp" +#include "interface/variable_pack.hpp" +#include "kokkos_abstraction.hpp" +#include "mesh/domain.hpp" +#include "mesh/meshblock.hpp" +#include "outputs/output_utils.hpp" + +using parthenon::BlockList_t; +using parthenon::DevExecSpace; +using parthenon::IndexDomain; +using parthenon::loop_pattern_mdrange_tag; +using parthenon::MeshBlock; +using parthenon::MeshBlockData; +using parthenon::MeshData; +using parthenon::Metadata; +using parthenon::PackIndexMap; +using parthenon::par_for; +using parthenon::Real; +using parthenon::StateDescriptor; +using parthenon::IndexShape; + +using namespace parthenon::OutputUtils; + +TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUtils]") { + GIVEN("A MeshBlock with some vars on it") { + constexpr int NG = 2; + constexpr int NSIDE = 16; + constexpr int NDIM = 3; + + constexpr auto interior = parthenon::IndexDomain::interior; + constexpr auto entire = parthenon::IndexDomain::entire; + parthenon::Globals::nghost = NG; + + const std::string scalar_cell = "scalar_cell"; + + Metadata m({Metadata::Cell, Metadata::Independent}); + auto pkg = std::make_shared("Test package"); + pkg->AddField("scalar_cell", m); + + auto pmb = std::make_shared(NSIDE, NDIM); + auto pmbd = pmb->meshblock_data.Get(); + pmbd->Initialize(pkg, pmb); + + IndexShape cellbounds = pmb->cellbounds; + THEN("The CellBounds object is reasonable") { + REQUIRE(cellbounds.ncellsk(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsk(interior) == NSIDE); + REQUIRE(cellbounds.ncellsj(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsj(interior) == NSIDE); + REQUIRE(cellbounds.ncellsi(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsi(interior) == NSIDE); + } + + WHEN("We Initialize VarInfo on a scalar cell var") { + auto v = pmbd->GetVarPtr(scalar_cell); + VarInfo info(v, cellbounds); + + THEN("The shape is correct over both interior and entire") { + std::vector shape(10); + int ndim = info.FillShape(interior, shape.data()); + REQUIRE(ndim == 3 + 1); // block index + k,j,i + for (int i = 0; i < ndim - 1; ++i) { // don't think about block index + REQUIRE(shape[i] == NSIDE); + } + } + } + } +} From 6b4c66f7a1a1a3f1ddf29a02513f25f5686be499 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 18 Mar 2024 18:24:53 -0600 Subject: [PATCH 19/58] switch ndim as output by VarInfo so it doesn't contain the block index, for clarity --- src/outputs/output_utils.hpp | 7 ++++--- src/outputs/parthenon_hdf5.cpp | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index d6781f0d693a..deb5bfc2d013 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -66,17 +66,18 @@ struct VarInfo { template int FillShape(const IndexDomain domain, T *data) { - int ndim = -1; + int ndim = -1; // number of elements of data that describe + // variable shape int nx3 = cellbounds.ncellsk(domain); int nx2 = cellbounds.ncellsj(domain); int nx1 = cellbounds.ncellsi(domain); if (where == MetadataFlag({Metadata::None})) { - ndim = tensor_rank + 1; + ndim = tensor_rank; for (int i = 0; i < tensor_rank; ++i) { data[i] = static_cast(rnx_[rnx_.size() - 
tensor_rank + i]); } } else if (where == MetadataFlag({Metadata::Cell})) { - ndim = 3 + tensor_rank + 1; + ndim = 3 + tensor_rank; for (int i = 0; i < tensor_rank; ++i) { data[i] = static_cast(rnx_[rnx_.size() - 3 - tensor_rank + i]); } diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index ccd7e174e4f7..dd6922e0de3b 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -320,7 +320,8 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm hsize_t global_count[H5_NDIM]; global_count[0] = static_cast(max_blocks_global); - int ndim = vinfo.FillShape(theDomain, &(local_count[1]), &(global_count[1])); + // block index + variable on block dimensions + int ndim = 1 + vinfo.FillShape(theDomain, &(local_count[1]), &(global_count[1])); #ifndef PARTHENON_DISABLE_HDF5_COMPRESSION // we need chunks to enable compression. Do not run the pipeline From dae3ea9d4eb7dd49df69722a51963c279e4820b0 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 18 Mar 2024 18:25:19 -0600 Subject: [PATCH 20/58] Fill in current utilized capability --- tst/unit/test_output_utils.cpp | 76 +++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 10 deletions(-) diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 50586eec7fa3..3d3f268e491d 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -37,6 +37,7 @@ using parthenon::BlockList_t; using parthenon::DevExecSpace; using parthenon::IndexDomain; +using parthenon::IndexShape; using parthenon::loop_pattern_mdrange_tag; using parthenon::MeshBlock; using parthenon::MeshBlockData; @@ -46,7 +47,6 @@ using parthenon::PackIndexMap; using parthenon::par_for; using parthenon::Real; using parthenon::StateDescriptor; -using parthenon::IndexShape; using namespace parthenon::OutputUtils; @@ -60,11 +60,19 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti constexpr auto entire = parthenon::IndexDomain::entire; parthenon::Globals::nghost = NG; - const std::string scalar_cell = "scalar_cell"; + auto pkg = std::make_shared("Test package"); + const std::string scalar_cell = "scalar_cell"; Metadata m({Metadata::Cell, Metadata::Independent}); - auto pkg = std::make_shared("Test package"); - pkg->AddField("scalar_cell", m); + pkg->AddField(scalar_cell, m); + + const std::string tensor_cell = "tensor_cell"; + m = Metadata({Metadata::Cell, Metadata::Independent}, std::vector{3, 4}); + pkg->AddField(tensor_cell, m); + + const std::string tensor_none = "tensor_none"; + m = Metadata({Metadata::None, Metadata::Independent}, std::vector{3, 4}); + pkg->AddField(tensor_none, m); auto pmb = std::make_shared(NSIDE, NDIM); auto pmbd = pmb->meshblock_data.Get(); @@ -72,25 +80,73 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti IndexShape cellbounds = pmb->cellbounds; THEN("The CellBounds object is reasonable") { - REQUIRE(cellbounds.ncellsk(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsk(entire) == NSIDE + 2 * NG); REQUIRE(cellbounds.ncellsk(interior) == NSIDE); - REQUIRE(cellbounds.ncellsj(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsj(entire) == NSIDE + 2 * NG); REQUIRE(cellbounds.ncellsj(interior) == NSIDE); - REQUIRE(cellbounds.ncellsi(entire) == NSIDE + 2*NG); + REQUIRE(cellbounds.ncellsi(entire) == NSIDE + 2 * NG); REQUIRE(cellbounds.ncellsi(interior) == NSIDE); } - WHEN("We Initialize VarInfo on a scalar cell var") { + WHEN("We initialize VarInfo on a 
scalar cell var") { auto v = pmbd->GetVarPtr(scalar_cell); VarInfo info(v, cellbounds); THEN("The shape is correct over both interior and entire") { std::vector shape(10); int ndim = info.FillShape(interior, shape.data()); - REQUIRE(ndim == 3 + 1); // block index + k,j,i - for (int i = 0; i < ndim - 1; ++i) { // don't think about block index + REQUIRE(ndim == 3); + for (int i = 0; i < ndim; ++i) { REQUIRE(shape[i] == NSIDE); } + + ndim = info.FillShape(entire, shape.data()); + REQUIRE(ndim == 3); + for (int i = 0; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 2 * NG); + } + } + } + + WHEN("We initialize VarInfo on a tensor cell var") { + auto v = pmbd->GetVarPtr(tensor_cell); + VarInfo info(v, cellbounds); + + THEN("The shape is correct over both interior and entire") { + std::vector shape(10); + int ndim = info.FillShape(interior, shape.data()); + REQUIRE(ndim == 5); + REQUIRE(shape[0] == 4); + REQUIRE(shape[1] == 3); + for (int i = 2; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE); + } + + ndim = info.FillShape(entire, shape.data()); + REQUIRE(ndim == 5); + REQUIRE(shape[0] == 4); + REQUIRE(shape[1] == 3); + for (int i = 2; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 2 * NG); + } + } + } + + WHEN("We initialize VarInfo on a tensor no-centering var") { + auto v = pmbd->GetVarPtr(tensor_none); + VarInfo info(v, cellbounds); + + THEN("The shape is correct over both interior and entire") { + std::vector shape(10); + int ndim = info.FillShape(interior, shape.data()); + REQUIRE(ndim == 2); + REQUIRE(shape[0] == 4); + REQUIRE(shape[1] == 3); + + ndim = info.FillShape(entire, shape.data()); + REQUIRE(ndim == 2); + REQUIRE(shape[0] == 4); + REQUIRE(shape[1] == 3); } } } From bb0e69bc8785fa97253e9d5c850af5705e0a3e85 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 18 Mar 2024 18:51:41 -0600 Subject: [PATCH 21/58] swap VarInfo to using just the straight up array nx --- src/interface/variable.hpp | 5 +++++ src/outputs/output_utils.hpp | 15 +++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/interface/variable.hpp b/src/interface/variable.hpp index 5877cca0eb05..db106ad47e5a 100644 --- a/src/interface/variable.hpp +++ b/src/interface/variable.hpp @@ -85,6 +85,11 @@ class Variable { return dims_[i - 1]; } + KOKKOS_FORCEINLINE_FUNCTION + auto GetDim() const { // TODO(JMM): should this be host-only? + return dims_; + } + KOKKOS_FORCEINLINE_FUNCTION auto GetCoarseDim(const int i) const { // we can't query coarse_s.GetDim() here because it may be unallocated diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index deb5bfc2d013..b46fb812c00a 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -107,12 +107,12 @@ struct VarInfo { // TODO(JMM): Separate this into an implementation file again? 
VarInfo(const std::string &label, const std::vector &component_labels_, - int num_components, int nx6, int nx5, int nx4, int nx3, int nx2, int nx1, - Metadata metadata, bool is_sparse, bool is_vector, const IndexShape &cellbounds) - : label(label), num_components(num_components), - nx_({nx1, nx2, nx3, nx4, nx5, nx6, 1}), tensor_rank(metadata.Shape().size()), - where(metadata.Where()), is_sparse(is_sparse), is_vector(is_vector), - cellbounds(cellbounds), rnx_(nx_.rbegin(), nx_.rend()) { + int num_components, std::array nx, Metadata metadata, + bool is_sparse, bool is_vector, const IndexShape &cellbounds) + : label(label), num_components(num_components), nx_(nx), + tensor_rank(metadata.Shape().size()), where(metadata.Where()), + is_sparse(is_sparse), is_vector(is_vector), cellbounds(cellbounds), + rnx_(nx_.rbegin(), nx_.rend()) { if (num_components <= 0) { std::stringstream msg; msg << "### ERROR: Got variable " << label << " with " << num_components @@ -146,8 +146,7 @@ struct VarInfo { explicit VarInfo(const std::shared_ptr> &var, const IndexShape &cellbounds) : VarInfo(var->label(), var->metadata().getComponentLabels(), var->NumComponents(), - var->GetDim(6), var->GetDim(5), var->GetDim(4), var->GetDim(3), - var->GetDim(2), var->GetDim(1), var->metadata(), var->IsSparse(), + var->GetDim(), var->metadata(), var->IsSparse(), var->IsSet(Metadata::Vector), cellbounds) {} private: From 46b65e498b48e6917aba68214c21ee70147608bc Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 18 Mar 2024 19:44:10 -0600 Subject: [PATCH 22/58] output utils now contains everything we need... some yuckiness ahead with 0 padding face arrays though --- src/outputs/output_utils.hpp | 56 +++++++++++++++++++++++++--------- tst/unit/test_output_utils.cpp | 49 +++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 14 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index b46fb812c00a..98b1e2cd6a94 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -19,6 +19,7 @@ #define OUTPUTS_OUTPUT_UTILS_HPP_ // C++ +#include #include #include #include @@ -56,6 +57,10 @@ struct VarInfo { bool is_vector; IndexShape cellbounds; std::vector component_labels; + std::vector topological_elements; + int ntop_elems; + bool element_matters; + int Size() const { return std::accumulate(nx_.begin(), nx_.end(), 1, std::multiplies()); } @@ -68,22 +73,36 @@ struct VarInfo { int FillShape(const IndexDomain domain, T *data) { int ndim = -1; // number of elements of data that describe // variable shape - int nx3 = cellbounds.ncellsk(domain); - int nx2 = cellbounds.ncellsj(domain); - int nx1 = cellbounds.ncellsi(domain); if (where == MetadataFlag({Metadata::None})) { ndim = tensor_rank; for (int i = 0; i < tensor_rank; ++i) { data[i] = static_cast(rnx_[rnx_.size() - tensor_rank + i]); } - } else if (where == MetadataFlag({Metadata::Cell})) { - ndim = 3 + tensor_rank; + } else { + // For nx1,nx2,nx3 find max storage required in each direction + // accross topological elements. Unused indices will be written but + // empty. 
+ int nx3 = 1, nx2 = 1, nx1 = 1; + for (auto el : topological_elements) { + nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); + nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); + nx1 = std::max(nx1, cellbounds.ncellsi(domain, el)); + } + // 3 cell indices, tensor rank, topological element index if needed + ndim = 3 + tensor_rank + element_matters; + // fill topological element, if relevant + if (element_matters) { + data[0] = ntop_elems; + } + // fill the tensor rank for (int i = 0; i < tensor_rank; ++i) { - data[i] = static_cast(rnx_[rnx_.size() - 3 - tensor_rank + i]); + data[i + element_matters] = + static_cast(rnx_[rnx_.size() - 3 - tensor_rank + i]); } - data[tensor_rank] = static_cast(nx3); - data[tensor_rank + 1] = static_cast(nx2); - data[tensor_rank + 2] = static_cast(nx1); + // fill cell indices + data[tensor_rank + element_matters] = static_cast(nx3); + data[tensor_rank + element_matters + 1] = static_cast(nx2); + data[tensor_rank + element_matters + 2] = static_cast(nx1); } return ndim; } @@ -102,17 +121,24 @@ struct VarInfo { auto GetShape() const { return std::vector(nx_.rbegin(), nx_.rend()); } + template + auto GetDim(int i) const { + PARTHENON_DEBUG_REQUIRE(0 < i && i < VNDIM, "Index out of bounds"); + return static_cast(nx_[i - 1]); + } VarInfo() = delete; // TODO(JMM): Separate this into an implementation file again? VarInfo(const std::string &label, const std::vector &component_labels_, int num_components, std::array nx, Metadata metadata, - bool is_sparse, bool is_vector, const IndexShape &cellbounds) + const std::vector topological_elements, bool is_sparse, + bool is_vector, const IndexShape &cellbounds) : label(label), num_components(num_components), nx_(nx), tensor_rank(metadata.Shape().size()), where(metadata.Where()), - is_sparse(is_sparse), is_vector(is_vector), cellbounds(cellbounds), - rnx_(nx_.rbegin(), nx_.rend()) { + topological_elements(topological_elements), is_sparse(is_sparse), + is_vector(is_vector), cellbounds(cellbounds), rnx_(nx_.rbegin(), nx_.rend()), + ntop_elems(topological_elements.size()), element_matters(ntop_elems > 1) { if (num_components <= 0) { std::stringstream msg; msg << "### ERROR: Got variable " << label << " with " << num_components @@ -146,10 +172,12 @@ struct VarInfo { explicit VarInfo(const std::shared_ptr> &var, const IndexShape &cellbounds) : VarInfo(var->label(), var->metadata().getComponentLabels(), var->NumComponents(), - var->GetDim(), var->metadata(), var->IsSparse(), - var->IsSet(Metadata::Vector), cellbounds) {} + var->GetDim(), var->metadata(), var->GetTopologicalElements(), + var->IsSparse(), var->IsSet(Metadata::Vector), cellbounds) {} private: + // TODO(JMM): Probably nx_ and rnx_ both not necessary... but it was + // easiest for me to reason about it this way. 
std::array nx_; std::vector rnx_; }; diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 3d3f268e491d..956b4e2c18eb 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -74,6 +74,16 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti m = Metadata({Metadata::None, Metadata::Independent}, std::vector{3, 4}); pkg->AddField(tensor_none, m); + // four-vector-valued var with a vector at each face + const std::string vector_face = "four_vector_face"; + m = Metadata({Metadata::Face, Metadata::Independent}, std::vector{4}); + pkg->AddField(vector_face, m); + + // vector-valued var with a single value at each edge + const std::string scalar_edge = "scalar_edge"; + m = Metadata({Metadata::Edge, Metadata::Independent}); + pkg->AddField(scalar_edge, m); + auto pmb = std::make_shared(NSIDE, NDIM); auto pmbd = pmb->meshblock_data.Get(); pmbd->Initialize(pkg, pmb); @@ -149,5 +159,44 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[1] == 3); } } + + WHEN("We initialize VarInfo on a vector face var") { + auto v = pmbd->GetVarPtr(vector_face); + VarInfo info(v, cellbounds); + + THEN("The shape is correct over both interior and entire") { + std::vector shape(10); + int ndim = info.FillShape(interior, shape.data()); + REQUIRE(ndim == 5); + REQUIRE(shape[0] == 3); + REQUIRE(shape[1] == 4); + for (int i = 2; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 1); + } + info.FillShape(entire, shape.data()); + for (int i = 2; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 2 * NG + 1); + } + } + } + + WHEN("We initialize VarInfo on a scaler edge var") { + auto v = pmbd->GetVarPtr(scalar_edge); + VarInfo info(v, cellbounds); + + THEN("The shape is correct over both interior and entire") { + std::vector shape(10); + int ndim = info.FillShape(interior, shape.data()); + REQUIRE(ndim == 4); + REQUIRE(shape[0] == 3); + for (int i = 1; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 1); + } + info.FillShape(entire, shape.data()); + for (int i = 1; i < ndim; ++i) { + REQUIRE(shape[i] == NSIDE + 2 * NG + 1); + } + } + } } } From 9c86036bb55ad0d2a32385e22046bbf6b7cd3246 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Fri, 15 Mar 2024 14:50:52 +0100 Subject: [PATCH 23/58] Rename restart to restart_hdf5 --- src/outputs/{restart.cpp => restart_hdf5.cpp} | 0 src/outputs/{restart.hpp => restart_hdf5.hpp} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename src/outputs/{restart.cpp => restart_hdf5.cpp} (100%) rename src/outputs/{restart.hpp => restart_hdf5.hpp} (100%) diff --git a/src/outputs/restart.cpp b/src/outputs/restart_hdf5.cpp similarity index 100% rename from src/outputs/restart.cpp rename to src/outputs/restart_hdf5.cpp diff --git a/src/outputs/restart.hpp b/src/outputs/restart_hdf5.hpp similarity index 100% rename from src/outputs/restart.hpp rename to src/outputs/restart_hdf5.hpp From 2be7e5d6fedbea176d9fc7d73520da35d391bfc1 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Mon, 18 Mar 2024 15:47:29 +0100 Subject: [PATCH 24/58] WIP abstract RestartReader --- src/outputs/restart_hdf5.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/outputs/restart_hdf5.hpp b/src/outputs/restart_hdf5.hpp index 69c301d30702..013800fc5d59 100644 --- a/src/outputs/restart_hdf5.hpp +++ b/src/outputs/restart_hdf5.hpp @@ -14,8 +14,8 @@ // license in this material to reproduce, prepare derivative works, distribute copies to // the public, perform publicly and display 
publicly, and to permit others to do so. //======================================================================================== -#ifndef OUTPUTS_RESTART_HPP_ -#define OUTPUTS_RESTART_HPP_ +#ifndef OUTPUTS_RESTART_HDF5_HPP_ +#define OUTPUTS_RESTART_HDF5_HPP_ //! \file io_wrapper.hpp // \brief defines a set of small wrapper functions for MPI versus Serial Output. @@ -342,4 +342,4 @@ class RestartReader { }; } // namespace parthenon -#endif // OUTPUTS_RESTART_HPP_ +#endif // OUTPUTS_RESTART_HDF5_HPP_ From b882570afe4ec308b7f6cebb763b06d8f07afde9 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Mon, 18 Mar 2024 23:39:15 +0100 Subject: [PATCH 25/58] WIP separating RestartReader --- src/CMakeLists.txt | 4 +- src/mesh/mesh.cpp | 1 + src/outputs/restart.hpp | 115 +++++++++++++++++++++++++++++++++++ src/outputs/restart_hdf5.cpp | 13 ++-- src/outputs/restart_hdf5.hpp | 5 +- src/parthenon_manager.cpp | 11 +++- 6 files changed, 139 insertions(+), 10 deletions(-) create mode 100644 src/outputs/restart.hpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 26f33f32b4ef..99a47e538b3b 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -191,7 +191,9 @@ add_library(parthenon outputs/parthenon_xdmf.cpp outputs/parthenon_hdf5.hpp outputs/parthenon_xdmf.hpp - outputs/restart.cpp + outputs/restart.hpp + outputs/restart_hdf5.cpp + outputs/restart_hdf5.hpp outputs/vtk.cpp parthenon/driver.hpp diff --git a/src/mesh/mesh.cpp b/src/mesh/mesh.cpp index cee6af316fc8..2f327628c0d7 100644 --- a/src/mesh/mesh.cpp +++ b/src/mesh/mesh.cpp @@ -48,6 +48,7 @@ #include "mesh/meshblock.hpp" #include "mesh/meshblock_tree.hpp" #include "outputs/restart.hpp" +#include "outputs/restart_hdf5.hpp" #include "parameter_input.hpp" #include "parthenon_arrays.hpp" #include "prolong_restrict/prolong_restrict.hpp" diff --git a/src/outputs/restart.hpp b/src/outputs/restart.hpp new file mode 100644 index 000000000000..57cb8f10de48 --- /dev/null +++ b/src/outputs/restart.hpp @@ -0,0 +1,115 @@ +//======================================================================================== +// Parthenon performance portable AMR framework +// Copyright(C) 2020-2024 The Parthenon collaboration +// Licensed under the 3-clause BSD License, see LICENSE file for details +//======================================================================================== +// (C) (or copyright) 2020-2021. Triad National Security, LLC. All rights reserved. +// +// This program was produced under U.S. Government contract 89233218CNA000001 for Los +// Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC +// for the U.S. Department of Energy/National Nuclear Security Administration. All rights +// in the program are reserved by Triad National Security, LLC, and the U.S. Department +// of Energy/National Nuclear Security Administration. The Government is granted for +// itself and others acting on its behalf a nonexclusive, paid-up, irrevocable worldwide +// license in this material to reproduce, prepare derivative works, distribute copies to +// the public, perform publicly and display publicly, and to permit others to do so. +//======================================================================================== +#ifndef OUTPUTS_RESTART_HPP_ +#define OUTPUTS_RESTART_HPP_ +//! \file io_wrapper.hpp +// \brief defines a set of small wrapper functions for MPI versus Serial Output. 
+ +#include +#include +#include +#include +#include + +#include "mesh/domain.hpp" +#include "utils/error_checking.hpp" + +namespace parthenon { + +class Mesh; +class Param; + +class RestartReader { + public: + RestartReader(); + virtual ~RestartReader() = default; + + struct SparseInfo { + // labels of sparse fields (full label, i.e. base name and sparse id) + std::vector labels; + + // allocation status of sparse fields (2D array outer dimension: block, inner + // dimension: sparse field) + // can't use std::vector here because std::vector is the same as + // std::vector and it doesn't have .data() member + std::unique_ptr allocated; + + int num_blocks = 0; + int num_sparse = 0; + + bool IsAllocated(int block, int sparse_field_idx) const { + PARTHENON_REQUIRE_THROWS(allocated != nullptr, + "Tried to get allocation status but no data present"); + PARTHENON_REQUIRE_THROWS((block >= 0) && (block < num_blocks), + "Invalid block index in SparseInfo::IsAllocated"); + PARTHENON_REQUIRE_THROWS((sparse_field_idx >= 0) && (sparse_field_idx < num_sparse), + "Invalid sparse field index in SparseInfo::IsAllocated"); + + return allocated[block * num_sparse + sparse_field_idx]; + } + }; + + SparseInfo GetSparseInfo() const; + + // Return output format version number. Return -1 if not existent. + int GetOutputFormatVersion() const; + + public: + // Gets data for all blocks on current rank. + // Assumes blocks are contiguous + // fills internal data for given pointer + template + void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, + const std::vector &bsize, int file_output_format_version, + MetadataFlag where, const std::vector &shape = {}) const; + + // Gets the data from a swarm var on current rank. Assumes all + // blocks are contiguous. Fills dataVec based on shape from swarmvar + // metadata. + template + void ReadSwarmVar(const std::string &swarmname, const std::string &varname, + const std::size_t count, const std::size_t offset, const Metadata &m, + std::vector &dataVec); + + // Reads an array dataset from file as a 1D vector. + template + std::vector ReadDataset(const std::string &name) const; + + template + std::vector GetAttrVec(const std::string &location, const std::string &name) const; + + template + T GetAttr(const std::string &location, const std::string &name) const; + + // Gets the counts and offsets for MPI ranks for the meshblocks set + // by the indexrange. Returns the total count on this rank. + std::size_t GetSwarmCounts(const std::string &swarm, const IndexRange &range, + std::vector &counts, + std::vector &offsets); + + void ReadParams(const std::string &name, Params &p); + + // closes out the restart file + // perhaps belongs in a destructor? + void Close(); + + // Does file have ghost cells? + int hasGhost; +}; + +} // namespace parthenon +#endif // OUTPUTS_RESTART_HDF5_HPP_ diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index d986d233bd1f..67eb99b1df85 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -1,6 +1,6 @@ //======================================================================================== // Parthenon performance portable AMR framework -// Copyright(C) 2020-2022 The Parthenon collaboration +// Copyright(C) 2020-2024 The Parthenon collaboration // Licensed under the 3-clause BSD License, see LICENSE file for details //======================================================================================== // (C) (or copyright) 2020-2021. Triad National Security, LLC. 
All rights reserved. @@ -31,6 +31,7 @@ #include "outputs/parthenon_hdf5.hpp" #endif #include "outputs/restart.hpp" +#include "outputs/restart_hdf5.hpp" #include "utils/error_checking.hpp" namespace parthenon { @@ -38,7 +39,7 @@ namespace parthenon { //---------------------------------------------------------------------------------------- //! \fn void RestartReader::RestartReader(const std::string filename) // \brief Opens the restart file and stores appropriate file handle in fh_ -RestartReader::RestartReader(const char *filename) : filename_(filename) { +RestartReaderHDF5::RestartReaderHDF5(const char *filename) : filename_(filename) { #ifndef ENABLE_HDF5 std::stringstream msg; msg << "### FATAL ERROR in Restart (Reader) constructor" << std::endl @@ -54,7 +55,7 @@ RestartReader::RestartReader(const char *filename) : filename_(filename) { #endif // ENABLE_HDF5 } -int RestartReader::GetOutputFormatVersion() const { +int RestartReaderHDF5::GetOutputFormatVersion() const { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); #else // HDF5 enabled @@ -69,7 +70,7 @@ int RestartReader::GetOutputFormatVersion() const { #endif // ENABLE_HDF5 } -RestartReader::SparseInfo RestartReader::GetSparseInfo() const { +RestartReaderHDF5::SparseInfo RestartReaderHDF5::GetSparseInfo() const { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); #else // HDF5 enabled @@ -107,7 +108,7 @@ RestartReader::SparseInfo RestartReader::GetSparseInfo() const { // Gets the counts and offsets for MPI ranks for the meshblocks set // by the indexrange. Returns the total count on this rank. -std::size_t RestartReader::GetSwarmCounts(const std::string &swarm, +std::size_t RestartReaderHDF5::GetSwarmCounts(const std::string &swarm, const IndexRange &range, std::vector &counts, std::vector &offsets) { @@ -141,7 +142,7 @@ std::size_t RestartReader::GetSwarmCounts(const std::string &swarm, #endif // ENABLE_HDF5 } -void RestartReader::ReadParams(const std::string &name, Params &p) { +void RestartReaderHDF5::ReadParams(const std::string &name, Params &p) { #ifdef ENABLE_HDF5 p.ReadFromRestart(name, params_group_); #endif // ENABLE_HDF5 diff --git a/src/outputs/restart_hdf5.hpp b/src/outputs/restart_hdf5.hpp index 013800fc5d59..5b0f43fa4091 100644 --- a/src/outputs/restart_hdf5.hpp +++ b/src/outputs/restart_hdf5.hpp @@ -26,6 +26,7 @@ #include #include "config.hpp" +#include "outputs/restart.hpp" #ifdef ENABLE_HDF5 #include @@ -54,9 +55,9 @@ namespace parthenon { class Mesh; class Param; -class RestartReader { +class RestartReaderHDF5 : public RestartReader{ public: - explicit RestartReader(const char *theFile); + RestartReaderHDF5(const char *theFile); struct SparseInfo { // labels of sparse fields (full label, i.e. 
base name and sparse id) diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 814a8c6f094f..853eb989d69e 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -29,6 +29,9 @@ #include "amr_criteria/refinement_package.hpp" #include "config.hpp" #include "driver/driver.hpp" +#include "outputs/restart.hpp" +#include "outputs/restart_hdf5.hpp" +#include FS_HEADER #include "globals.hpp" #include "interface/update.hpp" #include "mesh/domain.hpp" @@ -38,6 +41,8 @@ #include "utils/error_checking.hpp" #include "utils/utils.hpp" +namespace fs = FS_NAMESPACE; + namespace parthenon { ParthenonStatus ParthenonManager::ParthenonInitEnv(int argc, char *argv[]) { @@ -100,7 +105,11 @@ ParthenonStatus ParthenonManager::ParthenonInitEnv(int argc, char *argv[]) { pinput = std::make_unique(arg.input_filename); } else if (arg.res_flag != 0) { // Read input from restart file - restartReader = std::make_unique(arg.restart_filename); + if (fs::path(arg.restart_filename).extension() == ".rhdf") { + restartReader = std::make_unique(arg.restart_filename); + } else { + PARTHENON_FAIL("HELP!"); + } // Load input stream pinput = std::make_unique(); From 70671c9c51b1166992d1dc8be668ad89a0170731 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Tue, 19 Mar 2024 16:29:12 +0100 Subject: [PATCH 26/58] Make RestartReader abstract --- src/mesh/mesh.cpp | 35 +++++++++---------- src/outputs/restart.hpp | 62 +++++++++++++++++++-------------- src/outputs/restart_hdf5.cpp | 37 ++++++++++++++++++-- src/outputs/restart_hdf5.hpp | 67 ++++++++++++++++-------------------- src/parthenon_manager.cpp | 9 ++--- 5 files changed, 121 insertions(+), 89 deletions(-) diff --git a/src/mesh/mesh.cpp b/src/mesh/mesh.cpp index 2f327628c0d7..edd010ef72df 100644 --- a/src/mesh/mesh.cpp +++ b/src/mesh/mesh.cpp @@ -104,8 +104,8 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, Packages_t &packages, // private members: num_mesh_threads_(pin->GetOrAddInteger("parthenon/mesh", "num_threads", 1)), tree(this), use_uniform_meshgen_fn_{true, true, true, true}, lb_flag_(true), - lb_automatic_(), - lb_manual_(), MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { + lb_automatic_(), lb_manual_(), + MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { std::stringstream msg; RegionSize block_size; BoundaryFlag block_bcs[6]; @@ -486,8 +486,8 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, // private members: num_mesh_threads_(pin->GetOrAddInteger("parthenon/mesh", "num_threads", 1)), tree(this), use_uniform_meshgen_fn_{true, true, true, true}, lb_flag_(true), - lb_automatic_(), - lb_manual_(), MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { + lb_automatic_(), lb_manual_(), + MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { std::stringstream msg; RegionSize block_size; BoundaryFlag block_bcs[6]; @@ -506,13 +506,14 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, // read the restart file // the file is already open and the pointer is set to after + auto mesh_info = rr.GetMeshInfo(); // All ranks read HDF file - nbnew = rr.GetAttr("Info", "NBNew"); - nbdel = rr.GetAttr("Info", "NBDel"); - nbtotal = rr.GetAttr("Info", "NumMeshBlocks"); - root_level = rr.GetAttr("Info", "RootLevel"); + nbnew = mesh_info.nbnew; + nbdel = mesh_info.nbdel; + nbtotal = mesh_info.nbtotal; + root_level = mesh_info.root_level; - const auto bc = rr.GetAttrVec("Info", "BoundaryConditions"); + const auto 
bc = mesh_info.bound_cond; for (int i = 0; i < 6; i++) { block_bcs[i] = GetBoundaryFlag(bc[i]); } @@ -538,7 +539,7 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, } EnrollBndryFncts_(app_in); - const auto grid_dim = rr.GetAttrVec("Info", "RootGridDomain"); + const auto grid_dim = mesh_info.grid_dim; mesh_size.xmin(X1DIR) = grid_dim[0]; mesh_size.xmax(X1DIR) = grid_dim[1]; mesh_size.xrat(X1DIR) = grid_dim[2]; @@ -554,14 +555,11 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, // initialize loclist = std::vector(nbtotal); - const auto blockSize = rr.GetAttrVec("Info", "MeshBlockSize"); - const auto includesGhost = rr.GetAttr("Info", "IncludesGhost"); - const auto nGhost = rr.GetAttr("Info", "NGhost"); - for (auto &dir : {X1DIR, X2DIR, X3DIR}) { block_size.xrat(dir) = mesh_size.xrat(dir); - block_size.nx(dir) = - blockSize[dir - 1] - (blockSize[dir - 1] > 1) * includesGhost * 2 * nGhost; + block_size.nx(dir) = mesh_info.block_size[dir - 1] - + (mesh_info.block_size[dir - 1] > 1) * mesh_info.includes_ghost * + 2 * mesh_info.n_ghost; if (block_size.nx(dir) == 1) { block_size.symmetry(dir) = true; mesh_size.symmetry(dir) = true; @@ -595,9 +593,8 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, InitUserMeshData(this, pin); // Populate logical locations - auto lx123 = rr.ReadDataset("/Blocks/loc.lx123"); - auto locLevelGidLidCnghostGflag = - rr.ReadDataset("/Blocks/loc.level-gid-lid-cnghost-gflag"); + auto lx123 = mesh_info.lx123; + auto locLevelGidLidCnghostGflag = mesh_info.level_gid_lid_cnghost_gflag; current_level = -1; for (int i = 0; i < nbtotal; i++) { loclist[i] = LogicalLocation(locLevelGidLidCnghostGflag[5 * i], lx123[3 * i], diff --git a/src/outputs/restart.hpp b/src/outputs/restart.hpp index 57cb8f10de48..2ee20afc01dc 100644 --- a/src/outputs/restart.hpp +++ b/src/outputs/restart.hpp @@ -35,7 +35,7 @@ class Param; class RestartReader { public: - RestartReader(); + RestartReader() = default; virtual ~RestartReader() = default; struct SparseInfo { @@ -63,45 +63,55 @@ class RestartReader { } }; - SparseInfo GetSparseInfo() const; + [[nodiscard]] virtual SparseInfo GetSparseInfo() const = 0; + + struct MeshInfo { + int nbnew, nbdel, nbtotal, root_level, includes_ghost, n_ghost; + std::vector bound_cond; + std::vector block_size; + std::vector grid_dim; + std::vector lx123; + std::vector level_gid_lid_cnghost_gflag; // what's this?! + }; + [[nodiscard]] virtual MeshInfo GetMeshInfo() const = 0; + + struct TimeInfo { + Real time, dt; + int ncycle; + }; + [[nodiscard]] virtual TimeInfo GetTimeInfo() const = 0; + + [[nodiscard]] virtual std::string GetInputString() const = 0; // Return output format version number. Return -1 if not existent. - int GetOutputFormatVersion() const; + [[nodiscard]] virtual int GetOutputFormatVersion() const = 0; - public: // Gets data for all blocks on current rank. // Assumes blocks are contiguous // fills internal data for given pointer - template - void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, - const std::vector &bsize, int file_output_format_version, - MetadataFlag where, const std::vector &shape = {}) const; + virtual void ReadBlocks(const std::string &name, IndexRange range, + std::vector &dataVec, const std::vector &bsize, + int file_output_format_version, MetadataFlag where, + const std::vector &shape = {}) const = 0; // Gets the data from a swarm var on current rank. Assumes all // blocks are contiguous. 
Fills dataVec based on shape from swarmvar // metadata. - template - void ReadSwarmVar(const std::string &swarmname, const std::string &varname, - const std::size_t count, const std::size_t offset, const Metadata &m, - std::vector &dataVec); - - // Reads an array dataset from file as a 1D vector. - template - std::vector ReadDataset(const std::string &name) const; - - template - std::vector GetAttrVec(const std::string &location, const std::string &name) const; - - template - T GetAttr(const std::string &location, const std::string &name) const; + virtual void ReadSwarmVar(const std::string &swarmname, const std::string &varname, + const std::size_t count, const std::size_t offset, + const Metadata &m, std::vector &dataVec) = 0; + virtual void ReadSwarmVar(const std::string &swarmname, const std::string &varname, + const std::size_t count, const std::size_t offset, + const Metadata &m, std::vector &dataVec) = 0; // Gets the counts and offsets for MPI ranks for the meshblocks set // by the indexrange. Returns the total count on this rank. - std::size_t GetSwarmCounts(const std::string &swarm, const IndexRange &range, - std::vector &counts, - std::vector &offsets); + [[nodiscard]] virtual std::size_t GetSwarmCounts(const std::string &swarm, + const IndexRange &range, + std::vector &counts, + std::vector &offsets) = 0; - void ReadParams(const std::string &name, Params &p); + virtual void ReadParams(const std::string &name, Params &p) = 0; // closes out the restart file // perhaps belongs in a destructor? diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index 67eb99b1df85..ceba14d72309 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -106,12 +106,43 @@ RestartReaderHDF5::SparseInfo RestartReaderHDF5::GetSparseInfo() const { #endif // ENABLE_HDF5 } +RestartReaderHDF5::MeshInfo RestartReaderHDF5::GetMeshInfo() const { + RestartReaderHDF5::MeshInfo mesh_info; + mesh_info.nbnew = GetAttr("Info", "NBNew"); + mesh_info.nbdel = GetAttr("Info", "NBDel"); + mesh_info.nbtotal = GetAttr("Info", "NumMeshBlocks"); + mesh_info.root_level = GetAttr("Info", "RootLevel"); + + mesh_info.bound_cond = GetAttrVec("Info", "BoundaryConditions"); + + mesh_info.block_size = GetAttrVec("Info", "MeshBlockSize"); + mesh_info.includes_ghost = GetAttr("Info", "IncludesGhost"); + mesh_info.n_ghost = GetAttr("Info", "NGhost"); + + mesh_info.grid_dim = GetAttrVec("Info", "RootGridDomain"); + + mesh_info.lx123 = ReadDataset("/Blocks/loc.lx123"); + mesh_info.level_gid_lid_cnghost_gflag = + ReadDataset("/Blocks/loc.level-gid-lid-cnghost-gflag"); + + return mesh_info; +} + +RestartReaderHDF5::TimeInfo RestartReaderHDF5::GetTimeInfo() const { + RestartReaderHDF5::TimeInfo time_info; + + time_info.time = GetAttr("Info", "Time"); + time_info.dt = GetAttr("Info", "dt"); + time_info.ncycle = GetAttr("Info", "NCycle"); + + return time_info; +} // Gets the counts and offsets for MPI ranks for the meshblocks set // by the indexrange. Returns the total count on this rank. 
std::size_t RestartReaderHDF5::GetSwarmCounts(const std::string &swarm, - const IndexRange &range, - std::vector &counts, - std::vector &offsets) { + const IndexRange &range, + std::vector &counts, + std::vector &offsets) { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); return 0; diff --git a/src/outputs/restart_hdf5.hpp b/src/outputs/restart_hdf5.hpp index 5b0f43fa4091..65eafdec1c57 100644 --- a/src/outputs/restart_hdf5.hpp +++ b/src/outputs/restart_hdf5.hpp @@ -55,39 +55,22 @@ namespace parthenon { class Mesh; class Param; -class RestartReaderHDF5 : public RestartReader{ +class RestartReaderHDF5 : public RestartReader { public: - RestartReaderHDF5(const char *theFile); - - struct SparseInfo { - // labels of sparse fields (full label, i.e. base name and sparse id) - std::vector labels; - - // allocation status of sparse fields (2D array outer dimension: block, inner - // dimension: sparse field) - // can't use std::vector here because std::vector is the same as - // std::vector and it doesn't have .data() member - std::unique_ptr allocated; - - int num_blocks = 0; - int num_sparse = 0; - - bool IsAllocated(int block, int sparse_field_idx) const { - PARTHENON_REQUIRE_THROWS(allocated != nullptr, - "Tried to get allocation status but no data present"); - PARTHENON_REQUIRE_THROWS((block >= 0) && (block < num_blocks), - "Invalid block index in SparseInfo::IsAllocated"); - PARTHENON_REQUIRE_THROWS((sparse_field_idx >= 0) && (sparse_field_idx < num_sparse), - "Invalid sparse field index in SparseInfo::IsAllocated"); - - return allocated[block * num_sparse + sparse_field_idx]; - } - }; + RestartReaderHDF5(const char *filename); + + [[nodiscard]] SparseInfo GetSparseInfo() const override; + + [[nodiscard]] MeshInfo GetMeshInfo() const override; - SparseInfo GetSparseInfo() const; + [[nodiscard]] TimeInfo GetTimeInfo() const override; + + [[nodiscard]] std::string GetInputString() const override { + return GetAttr("Input", "File"); + }; // Return output format version number. Return -1 if not existent. - int GetOutputFormatVersion() const; + [[nodiscard]] int GetOutputFormatVersion() const override; private: struct DatasetHandle { @@ -144,14 +127,13 @@ class RestartReaderHDF5 : public RestartReader{ // Gets data for all blocks on current rank. 
// Assumes blocks are contiguous // fills internal data for given pointer - template - void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, + void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, const std::vector &bsize, int file_output_format_version, - MetadataFlag where, const std::vector &shape = {}) const { + MetadataFlag where, const std::vector &shape = {}) const override { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); #else // HDF5 enabled - auto hdl = OpenDataset(name); + auto hdl = OpenDataset(name); constexpr int CHUNK_MAX_DIM = 7; @@ -315,14 +297,25 @@ class RestartReaderHDF5 : public RestartReader{ return res[0]; } + void ReadSwarmVar(const std::string &swarmname, const std::string &varname, + const std::size_t count, const std::size_t offset, const Metadata &m, + std::vector &dataVec) override { + ReadSwarmVar<>(swarmname, varname, count, offset, m, dataVec); + }; + void ReadSwarmVar(const std::string &swarmname, const std::string &varname, + const std::size_t count, const std::size_t offset, const Metadata &m, + std::vector &dataVec) override { + ReadSwarmVar<>(swarmname, varname, count, offset, m, dataVec); + }; // Gets the counts and offsets for MPI ranks for the meshblocks set // by the indexrange. Returns the total count on this rank. - std::size_t GetSwarmCounts(const std::string &swarm, const IndexRange &range, - std::vector &counts, - std::vector &offsets); + [[nodiscard]] std::size_t GetSwarmCounts(const std::string &swarm, + const IndexRange &range, + std::vector &counts, + std::vector &offsets) override; - void ReadParams(const std::string &name, Params &p); + void ReadParams(const std::string &name, Params &p) override; // closes out the restart file // perhaps belongs in a destructor? 
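With the reader now split into the abstract RestartReader and the HDF5-backed
RestartReaderHDF5, another restart format would come in as one more derived
class (overriding every pure virtual above: GetSparseInfo, GetMeshInfo,
GetTimeInfo, GetInputString, GetOutputFormatVersion, ReadBlocks, both
ReadSwarmVar overloads, GetSwarmCounts, and ReadParams) plus one extra branch
in the extension dispatch in ParthenonInitEnv. A minimal sketch of that
dispatch, where RestartReaderNewFormat and the ".newfmt" extension are purely
hypothetical placeholders:

  const auto ext = fs::path(arg.restart_filename).extension();
  if (ext == ".rhdf") {
    restartReader = std::make_unique<RestartReaderHDF5>(arg.restart_filename);
  } else if (ext == ".newfmt") { // hypothetical extension for a hypothetical backend
    restartReader = std::make_unique<RestartReaderNewFormat>(arg.restart_filename);
  } else {
    PARTHENON_FAIL("Unsupported restart file format.");
  }

The Mesh constructor and the rest of ParthenonManager only see the
RestartReader interface, so no other call site needs to change.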
diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 853eb989d69e..dadfde12dcab 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -113,7 +113,7 @@ ParthenonStatus ParthenonManager::ParthenonInitEnv(int argc, char *argv[]) { // Load input stream pinput = std::make_unique(); - auto inputString = restartReader->GetAttr("Input", "File"); + auto inputString = restartReader->GetInputString(); std::istringstream is(inputString); pinput->LoadFromStream(is); } @@ -176,13 +176,14 @@ void ParthenonManager::ParthenonInitPackagesAndMesh() { std::make_unique(pinput.get(), app_input.get(), *restartReader, packages); // Read simulation time and cycle from restart file and set in input - Real tNow = restartReader->GetAttr("Info", "Time"); + const auto time_info = restartReader->GetTimeInfo(); + Real tNow = time_info.time; pinput->SetReal("parthenon/time", "start_time", tNow); - Real dt = restartReader->GetAttr("Info", "dt"); + Real dt = time_info.dt; pinput->SetReal("parthenon/time", "dt", dt); - int ncycle = restartReader->GetAttr("Info", "NCycle"); + int ncycle = time_info.ncycle; pinput->SetInteger("parthenon/time", "ncycle", ncycle); // Read package data from restart file From 7f781489afc4602037b3e650308d54c80345d856 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Tue, 19 Mar 2024 16:40:13 +0100 Subject: [PATCH 27/58] Add Changelog --- CHANGELOG.md | 1 + src/mesh/mesh.cpp | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb256e6a43bc..c0b39472cae1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ - [[PR978]](https://github.com/parthenon-hpc-lab/parthenon/pull/978) remove erroneous sparse check ### Infrastructure (changes irrelevant to downstream codes) +- [[PR 1027]](https://github.com/parthenon-hpc-lab/parthenon/pull/1027) Refactor RestartReader as abstract class - [[PR 1017]](https://github.com/parthenon-hpc-lab/parthenon/pull/1017) Make regression tests more verbose on failure - [[PR 1007]](https://github.com/parthenon-hpc-lab/parthenon/pull/1007) Split template instantiations for HDF5 Read/Write attributes to speed up compile times - [[PR 990]](https://github.com/parthenon-hpc-lab/parthenon/pull/990) Partial refactor of HDF5 I/O code for readability/extendability diff --git a/src/mesh/mesh.cpp b/src/mesh/mesh.cpp index edd010ef72df..1b9a4e68a9ce 100644 --- a/src/mesh/mesh.cpp +++ b/src/mesh/mesh.cpp @@ -104,8 +104,8 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, Packages_t &packages, // private members: num_mesh_threads_(pin->GetOrAddInteger("parthenon/mesh", "num_threads", 1)), tree(this), use_uniform_meshgen_fn_{true, true, true, true}, lb_flag_(true), - lb_automatic_(), lb_manual_(), - MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { + lb_automatic_(), + lb_manual_(), MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { std::stringstream msg; RegionSize block_size; BoundaryFlag block_bcs[6]; @@ -486,8 +486,8 @@ Mesh::Mesh(ParameterInput *pin, ApplicationInput *app_in, RestartReader &rr, // private members: num_mesh_threads_(pin->GetOrAddInteger("parthenon/mesh", "num_threads", 1)), tree(this), use_uniform_meshgen_fn_{true, true, true, true}, lb_flag_(true), - lb_automatic_(), lb_manual_(), - MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { + lb_automatic_(), + lb_manual_(), MeshBndryFnctn{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr} { std::stringstream msg; RegionSize block_size; BoundaryFlag 
block_bcs[6]; From b63288653b96178f829bb47b2df9c54b107bc63b Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Tue, 19 Mar 2024 10:00:46 -0600 Subject: [PATCH 28/58] VarInfo now produces padded shapes for topological elements --- src/outputs/output_utils.hpp | 50 ++++++++++++++++++++++++++++------ src/outputs/parthenon_hdf5.cpp | 8 +----- tst/unit/test_output_utils.cpp | 21 ++++++++++++++ 3 files changed, 64 insertions(+), 15 deletions(-) diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 98b1e2cd6a94..be5239d3a252 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -61,12 +61,51 @@ struct VarInfo { int ntop_elems; bool element_matters; + auto GetNKJI(const IndexDomain domain) const { + int nx3 = 1, nx2 = 1, nx1 = 1; + for (auto el : topological_elements) { + nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); + nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); + nx1 = std::max(nx1, cellbounds.ncellsi(domain, el)); + } + return std::make_tuple(nx3, nx2, nx1); + } + auto GetPaddedBoundsKJI(const IndexDomain domain) const { + int ks = 1, ke = 1, js = 1, je = 1, is = 1, ie = 1; + for (auto el : topological_elements) { + auto kb = cellbounds.GetBoundsK(domain); + auto jb = cellbounds.GetBoundsJ(domain); + auto ib = cellbounds.GetBoundsI(domain); + ks = kb.s; // pads are only upper indices + js = jb.s; + is = ib.s; + ke = std::min(ke, kb.e); + je = std::min(je, jb.e); + ie = std::min(ie, ib.e); + } + IndexRange kb{ks, ke}, jb{is, ie}, ib{is, ie}; + return std::make_tuple(kb, jb, ib); + } + int Size() const { return std::accumulate(nx_.begin(), nx_.end(), 1, std::multiplies()); } + // Includes topological element shape int TensorSize() const { - // std::accumulate(nx.begin(), nx.end() - 3, 1, std::multiplies()); - return nx_[5] * nx_[4] * nx_[3]; + if (where == MetadataFlag({Metadata::None})) { + return Size(); + } else { + return std::accumulate(rnx_.begin(), rnx_.end() - 3, 1, std::multiplies()); + } + // return nx_[5] * nx_[4] * nx_[3]; + } + int FillSize(const IndexDomain domain) const { + if (where == MetadataFlag({Metadata::None})) { + return Size(); + } else { + auto [n3, n2, n1] = GetNKJI(domain); + return TensorSize() * n3 * n2 * n1; + } } template @@ -82,12 +121,7 @@ struct VarInfo { // For nx1,nx2,nx3 find max storage required in each direction // accross topological elements. Unused indices will be written but // empty. - int nx3 = 1, nx2 = 1, nx1 = 1; - for (auto el : topological_elements) { - nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); - nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); - nx1 = std::max(nx1, cellbounds.ncellsi(domain, el)); - } + auto [nx3, nx2, nx1] = GetNKJI(domain); // 3 cell indices, tensor rank, topological element index if needed ndim = 3 + tensor_rank + element_matters; // fill topological element, if relevant diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index dd6922e0de3b..48a402e47999 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -376,13 +376,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (!is_allocated) { if (vinfo.is_sparse) { - hsize_t varSize{}; - if (vinfo.where == MetadataFlag(Metadata::Cell)) { - varSize = vinfo.TensorSize() * (out_kb.e - out_kb.s + 1) * - (out_jb.e - out_jb.s + 1) * (out_ib.e - out_ib.s + 1); - } else { - varSize = vinfo.Size(); - } + hsize_t varSize = vinfo.FillSize(theDomain); auto fill_val = output_params.sparse_seed_nans ? 
std::numeric_limits::quiet_NaN() : 0; std::fill(tmpData.data() + index, tmpData.data() + index + varSize, fill_val); diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 956b4e2c18eb..d762399e9a83 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -55,6 +55,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti constexpr int NG = 2; constexpr int NSIDE = 16; constexpr int NDIM = 3; + constexpr int NFULL = (NSIDE + 2 * NG); constexpr auto interior = parthenon::IndexDomain::interior; constexpr auto entire = parthenon::IndexDomain::entire; @@ -116,6 +117,10 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[i] == NSIDE + 2 * NG); } } + THEN("The size and tensorsize are correct") { + REQUIRE(info.Size() == NFULL * NFULL * NFULL); + REQUIRE(info.TensorSize() == 1); + } } WHEN("We initialize VarInfo on a tensor cell var") { @@ -140,6 +145,10 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[i] == NSIDE + 2 * NG); } } + THEN("The size and tensorsize are correct") { + REQUIRE(info.Size() == 3 * 4 * NFULL * NFULL * NFULL); + REQUIRE(info.TensorSize() == 3 * 4); + } } WHEN("We initialize VarInfo on a tensor no-centering var") { @@ -158,6 +167,10 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[0] == 4); REQUIRE(shape[1] == 3); } + THEN("The size and tensorsize are correct") { + REQUIRE(info.Size() == 3 * 4); + REQUIRE(info.TensorSize() == 3 * 4); + } } WHEN("We initialize VarInfo on a vector face var") { @@ -178,6 +191,10 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[i] == NSIDE + 2 * NG + 1); } } + THEN("The size and tensorsize are correct") { + REQUIRE(info.Size() == 3 * 4 * (NFULL + 1) * (NFULL + 1) * (NFULL + 1)); + REQUIRE(info.TensorSize() == 3 * 4); + } } WHEN("We initialize VarInfo on a scaler edge var") { @@ -197,6 +214,10 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(shape[i] == NSIDE + 2 * NG + 1); } } + THEN("The size and tensorsize are correct") { + REQUIRE(info.Size() == 3 * (NFULL + 1) * (NFULL + 1) * (NFULL + 1)); + REQUIRE(info.TensorSize() == 3); + } } } } From 1fe6c29a5a0bfeab9c76e2204b3840ad9aedf1e9 Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Wed, 20 Mar 2024 09:44:12 +0100 Subject: [PATCH 29/58] Address comments --- src/outputs/restart.hpp | 3 +- src/outputs/restart_hdf5.cpp | 86 ++++++++++++++++++++++++++++++++++- src/outputs/restart_hdf5.hpp | 87 +----------------------------------- src/parthenon_manager.cpp | 9 ++-- 4 files changed, 92 insertions(+), 93 deletions(-) diff --git a/src/outputs/restart.hpp b/src/outputs/restart.hpp index 2ee20afc01dc..b16812be0f95 100644 --- a/src/outputs/restart.hpp +++ b/src/outputs/restart.hpp @@ -25,6 +25,7 @@ #include #include +#include "interface/metadata.hpp" #include "mesh/domain.hpp" #include "utils/error_checking.hpp" @@ -122,4 +123,4 @@ class RestartReader { }; } // namespace parthenon -#endif // OUTPUTS_RESTART_HDF5_HPP_ +#endif // OUTPUTS_RESTART_HPP_ diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index ceba14d72309..779d0ebab296 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -133,7 +133,7 @@ RestartReaderHDF5::TimeInfo RestartReaderHDF5::GetTimeInfo() const { time_info.time = GetAttr("Info", "Time"); time_info.dt = GetAttr("Info", "dt"); 
- time_info.ncycle = GetAttr("Info", "NCycle"); + time_info.ncycle = GetAttr("Info", "NCycle"); return time_info; } @@ -178,5 +178,89 @@ void RestartReaderHDF5::ReadParams(const std::string &name, Params &p) { p.ReadFromRestart(name, params_group_); #endif // ENABLE_HDF5 } +void RestartReaderHDF5::ReadBlocks(const std::string &name, IndexRange range, + std::vector &dataVec, + const std::vector &bsize, + int file_output_format_version, MetadataFlag where, + const std::vector &shape) const { +#ifndef ENABLE_HDF5 + PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); +#else // HDF5 enabled + auto hdl = OpenDataset(name); + + constexpr int CHUNK_MAX_DIM = 7; + + /** Select hyperslab in dataset **/ + hsize_t offset[CHUNK_MAX_DIM] = {static_cast(range.s), 0, 0, 0, 0, 0, 0}; + hsize_t count[CHUNK_MAX_DIM]; + int total_dim = 0; + if (file_output_format_version == -1) { + size_t vlen = 1; + for (int i = 0; i < shape.size(); i++) { + vlen *= shape[i]; + } + count[0] = static_cast(range.e - range.s + 1); + count[1] = bsize[2]; + count[2] = bsize[1]; + count[3] = bsize[0]; + count[4] = vlen; + total_dim = 5; + } else if (file_output_format_version == 2) { + PARTHENON_REQUIRE(shape.size() <= 1, + "Higher than vector datatypes are unstable in output versions < 3"); + size_t vlen = 1; + for (int i = 0; i < shape.size(); i++) { + vlen *= shape[i]; + } + count[0] = static_cast(range.e - range.s + 1); + count[1] = vlen; + count[2] = bsize[2]; + count[3] = bsize[1]; + count[4] = bsize[0]; + total_dim = 5; + } else if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { + count[0] = static_cast(range.e - range.s + 1); + const int ndim = shape.size(); + if (where == MetadataFlag(Metadata::Cell)) { + for (int i = 0; i < ndim; i++) { + count[1 + i] = shape[ndim - i - 1]; + } + count[ndim + 1] = bsize[2]; + count[ndim + 2] = bsize[1]; + count[ndim + 3] = bsize[0]; + total_dim = 3 + ndim + 1; + } else if (where == MetadataFlag(Metadata::None)) { + for (int i = 0; i < ndim; i++) { + count[1 + i] = shape[ndim - i - 1]; + } + total_dim = ndim + 1; + } else { + PARTHENON_THROW("Only Cell and None locations supported!"); + } + } else { + PARTHENON_THROW("Unknown output format version in restart file.") + } + + hsize_t total_count = 1; + for (int i = 0; i < total_dim; ++i) { + total_count *= count[i]; + } + + PARTHENON_REQUIRE_THROWS(dataVec.size() >= total_count, + "Buffer (size " + std::to_string(dataVec.size()) + + ") is too small for dataset " + name + " (size " + + std::to_string(total_count) + ")"); + PARTHENON_HDF5_CHECK( + H5Sselect_hyperslab(hdl.dataspace, H5S_SELECT_SET, offset, NULL, count, NULL)); + + const H5S memspace = H5S::FromHIDCheck(H5Screate_simple(total_dim, count, NULL)); + PARTHENON_HDF5_CHECK( + H5Sselect_hyperslab(hdl.dataspace, H5S_SELECT_SET, offset, NULL, count, NULL)); + + // Read data from file + PARTHENON_HDF5_CHECK(H5Dread(hdl.dataset, hdl.type, memspace, hdl.dataspace, + H5P_DEFAULT, dataVec.data())); +#endif // ENABLE_HDF5 +} } // namespace parthenon diff --git a/src/outputs/restart_hdf5.hpp b/src/outputs/restart_hdf5.hpp index 65eafdec1c57..2c31ff6bd967 100644 --- a/src/outputs/restart_hdf5.hpp +++ b/src/outputs/restart_hdf5.hpp @@ -19,10 +19,7 @@ //! \file io_wrapper.hpp // \brief defines a set of small wrapper functions for MPI versus Serial Output. 
-#include -#include #include -#include #include #include "config.hpp" @@ -57,7 +54,7 @@ class Param; class RestartReaderHDF5 : public RestartReader { public: - RestartReaderHDF5(const char *filename); + explicit RestartReaderHDF5(const char *filename); [[nodiscard]] SparseInfo GetSparseInfo() const override; @@ -129,87 +126,7 @@ class RestartReaderHDF5 : public RestartReader { // fills internal data for given pointer void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, const std::vector &bsize, int file_output_format_version, - MetadataFlag where, const std::vector &shape = {}) const override { -#ifndef ENABLE_HDF5 - PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); -#else // HDF5 enabled - auto hdl = OpenDataset(name); - - constexpr int CHUNK_MAX_DIM = 7; - - /** Select hyperslab in dataset **/ - hsize_t offset[CHUNK_MAX_DIM] = {static_cast(range.s), 0, 0, 0, 0, 0, 0}; - hsize_t count[CHUNK_MAX_DIM]; - int total_dim = 0; - if (file_output_format_version == -1) { - size_t vlen = 1; - for (int i = 0; i < shape.size(); i++) { - vlen *= shape[i]; - } - count[0] = static_cast(range.e - range.s + 1); - count[1] = bsize[2]; - count[2] = bsize[1]; - count[3] = bsize[0]; - count[4] = vlen; - total_dim = 5; - } else if (file_output_format_version == 2) { - PARTHENON_REQUIRE( - shape.size() <= 1, - "Higher than vector datatypes are unstable in output versions < 3"); - size_t vlen = 1; - for (int i = 0; i < shape.size(); i++) { - vlen *= shape[i]; - } - count[0] = static_cast(range.e - range.s + 1); - count[1] = vlen; - count[2] = bsize[2]; - count[3] = bsize[1]; - count[4] = bsize[0]; - total_dim = 5; - } else if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { - count[0] = static_cast(range.e - range.s + 1); - const int ndim = shape.size(); - if (where == MetadataFlag(Metadata::Cell)) { - for (int i = 0; i < ndim; i++) { - count[1 + i] = shape[ndim - i - 1]; - } - count[ndim + 1] = bsize[2]; - count[ndim + 2] = bsize[1]; - count[ndim + 3] = bsize[0]; - total_dim = 3 + ndim + 1; - } else if (where == MetadataFlag(Metadata::None)) { - for (int i = 0; i < ndim; i++) { - count[1 + i] = shape[ndim - i - 1]; - } - total_dim = ndim + 1; - } else { - PARTHENON_THROW("Only Cell and None locations supported!"); - } - } else { - PARTHENON_THROW("Unknown output format version in restart file.") - } - - hsize_t total_count = 1; - for (int i = 0; i < total_dim; ++i) { - total_count *= count[i]; - } - - PARTHENON_REQUIRE_THROWS(dataVec.size() >= total_count, - "Buffer (size " + std::to_string(dataVec.size()) + - ") is too small for dataset " + name + " (size " + - std::to_string(total_count) + ")"); - PARTHENON_HDF5_CHECK( - H5Sselect_hyperslab(hdl.dataspace, H5S_SELECT_SET, offset, NULL, count, NULL)); - - const H5S memspace = H5S::FromHIDCheck(H5Screate_simple(total_dim, count, NULL)); - PARTHENON_HDF5_CHECK( - H5Sselect_hyperslab(hdl.dataspace, H5S_SELECT_SET, offset, NULL, count, NULL)); - - // Read data from file - PARTHENON_HDF5_CHECK(H5Dread(hdl.dataset, hdl.type, memspace, hdl.dataspace, - H5P_DEFAULT, dataVec.data())); -#endif // ENABLE_HDF5 - } + MetadataFlag where, const std::vector &shape = {}) const override; // Gets the data from a swarm var on current rank. Assumes all // blocks are contiguous. 
Fills dataVec based on shape from swarmvar diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index dadfde12dcab..28a084bcd5e0 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -28,16 +28,13 @@ #include "amr_criteria/refinement_package.hpp" #include "config.hpp" -#include "driver/driver.hpp" -#include "outputs/restart.hpp" -#include "outputs/restart_hdf5.hpp" #include FS_HEADER #include "globals.hpp" -#include "interface/update.hpp" #include "mesh/domain.hpp" #include "mesh/meshblock.hpp" #include "outputs/output_utils.hpp" -#include "outputs/parthenon_hdf5.hpp" +#include "outputs/restart.hpp" +#include "outputs/restart_hdf5.hpp" #include "utils/error_checking.hpp" #include "utils/utils.hpp" @@ -108,7 +105,7 @@ ParthenonStatus ParthenonManager::ParthenonInitEnv(int argc, char *argv[]) { if (fs::path(arg.restart_filename).extension() == ".rhdf") { restartReader = std::make_unique(arg.restart_filename); } else { - PARTHENON_FAIL("HELP!"); + PARTHENON_FAIL("Unsupported restart file format."); } // Load input stream From 2fa83e60266aa00d74621afee49d50aaa94e4f8f Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 12:23:54 -0600 Subject: [PATCH 30/58] added way to get vec of var info and moved some code out of header into implementation file --- src/basic_types.hpp | 3 ++ src/outputs/output_utils.cpp | 61 ++++++++++++++++++++++++++++++++++ src/outputs/output_utils.hpp | 55 ++++++------------------------ src/outputs/parthenon_hdf5.cpp | 15 ++------- src/outputs/restart_hdf5.cpp | 3 +- tst/unit/test_output_utils.cpp | 15 +++++++++ 6 files changed, 94 insertions(+), 58 deletions(-) diff --git a/src/basic_types.hpp b/src/basic_types.hpp index b36383aa2827..886788727d48 100644 --- a/src/basic_types.hpp +++ b/src/basic_types.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -208,6 +209,8 @@ struct SimTime { template using Dictionary = std::unordered_map; +template +using Triple_t = std::tuple; } // namespace parthenon #endif // BASIC_TYPES_HPP_ diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index 64c099d85617..18744d08fccd 100644 --- a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -33,6 +33,67 @@ namespace parthenon { namespace OutputUtils { +Triple_t VarInfo::GetNKJI(const IndexDomain domain) const { + int nx3 = 1, nx2 = 1, nx1 = 1; + for (auto el : topological_elements) { + nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); + nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); + nx1 = std::max(nx1, cellbounds.ncellsi(domain, el)); + } + return std::make_tuple(nx3, nx2, nx1); +} + +Triple_t VarInfo::GetPaddedBoundsKJI(const IndexDomain domain) const { + int ks = 1, ke = 1, js = 1, je = 1, is = 1, ie = 1; + for (auto el : topological_elements) { + auto kb = cellbounds.GetBoundsK(domain); + auto jb = cellbounds.GetBoundsJ(domain); + auto ib = cellbounds.GetBoundsI(domain); + ks = kb.s; // pads are only upper indices + js = jb.s; + is = ib.s; + ke = std::min(ke, kb.e); + je = std::min(je, jb.e); + ie = std::min(ie, ib.e); + } + IndexRange kb{ks, ke}, jb{is, ie}, ib{is, ie}; + return std::make_tuple(kb, jb, ib); +} + +int VarInfo::Size() const { + return std::accumulate(nx_.begin(), nx_.end(), 1, std::multiplies()); +} + +// Includes topological element shape +int VarInfo::TensorSize() const { + if (where == MetadataFlag({Metadata::None})) { + return Size(); + } else { + return std::accumulate(rnx_.begin(), rnx_.end() - 3, 1, std::multiplies()); + } +} + +int 
VarInfo::FillSize(const IndexDomain domain) const { + if (where == MetadataFlag({Metadata::None})) { + return Size(); + } else { + auto [n3, n2, n1] = GetNKJI(domain); + return TensorSize() * n3 * n2 * n1; + } +} + +std::vector VarInfo::GetAll(const VariableVector &vars, + const IndexShape &cellbounds) { + std::vector out; + for (const auto &v : vars) { + out.emplace_back(v, cellbounds); + } + std::sort(out.begin(), out.end(), + [](const VarInfo &a, const VarInfo &b) { return a.label < b.label; }); + + return out; +} + void SwarmInfo::AddOffsets(const SP_Swarm &swarm) { std::size_t count = swarm->GetNumActive(); std::size_t offset = (offsets.size() > 0) ? offsets.back() : 0; diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index be5239d3a252..2a8b3dff91d1 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -61,52 +61,14 @@ struct VarInfo { int ntop_elems; bool element_matters; - auto GetNKJI(const IndexDomain domain) const { - int nx3 = 1, nx2 = 1, nx1 = 1; - for (auto el : topological_elements) { - nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); - nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); - nx1 = std::max(nx1, cellbounds.ncellsi(domain, el)); - } - return std::make_tuple(nx3, nx2, nx1); - } - auto GetPaddedBoundsKJI(const IndexDomain domain) const { - int ks = 1, ke = 1, js = 1, je = 1, is = 1, ie = 1; - for (auto el : topological_elements) { - auto kb = cellbounds.GetBoundsK(domain); - auto jb = cellbounds.GetBoundsJ(domain); - auto ib = cellbounds.GetBoundsI(domain); - ks = kb.s; // pads are only upper indices - js = jb.s; - is = ib.s; - ke = std::min(ke, kb.e); - je = std::min(je, jb.e); - ie = std::min(ie, ib.e); - } - IndexRange kb{ks, ke}, jb{is, ie}, ib{is, ie}; - return std::make_tuple(kb, jb, ib); - } + Triple_t GetNKJI(const IndexDomain domain) const; + Triple_t GetPaddedBoundsKJI(const IndexDomain domain) const; - int Size() const { - return std::accumulate(nx_.begin(), nx_.end(), 1, std::multiplies()); - } + int Size() const; // Includes topological element shape - int TensorSize() const { - if (where == MetadataFlag({Metadata::None})) { - return Size(); - } else { - return std::accumulate(rnx_.begin(), rnx_.end() - 3, 1, std::multiplies()); - } - // return nx_[5] * nx_[4] * nx_[3]; - } - int FillSize(const IndexDomain domain) const { - if (where == MetadataFlag({Metadata::None})) { - return Size(); - } else { - auto [n3, n2, n1] = GetNKJI(domain); - return TensorSize() * n3 * n2 * n1; - } - } + int TensorSize() const; + // Size of region that needs to be filled with 0s if not allocated + int FillSize(const IndexDomain domain) const; template int FillShape(const IndexDomain domain, T *data) { @@ -209,6 +171,11 @@ struct VarInfo { var->GetDim(), var->metadata(), var->GetTopologicalElements(), var->IsSparse(), var->IsSet(Metadata::Vector), cellbounds) {} + static std::vector GetAll(const VariableVector &vars, + const IndexShape &cellbounds); + + bool operator==(const std::string &other) const { return other == label; } + private: // TODO(JMM): Probably nx_ and rnx_ both not necessary... but it was // easiest for me to reason about it this way. 
diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 48a402e47999..31dd1b006c9b 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -250,18 +250,9 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm return GetAnyVariables(var_vec, output_params.variables); } }; - - // get list of all vars, just use the first block since the list is the same for all - // blocks - std::vector all_vars_info; - const auto vars = get_vars(pm->block_list.front()); - for (auto &v : vars) { - all_vars_info.emplace_back(v, cellbounds); - } - - // sort alphabetically - std::sort(all_vars_info.begin(), all_vars_info.end(), - [](const VarInfo &a, const VarInfo &b) { return a.label < b.label; }); + // get list of all vars, just use the first block since the list is + // the same for all blocks + auto all_vars_info = VarInfo::GetAll(get_vars(pm->block_list.front()), cellbounds); // We need to add information about the sparse variables to the HDF5 file, namely: // 1) Which variables are sparse diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index b68733c90cf1..122b361232de 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -181,8 +181,7 @@ void RestartReaderHDF5::ReadParams(const std::string &name, Params &p) { void RestartReaderHDF5::ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, const std::vector &bsize, - int file_output_format_version, - MetadataFlag where, + int file_output_format_version, MetadataFlag where, const std::vector &shape) const { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index d762399e9a83..33f3e1903145 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -85,6 +85,9 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti m = Metadata({Metadata::Edge, Metadata::Independent}); pkg->AddField(scalar_edge, m); + const std::vector var_names = {scalar_cell, tensor_cell, tensor_none, + vector_face, scalar_edge}; + auto pmb = std::make_shared(NSIDE, NDIM); auto pmbd = pmb->meshblock_data.Get(); pmbd->Initialize(pkg, pmb); @@ -219,5 +222,17 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(info.TensorSize() == 3); } } + + WHEN("We request info from all vars") { + auto vars = parthenon::GetAnyVariables(pmbd->GetVariableVector(), + {parthenon::Metadata::Independent}); + auto all_info = VarInfo::GetAll(vars, cellbounds); + THEN("The labels are all present") { + for (const std::string &name : var_names) { + auto pinfo = std::find(all_info.begin(), all_info.end(), name); + REQUIRE(pinfo != all_info.end()); + } + } + } } } From fcb15d6ad174d13d5d23e8bd9cc5638f64b19565 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 14:24:34 -0600 Subject: [PATCH 31/58] PaddedShape exposed. Broke xdmf will fix. 
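A minimal sketch of what GetPaddedShape and GetPaddedShapeReversed return,
assuming the face-centered four-vector from the unit test below (16^3 block,
two ghost zones, Metadata shape {4}):

  auto v = pmbd->GetVarPtr("four_vector_face");
  VarInfo info(v, cellbounds);
  auto rshape = info.GetPaddedShapeReversed(interior);
  // slowest index first, the three face elements leading: {3, 1, 1, 4, 17, 17, 17}
  auto shape = info.GetPaddedShape(interior);
  // padded cell extents first: {17, 17, 17, 4, 1, 1, 3}

Both pad nx1, nx2, nx3 with the per-direction maxima over the variable's
topological elements (hence the 17s on the interior), while GetRawShape() and
GetDim(i) keep exposing the stored extents without that padding.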
--- src/outputs/output_utils.cpp | 36 ++++++++++++++++++++++++++++++++++ src/outputs/output_utils.hpp | 28 +++++++++++--------------- src/outputs/parthenon_xdmf.cpp | 5 +++-- tst/unit/test_output_utils.cpp | 26 ++++++++++++++++++++++++ 4 files changed, 76 insertions(+), 19 deletions(-) diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index 18744d08fccd..fc4d3016c527 100644 --- a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -82,6 +82,42 @@ int VarInfo::FillSize(const IndexDomain domain) const { } } +// number of elements of data that describe variable shape +int VarInfo::GetNDim() const { + // 3 cell indices, tensor rank, topological element index if needed + return (where == MetadataFlag({Metadata::None})) + ? tensor_rank + : (3 + tensor_rank + element_matters); +} + +// Returns full shape as read to/written from I/O, with 1-padding. +std::vector VarInfo::GetPaddedShape(IndexDomain domain) const { + std::vector out = GetRawShape(); + auto [nx3, nx2, nx1] = GetNKJI(domain); + out[0] = nx3; + out[1] = nx2; + out[2] = nx1; + return out; +} +std::vector VarInfo::GetPaddedShapeReversed(IndexDomain domain) const { + std::vector out(rnx_.begin(), rnx_.end()); + auto [nx3, nx2, nx1] = GetNKJI(domain); + out[VNDIM - 3] = nx3; + out[VNDIM - 2] = nx2; + out[VNDIM - 1] = nx1; + return out; +} + +std::vector VarInfo::GetRawShape() const { + return std::vector(nx_.begin(), nx_.end()); +} + +int VarInfo::GetDim(int i) const { + PARTHENON_DEBUG_REQUIRE(0 < i && i < VNDIM, "Index out of bounds"); + return nx_[i - 1]; +} + + std::vector VarInfo::GetAll(const VariableVector &vars, const IndexShape &cellbounds) { std::vector out; diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 2a8b3dff91d1..171a2f2841db 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -69,13 +69,12 @@ struct VarInfo { int TensorSize() const; // Size of region that needs to be filled with 0s if not allocated int FillSize(const IndexDomain domain) const; + // number of elements of data that describe variable shape + int GetNDim() const; template - int FillShape(const IndexDomain domain, T *data) { - int ndim = -1; // number of elements of data that describe - // variable shape + int FillShape(const IndexDomain domain, T *data) const { if (where == MetadataFlag({Metadata::None})) { - ndim = tensor_rank; for (int i = 0; i < tensor_rank; ++i) { data[i] = static_cast(rnx_[rnx_.size() - tensor_rank + i]); } @@ -84,8 +83,6 @@ struct VarInfo { // accross topological elements. Unused indices will be written but // empty. auto [nx3, nx2, nx1] = GetNKJI(domain); - // 3 cell indices, tensor rank, topological element index if needed - ndim = 3 + tensor_rank + element_matters; // fill topological element, if relevant if (element_matters) { data[0] = ntop_elems; @@ -100,11 +97,11 @@ struct VarInfo { data[tensor_rank + element_matters + 1] = static_cast(nx2); data[tensor_rank + element_matters + 2] = static_cast(nx1); } - return ndim; + return GetNDim(); } template - int FillShape(const IndexDomain domain, T *head, Args... args) { + int FillShape(const IndexDomain domain, T *head, Args... args) const { int ndim_head = FillShape(domain, head); int ndim_tail = FillShape(domain, std::forward(args)...); // this check should be impossible to trigger but... 
just to be safe @@ -113,15 +110,12 @@ struct VarInfo { return ndim_tail; } - template - auto GetShape() const { - return std::vector(nx_.rbegin(), nx_.rend()); - } - template - auto GetDim(int i) const { - PARTHENON_DEBUG_REQUIRE(0 < i && i < VNDIM, "Index out of bounds"); - return static_cast(nx_[i - 1]); - } + // Returns full shape as read to/written from I/O, with 1-padding. + std::vector GetPaddedShape(IndexDomain domain) const; + std::vector GetPaddedShapeReversed(IndexDomain domain) const; + // nx accessors + std::vector GetRawShape() const; + int GetDim(int i) const; VarInfo() = delete; diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 08e79050fac1..b4b2aaa96d8e 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -150,12 +150,13 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, int nx1, int nx2, int n // write graphics variables int ndim; for (const auto &vinfo : var_list) { - std::vector alldims = vinfo.GetShape(); + // TODO(JMM): This is crazy and needs to be fixed given otehr cleanup. + std::vector alldims = vinfo.GetRawShape(); // Only cell-based data currently supported for visualization if (vinfo.where == MetadataFlag(Metadata::Cell)) { ndim = 3 + vinfo.tensor_rank + 1; for (int i = 0; i < vinfo.tensor_rank; i++) { - dims[1 + i] = alldims[3 - vinfo.tensor_rank + i]; + dims[1 + i] = static_cast(alldims[3 - vinfo.tensor_rank + i]); } dims[vinfo.tensor_rank + 1] = nx3; dims[vinfo.tensor_rank + 2] = nx2; diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 33f3e1903145..93385a4d070b 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -198,6 +198,32 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti REQUIRE(info.Size() == 3 * 4 * (NFULL + 1) * (NFULL + 1) * (NFULL + 1)); REQUIRE(info.TensorSize() == 3 * 4); } + THEN("Requesting reversed padded shape provides correctly shaped object") { + constexpr int ND = VarInfo::VNDIM; + auto padded_shape = info.GetPaddedShapeReversed(interior); + REQUIRE(padded_shape.size() == ND); + REQUIRE(padded_shape[ND - 1] == NSIDE + 1); + REQUIRE(padded_shape[ND - 2] == NSIDE + 1); + REQUIRE(padded_shape[ND - 3] == NSIDE + 1); + REQUIRE(padded_shape[ND - 4] == 4); + REQUIRE(padded_shape[0] == 3); + for (int i = 1; i < ND - 5; ++i) { + REQUIRE(padded_shape[i] == 1); + } + } + THEN("Requesting padded shape provides correctly shaped object") { + constexpr int ND = VarInfo::VNDIM; + auto padded_shape = info.GetPaddedShape(interior); + REQUIRE(padded_shape.size() == ND); + REQUIRE(padded_shape[0] == NSIDE + 1); + REQUIRE(padded_shape[1] == NSIDE + 1); + REQUIRE(padded_shape[2] == NSIDE + 1); + REQUIRE(padded_shape[3] == 4); + for (int i = 4; i < ND - 1; ++i) { + REQUIRE(padded_shape[i] == 1); + } + REQUIRE(padded_shape[ND - 1] == 3); + } } WHEN("We initialize VarInfo on a scaler edge var") { From bdd326a6ea3291ee6e5c89054afd20bf2a3d52b8 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 15:51:04 -0600 Subject: [PATCH 32/58] cleanup and fix output utils, generalize packorunpack var --- src/outputs/output_utils.cpp | 24 ++++++++-------- src/outputs/output_utils.hpp | 50 ++++++++++++++++++---------------- tst/unit/test_output_utils.cpp | 10 +++++++ 3 files changed, 50 insertions(+), 34 deletions(-) diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index fc4d3016c527..a75be124e6a6 100644 --- a/src/outputs/output_utils.cpp +++ 
b/src/outputs/output_utils.cpp @@ -35,6 +35,8 @@ namespace OutputUtils { Triple_t VarInfo::GetNKJI(const IndexDomain domain) const { int nx3 = 1, nx2 = 1, nx1 = 1; + // TODO(JMM): I know that this could be done by hand, but I'd rather + // rely on the loop bounds machinery and this should be cheap. for (auto el : topological_elements) { nx3 = std::max(nx3, cellbounds.ncellsk(domain, el)); nx2 = std::max(nx2, cellbounds.ncellsj(domain, el)); @@ -44,17 +46,19 @@ Triple_t VarInfo::GetNKJI(const IndexDomain domain) const { } Triple_t VarInfo::GetPaddedBoundsKJI(const IndexDomain domain) const { - int ks = 1, ke = 1, js = 1, je = 1, is = 1, ie = 1; + // TODO(JMM): I know that this could be done by hand, but I'd rather + // rely on the loop bounds machinery and this should be cheap. + int ks = 0, ke = 0, js = 0, je = 0, is = 0, ie = 0; for (auto el : topological_elements) { - auto kb = cellbounds.GetBoundsK(domain); - auto jb = cellbounds.GetBoundsJ(domain); - auto ib = cellbounds.GetBoundsI(domain); + auto kb = cellbounds.GetBoundsK(domain, el); + auto jb = cellbounds.GetBoundsJ(domain, el); + auto ib = cellbounds.GetBoundsI(domain, el); ks = kb.s; // pads are only upper indices js = jb.s; is = ib.s; - ke = std::min(ke, kb.e); - je = std::min(je, jb.e); - ie = std::min(ie, ib.e); + ke = std::max(ke, kb.e); + je = std::max(je, jb.e); + ie = std::max(ie, ib.e); } IndexRange kb{ks, ke}, jb{is, ie}, ib{is, ie}; return std::make_tuple(kb, jb, ib); @@ -85,9 +89,8 @@ int VarInfo::FillSize(const IndexDomain domain) const { // number of elements of data that describe variable shape int VarInfo::GetNDim() const { // 3 cell indices, tensor rank, topological element index if needed - return (where == MetadataFlag({Metadata::None})) - ? tensor_rank - : (3 + tensor_rank + element_matters); + return (where == MetadataFlag({Metadata::None})) ? tensor_rank + : (3 + tensor_rank + element_matters); } // Returns full shape as read to/written from I/O, with 1-padding. @@ -117,7 +120,6 @@ int VarInfo::GetDim(int i) const { return nx_[i - 1]; } - std::vector VarInfo::GetAll(const VariableVector &vars, const IndexShape &cellbounds) { std::vector out; diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index 171a2f2841db..a66fc52924ac 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -290,31 +290,35 @@ std::vector FlattenBlockInfo(Mesh *pm, int shape, Function_t f) { // mirror must be provided because copying done externally template -void PackOrUnpackVar(MeshBlock *pmb, Variable *pvar, bool do_ghosts, idx_t &idx, - std::vector &data, Function_t f) { - const auto &Nt = pvar->GetDim(6); - const auto &Nu = pvar->GetDim(5); - const auto &Nv = pvar->GetDim(4); +void PackOrUnpackVar(const VarInfo &info, Variable *pvar, bool do_ghosts, + idx_t &idx, std::vector &data, Function_t f) { const IndexDomain domain = (do_ghosts ? IndexDomain::entire : IndexDomain::interior); - IndexRange kb, jb, ib; - if (pvar->metadata().Where() == MetadataFlag(Metadata::Cell)) { - kb = pmb->cellbounds.GetBoundsK(domain); - jb = pmb->cellbounds.GetBoundsJ(domain); - ib = pmb->cellbounds.GetBoundsI(domain); - // TODO(JMM): Add topological elements here - } else { // metadata none - kb = {0, pvar->GetDim(3) - 1}; - jb = {0, pvar->GetDim(2) - 1}; - ib = {0, pvar->GetDim(1) - 1}; + // shape as written to or read from. contains additional padding + // in orthogonal directions. 
+ // e.g., Face1-centered var is shape (N1+1)x(N2+1)x(N3+1) + // format is + // topological_elems x tensor_elems x block_elems + const auto shape = info.GetPaddedShapeReversed(domain); + // TODO(JMM): Should I hide this inside VarInfo? + auto [kb, jb, ib] = info.GetPaddedBoundsKJI(domain); + if (info.where == MetadataFlag({Metadata::None})) { + kb.s = 0; + kb.e = shape[4]; + jb.s = 0; + jb.e = shape[5]; + ib.s = 0; + ib.e = shape[6]; } - for (int t = 0; t < Nt; ++t) { - for (int u = 0; u < Nu; ++u) { - for (int v = 0; v < Nv; ++v) { - for (int k = kb.s; k <= kb.e; ++k) { - for (int j = jb.s; j <= jb.e; ++j) { - for (int i = ib.s; i <= ib.e; ++i) { - f(idx, t, u, v, k, j, i); - idx++; + for (int topo = 0; topo < shape[0]; ++topo) { + for (int t = 0; t < shape[1]; ++t) { + for (int u = 0; u < shape[2]; ++u) { + for (int v = 0; v < shape[3]; ++v) { + for (int k = kb.s; k <= kb.e; ++k) { + for (int j = jb.s; j <= jb.e; ++j) { + for (int i = ib.s; i <= ib.e; ++i) { + f(idx, topo, t, u, v, k, j, i); + idx++; + } } } } diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 93385a4d070b..882383108a91 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -224,6 +224,16 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } REQUIRE(padded_shape[ND - 1] == 3); } + + THEN("The padded bounds are correct") { + auto [kb, jb, ib] = info.GetPaddedBoundsKJI(interior); + REQUIRE(kb.s == NG); + REQUIRE(kb.e == NSIDE + NG); + REQUIRE(jb.s == NG); + REQUIRE(jb.e == NSIDE + NG); + REQUIRE(ib.s == NG); + REQUIRE(ib.e == NSIDE + NG); + } } WHEN("We initialize VarInfo on a scaler edge var") { From 3a629db48bb88eb4470ba422ab1cb43390121da3 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 15:51:29 -0600 Subject: [PATCH 33/58] switch restarts over to use VarInfo and topological elements --- src/outputs/parthenon_hdf5.cpp | 6 +++--- src/outputs/restart.hpp | 6 +++--- src/outputs/restart_hdf5.cpp | 37 ++++++++++++---------------------- src/outputs/restart_hdf5.hpp | 7 ++++--- src/parthenon_manager.cpp | 29 ++++++++++++++------------ 5 files changed, 39 insertions(+), 46 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 31dd1b006c9b..0a54010490c2 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -350,9 +350,9 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (v->IsAllocated() && (var_name == v->label())) { auto v_h = v->data.GetHostMirrorAndCopy(); OutputUtils::PackOrUnpackVar( - pmb.get(), v.get(), output_params.include_ghost_zones, index, tmpData, - [&](auto index, int t, int u, int v, int k, int j, int i) { - tmpData[index] = static_cast(v_h(t, u, v, k, j, i)); + vinfo, v.get(), output_params.include_ghost_zones, index, tmpData, + [&](auto index, int topo, int t, int u, int v, int k, int j, int i) { + tmpData[index] = static_cast(v_h(topo, t, u, v, k, j, i)); }); is_allocated = true; diff --git a/src/outputs/restart.hpp b/src/outputs/restart.hpp index b16812be0f95..5f0ce46878f6 100644 --- a/src/outputs/restart.hpp +++ b/src/outputs/restart.hpp @@ -27,6 +27,7 @@ #include "interface/metadata.hpp" #include "mesh/domain.hpp" +#include "outputs/output_utils.hpp" #include "utils/error_checking.hpp" namespace parthenon { @@ -91,9 +92,8 @@ class RestartReader { // Assumes blocks are contiguous // fills internal data for given pointer virtual void ReadBlocks(const std::string &name, IndexRange range, 
- std::vector &dataVec, const std::vector &bsize, - int file_output_format_version, MetadataFlag where, - const std::vector &shape = {}) const = 0; + const OutputUtils::VarInfo &info, std::vector &dataVec, + int file_output_format_version) const = 0; // Gets the data from a swarm var on current rank. Assumes all // blocks are contiguous. Fills dataVec based on shape from swarmvar diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index 122b361232de..98b49749b389 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -26,6 +26,7 @@ #include "interface/params.hpp" #include "mesh/mesh.hpp" #include "mesh/meshblock.hpp" +#include "outputs/output_utils.hpp" #include "outputs/outputs.hpp" #ifdef ENABLE_HDF5 #include "outputs/parthenon_hdf5.hpp" @@ -179,40 +180,28 @@ void RestartReaderHDF5::ReadParams(const std::string &name, Params &p) { #endif // ENABLE_HDF5 } void RestartReaderHDF5::ReadBlocks(const std::string &name, IndexRange range, + const OutputUtils::VarInfo &info, std::vector &dataVec, - const std::vector &bsize, - int file_output_format_version, MetadataFlag where, - const std::vector &shape) const { + int file_output_format_version) const { #ifndef ENABLE_HDF5 PARTHENON_FAIL("Restart functionality is not available because HDF5 is disabled"); #else // HDF5 enabled auto hdl = OpenDataset(name); - constexpr int CHUNK_MAX_DIM = 7; + constexpr int CHUNK_MAX_DIM = info.VNDIM; /** Select hyperslab in dataset **/ - hsize_t offset[CHUNK_MAX_DIM] = {static_cast(range.s), 0, 0, 0, 0, 0, 0}; - hsize_t count[CHUNK_MAX_DIM]; int total_dim = 0; + hsize_t offset[CHUNK_MAX_DIM], count[CHUNK_MAX_DIM]; + std::fill(offset + 1, offset + CHUNK_MAX_DIM, 0); + std::fill(count + 1, count + CHUNK_MAX_DIM, 1); + + offset[0] = static_cast(range.s); + count[0] = static_cast(range.e - range.s + 1); + const IndexDomain domain = hasGhost ? IndexDomain::entire : IndexDomain::interior; + if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { - count[0] = static_cast(range.e - range.s + 1); - const int ndim = shape.size(); - if (where == MetadataFlag(Metadata::Cell)) { - for (int i = 0; i < ndim; i++) { - count[1 + i] = shape[ndim - i - 1]; - } - count[ndim + 1] = bsize[2]; - count[ndim + 2] = bsize[1]; - count[ndim + 3] = bsize[0]; - total_dim = 3 + ndim + 1; - } else if (where == MetadataFlag(Metadata::None)) { - for (int i = 0; i < ndim; i++) { - count[1 + i] = shape[ndim - i - 1]; - } - total_dim = ndim + 1; - } else { - PARTHENON_THROW("Only Cell and None locations supported!"); - } + total_dim = info.FillShape(domain, &(count[1])) + 1; } else { PARTHENON_THROW("Unknown output format version in restart file.") } diff --git a/src/outputs/restart_hdf5.hpp b/src/outputs/restart_hdf5.hpp index 2c31ff6bd967..f66df421ce5a 100644 --- a/src/outputs/restart_hdf5.hpp +++ b/src/outputs/restart_hdf5.hpp @@ -28,6 +28,7 @@ #include #include "interface/metadata.hpp" +#include "outputs/output_utils.hpp" #include "outputs/parthenon_hdf5_types.hpp" using namespace parthenon::HDF5; @@ -124,9 +125,9 @@ class RestartReaderHDF5 : public RestartReader { // Gets data for all blocks on current rank. 
// Assumes blocks are contiguous // fills internal data for given pointer - void ReadBlocks(const std::string &name, IndexRange range, std::vector &dataVec, - const std::vector &bsize, int file_output_format_version, - MetadataFlag where, const std::vector &shape = {}) const override; + void ReadBlocks(const std::string &name, IndexRange range, + const OutputUtils::VarInfo &info, std::vector &dataVec, + int file_output_format_version) const override; // Gets the data from a swarm var on current rank. Assumes all // blocks are contiguous. Fills dataVec based on shape from swarmvar diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 1af0c8d9be9e..16581207fe78 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -263,6 +263,8 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { const auto indep_restart_vars = GetAnyVariables(mb.meshblock_data.Get()->GetVariableVector(), {parthenon::Metadata::Independent, parthenon::Metadata::Restart}); + const auto all_vars_info = + OutputUtils::VarInfo::GetAll(indep_restart_vars, mb.cellbounds); const auto sparse_info = resfile.GetSparseInfo(); // create map of sparse field labels to index in the SparseInfo table @@ -274,11 +276,11 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { // Allocate space based on largest vector int max_vlen = 1; int num_sparse = 0; - for (auto &v_info : indep_restart_vars) { - const auto &label = v_info->label(); + for (const auto &v_info : all_vars_info) { + const auto &label = v_info.label; // check that variable is in the list of sparse fields if and only if it is sparse - if (v_info->IsSparse()) { + if (v_info.is_sparse) { ++num_sparse; PARTHENON_REQUIRE_THROWS(sparse_idxs.count(label) == 1, "Sparse field " + label + @@ -288,7 +290,7 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { "Dense field " + label + " is marked as sparse in restart file"); } - max_vlen = std::max(max_vlen, v_info->NumComponents()); + max_vlen = std::max(max_vlen, v_info.num_components); } // make sure we have all sparse variables that are in the restart file @@ -297,17 +299,16 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { "Mismatch between sparse fields in simulation and restart file"); std::vector tmp(static_cast(nb) * nCells * max_vlen); - for (auto &v_info : indep_restart_vars) { - const auto vlen = v_info->NumComponents(); - const auto &label = v_info->label(); + for (const auto &v_info : all_vars_info) { + const auto vlen = v_info.num_components; + const auto &label = v_info.label; if (Globals::my_rank == 0) { std::cout << "Var: " << label << ":" << vlen << std::endl; } // Read relevant data from the hdf file, this works for dense and sparse variables try { - resfile.ReadBlocks(label, myBlocks, tmp, bsize, file_output_format_ver, - v_info->metadata().Where(), v_info->metadata().Shape()); + resfile.ReadBlocks(label, myBlocks, v_info, tmp, file_output_format_ver); } catch (std::exception &ex) { std::cout << "[" << Globals::my_rank << "] WARNING: Failed to read variable " << label << " from restart file:" << std::endl @@ -317,7 +318,7 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { size_t index = 0; for (auto &pmb : rm.block_list) { - if (v_info->IsSparse()) { + if (v_info.is_sparse) { // check if the sparse variable is allocated on this block if (sparse_info.IsAllocated(pmb->gid, sparse_idxs.at(label))) { pmb->AllocateSparse(label); @@ -334,9 +335,11 @@ void 
ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { // Double note that this also needs to be update in case // we update the HDF5 infrastructure! if (file_output_format_ver == HDF5::OUTPUT_VERSION_FORMAT) { - OutputUtils::PackOrUnpackVar(pmb.get(), v.get(), resfile.hasGhost, index, tmp, - [&](auto index, int t, int u, int v, int k, int j, - int i) { v_h(t, u, v, k, j, i) = tmp[index]; }); + OutputUtils::PackOrUnpackVar( + v_info, v.get(), resfile.hasGhost, index, tmp, + [&](auto index, int topo, int t, int u, int v, int k, int j, int i) { + v_h(topo, t, u, v, k, j, i) = tmp[index]; + }); } else { PARTHENON_THROW("Unsupported output format version in restart file.") } From a098bffcdcaa8ea35dfc5a31a1f272025bd76c8b Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 16:27:57 -0600 Subject: [PATCH 34/58] fix xdmf at least for cell centered --- src/outputs/parthenon_hdf5.cpp | 2 +- src/outputs/parthenon_xdmf.cpp | 56 ++++++++++++++++++++++------------ src/outputs/parthenon_xdmf.hpp | 4 +-- 3 files changed, 39 insertions(+), 23 deletions(-) diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 0a54010490c2..a5de9531707d 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -495,7 +495,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (output_params.write_xdmf) { Kokkos::Profiling::pushRegion("genXDMF"); // generate XDMF companion file - XDMF::genXDMF(filename, pm, tm, nx1, nx2, nx3, all_vars_info, swarm_info); + XDMF::genXDMF(filename, pm, tm, theDomain, nx1, nx2, nx3, all_vars_info, swarm_info); Kokkos::Profiling::popRegion(); // genXDMF } diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index b4b2aaa96d8e..fa188e588800 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -59,7 +59,8 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name const std::vector &component_labels, std::string &hdfFile, int iblock, const int &num_components, int &ndims, hsize_t *dims, - const std::string &dims321, bool isVector); + const std::string &dims321, bool isVector, + MetadataFlag where); static std::string ParticleDatasetRef(const std::string &prefix, const std::string &swmname, const std::string &varname, @@ -69,10 +70,12 @@ static std::string ParticleDatasetRef(const std::string &prefix, static void ParticleVariableRef(std::ofstream &xdmf, const std::string &varname, const SwarmVarInfo &varinfo, const std::string &swmname, const std::string &hdffile, int particle_count); +static std::string LocationToStringRef(MetadataFlag where); } // namespace impl -void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, int nx1, int nx2, int nx3, - const std::vector &var_list, const AllSwarmInfo &all_swarm_info) { +void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int nx1, + int nx2, int nx3, const std::vector &var_list, + const AllSwarmInfo &all_swarm_info) { using namespace HDF5; using namespace OutputUtils; using namespace impl; @@ -150,24 +153,20 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, int nx1, int nx2, int n // write graphics variables int ndim; for (const auto &vinfo : var_list) { - // TODO(JMM): This is crazy and needs to be fixed given otehr cleanup. 
- std::vector alldims = vinfo.GetRawShape(); - // Only cell-based data currently supported for visualization - if (vinfo.where == MetadataFlag(Metadata::Cell)) { - ndim = 3 + vinfo.tensor_rank + 1; - for (int i = 0; i < vinfo.tensor_rank; i++) { - dims[1 + i] = static_cast(alldims[3 - vinfo.tensor_rank + i]); - } - dims[vinfo.tensor_rank + 1] = nx3; - dims[vinfo.tensor_rank + 2] = nx2; - dims[vinfo.tensor_rank + 3] = nx1; - } else { - continue; + if (vinfo.where != MetadataFlag({Metadata::Cell})) { + continue; // TODO(JMM): Fixme. Haven't checked other element shapes } - + // if (vinfo.where == MetadataFlag({Metadata::None})) { + // // TODO(JMM): Technically other centering is supported. But + // // lets ignore that. + // continue; + // } + ndim = vinfo.FillShape(domain, &(dims[1])) + 1; const int num_components = vinfo.num_components; + // TODO(JMM): Will need to fix this too. writeXdmfSlabVariableRef(xdmf, vinfo.label, vinfo.component_labels, hdfFile, ib, - num_components, ndim, dims, dims321, vinfo.is_vector); + num_components, ndim, dims, dims321, vinfo.is_vector, + vinfo.where); } xdmf << " " << std::endl; } @@ -239,10 +238,12 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name const std::vector &component_labels, std::string &hdfFile, int iblock, const int &num_components, int &ndims, hsize_t *dims, - const std::string &dims321, bool isVector) { + const std::string &dims321, bool isVector, + MetadataFlag where) { // writes a slab reference to file std::vector names; int nentries = 1; + // TODO(JMM): Fix this hardcoded nonsense. if (num_components == 1 || isVector) { // we only make one entry, because either num_components == 1, or we write this as a // vector @@ -280,7 +281,7 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name } else if (tensor_dims == 1) { const std::string prefix = " "; for (int i = 0; i < nentries; i++) { - fid << prefix << R"( &var_list, +void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int nx1, + int nx2, int nx3, const std::vector &var_list, const OutputUtils::AllSwarmInfo &all_swarm_info); } // namespace XDMF } // namespace parthenon From 0e40f0c5052f1a00c96ce8169cbbd087c43e0054 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 17:10:55 -0600 Subject: [PATCH 35/58] changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eaa4a9d7a991..e8efc2556ea2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ Date: 2024-03-21 - [[PR 996]](https://github.com/parthenon-hpc-lab/parthenon/pull/996) Remove dynamic allocations from swarm particle creation ### Changed (changing behavior/API/variables/...) +- [[PR1019](https://github.com/parthenon-hpc-lab/parthenon/pull/1019) Enable output for non-cell-centered variables - [[PR 973]](https://github.com/parthenon-hpc-lab/parthenon/pull/973) Multigrid performance upgrades ### Fixed (not changing behavior/API/variables/...) 
From 2fcd21958870adb0e1c49ac00564aea104ab4130 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 17:15:31 -0600 Subject: [PATCH 36/58] update output version and fix constexpr issue on clang --- src/outputs/parthenon_hdf5_types.hpp | 2 +- src/outputs/parthenon_xdmf.cpp | 15 ++++++--------- src/outputs/restart_hdf5.cpp | 10 +++++----- src/parthenon_manager.cpp | 4 ++-- 4 files changed, 14 insertions(+), 17 deletions(-) diff --git a/src/outputs/parthenon_hdf5_types.hpp b/src/outputs/parthenon_hdf5_types.hpp index 33ef9e15853c..55514e221876 100644 --- a/src/outputs/parthenon_hdf5_types.hpp +++ b/src/outputs/parthenon_hdf5_types.hpp @@ -43,7 +43,7 @@ namespace HDF5 { // Number of dimension of HDF5 field data sets (block x nv x nu x nt x nz x ny x nx) static constexpr size_t H5_NDIM = MAX_VARIABLE_DIMENSION + 1; -static constexpr int OUTPUT_VERSION_FORMAT = 3; +static constexpr int OUTPUT_VERSION_FORMAT = 4; /** * @brief RAII handles for HDF5. Use the typedefs directly (e.g. `H5A`, `H5D`, etc.) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index fa188e588800..61bbfb209a47 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -153,17 +153,14 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int // write graphics variables int ndim; for (const auto &vinfo : var_list) { - if (vinfo.where != MetadataFlag({Metadata::Cell})) { - continue; // TODO(JMM): Fixme. Haven't checked other element shapes + // JMM: I can't figure out how to get faces/edges to work and + // I'm not going try any longer. More eyes appreciated. + if ((vinfo.where != MetadataFlag({Metadata::Cell})) && + (vinfo.where != MetadataFlag({Metadata::Node}))) { + continue; } - // if (vinfo.where == MetadataFlag({Metadata::None})) { - // // TODO(JMM): Technically other centering is supported. But - // // lets ignore that. - // continue; - // } ndim = vinfo.FillShape(domain, &(dims[1])) + 1; const int num_components = vinfo.num_components; - // TODO(JMM): Will need to fix this too. writeXdmfSlabVariableRef(xdmf, vinfo.label, vinfo.component_labels, hdfFile, ib, num_components, ndim, dims, dims321, vinfo.is_vector, vinfo.where); @@ -243,7 +240,7 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name // writes a slab reference to file std::vector names; int nentries = 1; - // TODO(JMM): Fix this hardcoded nonsense. + // TODO(JMM): this is not generic if (num_components == 1 || isVector) { // we only make one entry, because either num_components == 1, or we write this as a // vector diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index 98b49749b389..e65bd3f42aeb 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -188,19 +188,19 @@ void RestartReaderHDF5::ReadBlocks(const std::string &name, IndexRange range, #else // HDF5 enabled auto hdl = OpenDataset(name); - constexpr int CHUNK_MAX_DIM = info.VNDIM; + const int VNDIM = info.VNDIM; /** Select hyperslab in dataset **/ int total_dim = 0; - hsize_t offset[CHUNK_MAX_DIM], count[CHUNK_MAX_DIM]; - std::fill(offset + 1, offset + CHUNK_MAX_DIM, 0); - std::fill(count + 1, count + CHUNK_MAX_DIM, 1); + hsize_t offset[VNDIM], count[VNDIM]; + std::fill(offset + 1, offset + VNDIM, 0); + std::fill(count + 1, count + VNDIM, 1); offset[0] = static_cast(range.s); count[0] = static_cast(range.e - range.s + 1); const IndexDomain domain = hasGhost ? 
IndexDomain::entire : IndexDomain::interior; - if (file_output_format_version == HDF5::OUTPUT_VERSION_FORMAT) { + if (file_output_format_version >= HDF5::OUTPUT_VERSION_FORMAT - 1) { total_dim = info.FillShape(domain, &(count[1])) + 1; } else { PARTHENON_THROW("Unknown output format version in restart file.") diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 16581207fe78..43619acd6d85 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -240,7 +240,7 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { << std::endl; const auto file_output_format_ver = resfile.GetOutputFormatVersion(); - if (file_output_format_ver != HDF5::OUTPUT_VERSION_FORMAT) { + if (file_output_format_ver < HDF5::OUTPUT_VERSION_FORMAT - 1) { // Being extra stringent here so that we don't forget to update the machinery when // another change happens. PARTHENON_THROW("Deprecated file format"); @@ -334,7 +334,7 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { // Double note that this also needs to be update in case // we update the HDF5 infrastructure! - if (file_output_format_ver == HDF5::OUTPUT_VERSION_FORMAT) { + if (file_output_format_ver >= HDF5::OUTPUT_VERSION_FORMAT - 1) { OutputUtils::PackOrUnpackVar( v_info, v.get(), resfile.hasGhost, index, tmp, [&](auto index, int topo, int t, int u, int v, int k, int j, int i) { From 23c7fe3d2e3bbedc878dc99a406dc4ca1fdc81c5 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 18:50:40 -0600 Subject: [PATCH 37/58] lroberts comments --- src/interface/variable.hpp | 5 +---- src/outputs/output_utils.cpp | 12 ++++++------ src/outputs/output_utils.hpp | 13 ++++++++----- src/outputs/parthenon_hdf5.cpp | 2 +- src/parthenon_manager.cpp | 2 +- tst/unit/test_output_utils.cpp | 10 +++++----- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/interface/variable.hpp b/src/interface/variable.hpp index db106ad47e5a..42c067efb9d7 100644 --- a/src/interface/variable.hpp +++ b/src/interface/variable.hpp @@ -85,10 +85,7 @@ class Variable { return dims_[i - 1]; } - KOKKOS_FORCEINLINE_FUNCTION - auto GetDim() const { // TODO(JMM): should this be host-only? - return dims_; - } + auto GetDim() const { return dims_; } KOKKOS_FORCEINLINE_FUNCTION auto GetCoarseDim(const int i) const { diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index a75be124e6a6..55da48d9cd16 100644 --- a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -33,7 +33,7 @@ namespace parthenon { namespace OutputUtils { -Triple_t VarInfo::GetNKJI(const IndexDomain domain) const { +Triple_t VarInfo::GetNumKJI(const IndexDomain domain) const { int nx3 = 1, nx2 = 1, nx1 = 1; // TODO(JMM): I know that this could be done by hand, but I'd rather // rely on the loop bounds machinery and this should be cheap. 
@@ -73,7 +73,7 @@ int VarInfo::TensorSize() const { if (where == MetadataFlag({Metadata::None})) { return Size(); } else { - return std::accumulate(rnx_.begin(), rnx_.end() - 3, 1, std::multiplies()); + return std::accumulate(rnx_.begin() + 1, rnx_.end() - 3, 1, std::multiplies()); } } @@ -81,8 +81,8 @@ int VarInfo::FillSize(const IndexDomain domain) const { if (where == MetadataFlag({Metadata::None})) { return Size(); } else { - auto [n3, n2, n1] = GetNKJI(domain); - return TensorSize() * n3 * n2 * n1; + auto [n3, n2, n1] = GetNumKJI(domain); + return ntop_elems * TensorSize() * n3 * n2 * n1; } } @@ -96,7 +96,7 @@ int VarInfo::GetNDim() const { // Returns full shape as read to/written from I/O, with 1-padding. std::vector VarInfo::GetPaddedShape(IndexDomain domain) const { std::vector out = GetRawShape(); - auto [nx3, nx2, nx1] = GetNKJI(domain); + auto [nx3, nx2, nx1] = GetNumKJI(domain); out[0] = nx3; out[1] = nx2; out[2] = nx1; @@ -104,7 +104,7 @@ std::vector VarInfo::GetPaddedShape(IndexDomain domain) const { } std::vector VarInfo::GetPaddedShapeReversed(IndexDomain domain) const { std::vector out(rnx_.begin(), rnx_.end()); - auto [nx3, nx2, nx1] = GetNKJI(domain); + auto [nx3, nx2, nx1] = GetNumKJI(domain); out[VNDIM - 3] = nx3; out[VNDIM - 2] = nx2; out[VNDIM - 1] = nx1; diff --git a/src/outputs/output_utils.hpp b/src/outputs/output_utils.hpp index a66fc52924ac..10cb8fa887e9 100644 --- a/src/outputs/output_utils.hpp +++ b/src/outputs/output_utils.hpp @@ -57,11 +57,15 @@ struct VarInfo { bool is_vector; IndexShape cellbounds; std::vector component_labels; + // list of topological elements in variable... e.g., Face1, Face2, etc std::vector topological_elements; + // how many topological elements are stored in variable, e.g., 3 for + // face and edge vars. int ntop_elems; + // whether or not topological element matters. bool element_matters; - Triple_t GetNKJI(const IndexDomain domain) const; + Triple_t GetNumKJI(const IndexDomain domain) const; Triple_t GetPaddedBoundsKJI(const IndexDomain domain) const; int Size() const; @@ -82,7 +86,7 @@ struct VarInfo { // For nx1,nx2,nx3 find max storage required in each direction // accross topological elements. Unused indices will be written but // empty. - auto [nx3, nx2, nx1] = GetNKJI(domain); + auto [nx3, nx2, nx1] = GetNumKJI(domain); // fill topological element, if relevant if (element_matters) { data[0] = ntop_elems; @@ -289,9 +293,8 @@ std::vector FlattenBlockInfo(Mesh *pm, int shape, Function_t f) { } // mirror must be provided because copying done externally -template -void PackOrUnpackVar(const VarInfo &info, Variable *pvar, bool do_ghosts, - idx_t &idx, std::vector &data, Function_t f) { +template +void PackOrUnpackVar(const VarInfo &info, bool do_ghosts, idx_t &idx, Function_t f) { const IndexDomain domain = (do_ghosts ? IndexDomain::entire : IndexDomain::interior); // shape as written to or read from. contains additional padding // in orthogonal directions. 
diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index a5de9531707d..821cb929d63d 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -350,7 +350,7 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm if (v->IsAllocated() && (var_name == v->label())) { auto v_h = v->data.GetHostMirrorAndCopy(); OutputUtils::PackOrUnpackVar( - vinfo, v.get(), output_params.include_ghost_zones, index, tmpData, + vinfo, output_params.include_ghost_zones, index, [&](auto index, int topo, int t, int u, int v, int k, int j, int i) { tmpData[index] = static_cast(v_h(topo, t, u, v, k, j, i)); }); diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index 43619acd6d85..d7cf79b95023 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -336,7 +336,7 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { // we update the HDF5 infrastructure! if (file_output_format_ver >= HDF5::OUTPUT_VERSION_FORMAT - 1) { OutputUtils::PackOrUnpackVar( - v_info, v.get(), resfile.hasGhost, index, tmp, + v_info, resfile.hasGhost, index, [&](auto index, int topo, int t, int u, int v, int k, int j, int i) { v_h(topo, t, u, v, k, j, i) = tmp[index]; }); diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 882383108a91..5fc51f71bfe7 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -122,7 +122,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } THEN("The size and tensorsize are correct") { REQUIRE(info.Size() == NFULL * NFULL * NFULL); - REQUIRE(info.TensorSize() == 1); + REQUIRE(info.TensorSize() * info.ntop_elems == 1); } } @@ -150,7 +150,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } THEN("The size and tensorsize are correct") { REQUIRE(info.Size() == 3 * 4 * NFULL * NFULL * NFULL); - REQUIRE(info.TensorSize() == 3 * 4); + REQUIRE(info.TensorSize() * info.ntop_elems == 3 * 4); } } @@ -172,7 +172,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } THEN("The size and tensorsize are correct") { REQUIRE(info.Size() == 3 * 4); - REQUIRE(info.TensorSize() == 3 * 4); + REQUIRE(info.TensorSize() * info.ntop_elems == 3 * 4); } } @@ -196,7 +196,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } THEN("The size and tensorsize are correct") { REQUIRE(info.Size() == 3 * 4 * (NFULL + 1) * (NFULL + 1) * (NFULL + 1)); - REQUIRE(info.TensorSize() == 3 * 4); + REQUIRE(info.TensorSize() * info.ntop_elems == 3 * 4); } THEN("Requesting reversed padded shape provides correctly shaped object") { constexpr int ND = VarInfo::VNDIM; @@ -255,7 +255,7 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } THEN("The size and tensorsize are correct") { REQUIRE(info.Size() == 3 * (NFULL + 1) * (NFULL + 1) * (NFULL + 1)); - REQUIRE(info.TensorSize() == 3); + REQUIRE(info.TensorSize() * info.ntop_elems == 3); } } From e383761d2b32b5c0492a10b07b6ece26fbece648 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 25 Mar 2024 18:57:14 -0600 Subject: [PATCH 38/58] oops padded shapes shouldn't modify vars not tied to a topological element --- src/outputs/output_utils.cpp | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index 55da48d9cd16..69330c7da4d4 100644 --- 
a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -96,18 +96,22 @@ int VarInfo::GetNDim() const { // Returns full shape as read to/written from I/O, with 1-padding. std::vector VarInfo::GetPaddedShape(IndexDomain domain) const { std::vector out = GetRawShape(); - auto [nx3, nx2, nx1] = GetNumKJI(domain); - out[0] = nx3; - out[1] = nx2; - out[2] = nx1; + if (where != MetadataFlag({Metadata::None})) { + auto [nx3, nx2, nx1] = GetNumKJI(domain); + out[0] = nx3; + out[1] = nx2; + out[2] = nx1; + } return out; } std::vector VarInfo::GetPaddedShapeReversed(IndexDomain domain) const { std::vector out(rnx_.begin(), rnx_.end()); - auto [nx3, nx2, nx1] = GetNumKJI(domain); - out[VNDIM - 3] = nx3; - out[VNDIM - 2] = nx2; - out[VNDIM - 1] = nx1; + if (where != MetadataFlag({Metadata::None})) { + auto [nx3, nx2, nx1] = GetNumKJI(domain); + out[VNDIM - 3] = nx3; + out[VNDIM - 2] = nx2; + out[VNDIM - 1] = nx1; + } return out; } From 14c751fbd5bbddba3cc0cf65a8b336ab9123875b Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Tue, 26 Mar 2024 17:17:44 -0600 Subject: [PATCH 39/58] Write topological location as an attribute --- src/interface/metadata.hpp | 16 ++++++++++++++++ src/outputs/parthenon_hdf5.cpp | 9 +++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/interface/metadata.hpp b/src/interface/metadata.hpp index 707f8a6bd917..19c8b1482c5d 100644 --- a/src/interface/metadata.hpp +++ b/src/interface/metadata.hpp @@ -348,6 +348,21 @@ class Metadata { } static int num_flags; + static std::string LocationToString(MetadataFlag flag) { + if (flag == Cell) { + return "Cell"; + } else if (flag == Face) { + return "Face"; + } else if (flag == Edge) { + return "Edge"; + } else if (flag == Node) { + return "Node"; + } else if (flag == None) { + return "None"; + } + PARTHENON_THROW("Unknown topology flag"); + } + // Sparse threshold routines void SetSparseThresholds(parthenon::Real alloc, parthenon::Real dealloc, parthenon::Real default_val = 0.0) { @@ -447,6 +462,7 @@ class Metadata { PARTHENON_THROW("No topology flag set"); } + std::string WhereAsString() const { return LocationToString(Where()); } bool IsMeshTied() const { return (Where() != None); } diff --git a/src/outputs/parthenon_hdf5.cpp b/src/outputs/parthenon_hdf5.cpp index 821cb929d63d..b80cb8fac8ca 100644 --- a/src/outputs/parthenon_hdf5.cpp +++ b/src/outputs/parthenon_hdf5.cpp @@ -383,8 +383,13 @@ void PHDF5Output::WriteOutputFileImpl(Mesh *pm, ParameterInput *pin, SimTime *tm Kokkos::Profiling::pushRegion("write variable data"); // write data to file - HDF5WriteND(file, var_name, tmpData.data(), ndim, &local_offset[0], &local_count[0], - &global_count[0], pl_xfer, pl_dcreate); + { // scope so the dataset gets closed + HDF5WriteND(file, var_name, tmpData.data(), ndim, &local_offset[0], &local_count[0], + &global_count[0], pl_xfer, pl_dcreate); + H5D dset = H5D::FromHIDCheck(H5Dopen2(file, var_name.c_str(), H5P_DEFAULT)); + HDF5WriteAttribute("TopologicalLocation", Metadata::LocationToString(vinfo.where), + dset); + } Kokkos::Profiling::popRegion(); // write variable data Kokkos::Profiling::popRegion(); // write variable loop } From f24981dd8877d7f91a310fdbbe43b9d1b6128e8a Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Wed, 27 Mar 2024 17:34:04 -0600 Subject: [PATCH 40/58] README --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3df6b6871cdb..28ec449f3cab 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ Parthenon -- a performance portable 
block-structured adaptive mesh refinement fr * Flexible, plug-in package system * Abstract variables controlled via metadata flags * Support for particles +* Support for cell-, node-, face-, and edge-centered fields * Multi-stage drivers/integrators with support for task-based parallelism # Community From 2651182e71c940783ea76e161f2d6d7ddf2a3fc7 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 10:49:53 -0600 Subject: [PATCH 41/58] python can interpolate and plot edge/face/node data --- .../parthenon_tools/movie2d.py | 29 +- .../parthenon_tools/parthenon_tools/phdf.py | 285 +++++++++--------- 2 files changed, 162 insertions(+), 152 deletions(-) diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py index b911eb0b88aa..c08700a2fe8f 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py @@ -208,17 +208,20 @@ def plot_dump( ye = yf # get tensor components - if len(q.shape) > 6: - raise ValueError( - "Tensor rank is higher than I can handle. " - + "Please revise the movie2d script" - ) - if len(q.shape) == 6: - q = q[:, components[0], components[1], 0, :, :] - if len(q.shape) == 5: - q = q[:, components[-1], 0, :, :] - if len(q.shape) == 4: - q = q[:, 0, :, :] + ntensors = len(q.shape[1:-3]) + if components: + if len(components) != ntensors: + print("value error!", len(components), ntensors, q.shape) + raise ValueError( + "Tensor rank not the same as number of specified components" + ) + for c in components: + if c > (q.shape[1] - 1): + print("Value error!", c, q.shape) + raise ValueError("Component out of bounds") + q = q[:, c] + # move to 2d + q = q[..., 0, :, :] fig = plt.figure() p = fig.add_subplot(111, aspect=1) @@ -299,11 +302,11 @@ def plot_dump( args.output_directory.mkdir(0o755, True, True) logger.info(f"Total files to process: {len(args.files)}") - components = [0, 0] + components = [] if args.tc is not None: components = args.tc if args.vc is not None: - components = [0, args.vc] + components = [args.vc] do_swarm = args.swarm is not None _x = ProcessPoolExecutor if args.worker_type == "process" else ThreadPoolExecutor diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py index 39f87957c0c1..2cde89426c4f 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py @@ -99,20 +99,29 @@ class phdf: BlockBounds[NumBlocks]: Bounds of all the blocks Class Functions: - Get(variable, flatten=True): - Gets data for the named variable. Returns None if variable - is not found in the file or the data if found. + Get(variable, flatten=True, interior=False, average=True): + Reads data for the named variable from file. - If variable is a vector, each element of the returned - numpy array is a vector of that length. + Returns None if variable is not found in the file or the data + if found. - Default is to return a flat array of length TotalCells. - However if flatten is set to False, a 4D (or 5D if vector) - array is returned that has dimensions [NumBlocks, Nz, Ny, - Nx] where NumBlocks is the total number of blocks, and Nz, - Ny, and Nx are the number of cells in the z, y, and x - directions respectively. + If variable is a vector, each element of the returned numpy + array is a vector of that length. 
+ Default is to return a flat array of length TotalCells. + However if flatten is set to False, a 4D (or 5D if vector) + array is returned that has dimensions [NumBlocks, Nz, Ny, Nx] + where NumBlocks is the total number of blocks, and Nz, Ny, and + Nx are the number of cells in the z, y, and x directions + respectively. + + If flatten is False and interior is True, only non-ghost data + will be returned. This array will correspond to the coordinates + xg and xng, etc. + + By default, node, face, and edge centered variables are + averaged to cell centers for visualization. This can be + disabled by setting average = False. ToLocation(index): Returns the location as an array [ib, bidx, iz, iy, ix] which convets the index into a block, index within that @@ -296,6 +305,7 @@ def load_ghost_coords(coord_i): self.Variables = [k for k in f.keys()] self.varData = {} + self.varTopology = {} self.swarmData = {} # Construct a map of datasets to contained components,idx @@ -492,9 +502,8 @@ def findBlockIdxInOther(self, other, ib, tol=1e-10, verbose=False): print(f"Block id: {ib} with bounds {myibBounds} not found in {other.file}") return None # block index not found - def Get(self, variable, flatten=True, interior=False): - """ - Reads data for the named variable from file. + def Get(self, variable, flatten=True, interior=False, average=True): + """Reads data for the named variable from file. Returns None if variable is not found in the file or the data if found. @@ -502,8 +511,8 @@ def Get(self, variable, flatten=True, interior=False): If variable is a vector, each element of the returned numpy array is a vector of that length. - Default is to return a flat array of length TotalCells. - However if flatten is set to False, a 4D (or 5D if vector) + If flatten is true, returns a flat array of length TotalCells. + If flatten is set to False, a 4D (or 5D if vector) array is returned that has dimensions [NumBlocks, Nz, Ny, Nx] where NumBlocks is the total number of blocks, and Nz, Ny, and Nx are the number of cells in the z, y, and x directions @@ -512,26 +521,17 @@ def Get(self, variable, flatten=True, interior=False): If flatten is False and interior is True, only non-ghost data will be returned. This array will correspond to the coordinates xg and xng, etc. + + By default, node, face, and edge centered variables are + averaged to cell centers for visualization. This can be + disabled by setting average = False. """ try: if self.varData.get(variable) is None: self.varData[variable] = self.fid[variable][:] vShape = self.varData[variable].shape if self.OutputFormatVersion < 3: - if self.OutputFormatVersion == -1: - vLen = vShape[-1] - else: - vLen = vShape[1] # index 0 is the block, so we need to use 1 - # in versions < 3, if variable is a scalar remove the component index - if vLen == 1: - tmp = self.varData[variable].reshape(self.TotalCells) - newShape = ( - self.NumBlocks, - self.MeshBlockSize[2], - self.MeshBlockSize[1], - self.MeshBlockSize[0], - ) - self.varData[variable] = tmp.reshape((newShape)) + raise ValueError("Unsupported output version") except KeyError: print( @@ -542,114 +542,126 @@ def Get(self, variable, flatten=True, interior=False): ) return None + try: + self.varTopology[variable] = self.fid[variable].attrs["TopologicalLocation"] + except: + self.varTopology[variable] = "Cell" + average_able = (self.varTopology[variable] != "Cell") and ( + self.varTopology[variable] != "None" + ) + vShape = self.varData[variable].shape - if flatten: - # TODO(tbd) remove legacy mode in next major rel. 
- if self.OutputFormatVersion == -1: - if np.prod(vShape) > self.TotalCells: - return self.varData[variable][:].reshape( - self.TotalCells, vShape[-1] + simdim = (vShape[-1] > 1) + (vShape[-2] > 1) + (vShape[-3] > 1) + if average and average_able: + v = self.varData[variable] + vnew = v.copy() + vnew = vnew[..., :-1] + if simdim >= 2: + vnew = vnew[..., :-1, :] + if simdim >= 3: + vnew = vnew[..., :-1, :, :] + + if self.varTopology[variable] == "Face": + if simdim == 1: + vnew[:, 0] = 0.5 * (v[:, 0, ..., 1:] + v[:, 0, ..., :-1]) + elif simdim == 2: + vnew[:, 0] = 0.5 * (v[:, 0, ..., :-1, 1:] + v[:, 0, ..., :-1, :-1]) + vnew[:, 1] = 0.5 * (v[:, 1, ..., 1:, :-1] + v[:, 1, ..., :-1, :-1]) + else: # simdim == 3 + vnew[:, 0] = 0.5 * ( + v[:, 0, ..., :-1, :-1, 1:] + v[:, 0, ..., :-1, :-1, :-1] ) - else: - return self.varData[variable][:].reshape(self.TotalCells) - - elif self.OutputFormatVersion == 2: - if np.prod(vShape) > self.TotalCells: - ret = np.empty( - (vShape[1], self.TotalCells), dtype=self.varData[variable].dtype + vnew[:, 1] = 0.5 * ( + v[:, 1, ..., :-1, 1:, :-1] + v[:, 1, ..., :-1, :-1, :-1] ) - ret[:] = np.nan - for i in range(vShape[1]): - ret[i] = self.varData[variable][:, i, :, :, :].ravel() - assert (ret != np.nan).all() - return ret - else: - return self.varData[variable][:].reshape(self.TotalCells) - - elif self.OutputFormatVersion == 3: - # Check for cell-centered data - if ( - vShape[-1] == self.MeshBlockSize[0] - and vShape[-2] == self.MeshBlockSize[1] - and vShape[-3] == self.MeshBlockSize[2] - ): - fieldShape = vShape[1:-3] - totalFieldEntries = np.prod(fieldShape) - ndim = len(fieldShape) - if ndim == 0: - return self.varData[variable].ravel() - else: - if ndim == 1: - ret = np.empty( - (vShape[1], self.TotalCells), - dtype=self.varData[variable].dtype, - ) - ret[:] = np.nan - for i in range(vShape[1]): - ret[i] = self.varData[variable][:, i, :, :, :].ravel() - assert (ret != np.nan).all() - return ret - elif ndim == 2: - ret = np.empty( - (vShape[1] * vShape[2], self.TotalCells), - dtype=self.varData[variable].dtype, - ) - ret[:] = np.nan - for i in range(vShape[1]): - for j in range(vShape[2]): - ret[i + vShape[1] * j] = self.varData[variable][ - :, i, j, :, :, : - ].ravel() - assert (ret != np.nan).all() - return ret - else: - ret = np.empty( - (vShape[1] * vShape[2] * vShape[3], self.TotalCells), - dtype=self.varData[variable].dtype, - ) - ret[:] = np.nan - for i in range(vShape[1]): - for j in range(vShape[2]): - for k in range(vShape[3]): - ret[ - i + vShape[1] * (j + vShape[2] * k) - ] = self.varData[variable][ - :, i, j, k, :, :, : - ].ravel() - assert (ret != np.nan).all() - return ret - else: - # Not cell-based variable - raise Exception( - f"Flattening only supported for cell-based variables but requested for {variable}" + vnew[:, 2] = 0.5 * ( + v[:, 2, ..., 1:, :-1, :-1] + v[:, 2, ..., :-1, :-1, :-1] + ) + if self.varTopology[variable] == "Edge": + if simdim == 1: + # nothing to do for E1 == :,0 + # E2 and E3 behave like node-centered data + vnew[:, 1] = 0.5 * (v[:, 1, ..., 1:] + v[:, 1, ..., :-1]) + vnew[:, 2] = 0.5 * (v[:, 2, ..., 1:] + v[:, 2, ..., :-1]) + if simdim == 2: + # E1 and E2 behave like face centered data + vnew[:, 0] = 0.5 * (v[:, 0, ..., 1:, :-1] + v[:, 0, ..., :-1, :-1]) + vnew[:, 1] = 0.5 * (v[:, 1, ..., :-1, 1:] + v[:, 1, ..., :-1, :-1]) + # E3 behaves like node-centered data + vnew[:, 2] = 0.25 * ( + v[:, 2, ..., 1:, 1:] + + v[:, 2, ..., 1:, :-1] + + v[:, 2, ..., :-1, 1:] + + v[:, 2, ..., :-1, :-1] + ) + else: # simdim == 3 + vnew[:, 0] = 
0.25 * ( + v[:, 0, ..., 1:, 1:, :-1] + + v[:, 0, ..., 1:, :-1, :-1] + + v[:, 0, ..., :-1, 1:, :-1] + + v[:, 0, ..., :-1, :-1, :-1] + ) + vnew[:, 1] = 0.25 * ( + v[:, 1, ..., 1:, :-1, 1:] + + v[:, 1, ..., 1:, :-1, :-1] + + v[:, 1, ..., :-1, :-1, 1:] + + v[:, 1, ..., :-1, :-1, :-1] + ) + vnew[:, 2] = 0.25 * ( + v[:, 2, ..., :-1, 1:, 1:] + + v[:, 2, ..., :-1, 1:, :-1] + + v[:, 2, ..., :-1, :-1, 1:] + + v[:, 2, ..., :-1, :-1, :-1] + ) + elif self.varTopology[variable] == "Node": + if simdim == 1: + vnew = (1.0 / 2.0) * (v[..., 1:] + v[..., :-1]) + elif simdim == 2: + vnew = (1.0 / 4.0) * ( + v[..., 1:, 1:] + + v[..., 1:, :-1] + + v[..., :-1, 1:] + + v[..., :-1, :-1] + ) + else: # simdim == 3 + vnew = (1.0 / 8.0) * ( + v[..., 1:, 1:, 1:] + + v[..., :-1, :-1, :-1] + + v[..., 1:, 1:, :-1] + + v[..., 1:, :-1, 1:] + + v[..., :-1, 1:, 1:] + + v[..., :-1, :-1, 1:] + + v[..., :-1, 1:, :-1] + + v[..., 1:, :-1, :-1] ) + else: + raise ValueError("This topology cannot be averaged") + v = vnew + self.varData[variable] = v + + if flatten: + nblocks = vShape[0] + if self.varTopology[variable] == "None": + remaining_size = np.product(vShape[1:]) + return self.varData[variable].reshape(nblocks, remaining_size) + else: + preserved_shape = vShape[:-3] + remaining_size = np.product(vShape[-3:]) + return self.varData[variable].reshape(*preserved_shape, remaining_size) if self.IncludesGhost and interior: nghost = self.NGhost - # TODO(tbd) remove legacy mode in next major rel. - if self.OutputFormatVersion == -1: - if vShape[3] == 1: - return self.varData[variable][:, :, :, :] - elif vShape[2] == 1: - return self.varData[variable][:, :, :, nghost:-nghost] - elif vShape[1] == 1: - return self.varData[variable][:, :, nghost:-nghost, nghost:-nghost] - else: - return self.varData[variable][ - :, nghost:-nghost, nghost:-nghost, nghost:-nghost - ] + print(vShape) + if vShape[-1] == 1: + return self.varData[variable][:] + elif vShape[-2] == 1: + return self.varData[variable][..., nghost:-nghost] + elif vShape[-3] == 1: + return self.varData[variable][..., nghost:-nghost, nghost:-nghost] else: - print(vShape) - if vShape[-1] == 1: - return self.varData[variable][:] - elif vShape[-2] == 1: - return self.varData[variable][..., nghost:-nghost] - elif vShape[-3] == 1: - return self.varData[variable][..., nghost:-nghost, nghost:-nghost] - else: - return self.varData[variable][ - ..., nghost:-nghost, nghost:-nghost, nghost:-nghost - ] + return self.varData[variable][ + ..., nghost:-nghost, nghost:-nghost, nghost:-nghost + ] return self.varData[variable][:] def GetSwarm(self, swarm): @@ -720,16 +732,11 @@ def GetComponents(self, components, flatten=True): # If dataset isn't a vector, just save dataset component_data[component] = dataset else: - # TODO(tbd) remove legacy mode in next major rel. - # Data is a vector, save only the component - if self.OutputFormatVersion == -1: - component_data[component] = dataset[..., idx] - else: - if flatten: - component_data[component] = dataset[idx, ...] + if flatten: + component_data[component] = dataset[idx, ...] # need to take leading block index into account - else: - component_data[component] = dataset[:, idx, ...] + else: + component_data[component] = dataset[:, idx, ...] return component_data From 101be68869a2ef8ded6e34e59e103365ca51a18d Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 11:00:50 -0600 Subject: [PATCH 42/58] tensor type... 
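The per-variable attribute written to the companion .xdmf file now carries the
variable's topological location instead of a hard-coded cell centering, so
face-, edge-, and node-centered fields are no longer labeled as cell data.

Sketch of where the string comes from (illustrative, not part of this diff;
the location name is produced by the helper added in PATCH 39/58 and wrapped
by LocationToStringRef into the XDMF attribute reference, e.g. Center="Node"
in standard XDMF):

  using namespace parthenon;
  // Maps the Cell/Face/Edge/Node/None metadata flags to their names.
  std::string loc = Metadata::LocationToString(MetadataFlag(Metadata::Node));
  // loc == "Node"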
--- src/outputs/parthenon_xdmf.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 61bbfb209a47..ab7147bbdcbf 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -252,10 +252,10 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name } } const int tensor_dims = ndims - 1 - 3; - + wherestring = LocationToStringRef(where); if (tensor_dims == 0) { const std::string prefix = " "; - fid << prefix << R"(" << std::endl; fid << prefix << " " << R"( Date: Thu, 28 Mar 2024 11:01:41 -0600 Subject: [PATCH 43/58] oops needs type --- src/outputs/parthenon_xdmf.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index ab7147bbdcbf..60fdb4b9498e 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -252,7 +252,7 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name } } const int tensor_dims = ndims - 1 - 3; - wherestring = LocationToStringRef(where); + auto wherestring = LocationToStringRef(where); if (tensor_dims == 0) { const std::string prefix = " "; fid << prefix << R"( Date: Thu, 28 Mar 2024 11:06:27 -0600 Subject: [PATCH 44/58] remove one-sided MPI --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 28ec449f3cab..ed6a1bb05a16 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Parthenon -- a performance portable block-structured adaptive mesh refinement fr * High performance by * device first/device resident approach (work data only in device memory to prevent expensive transfers between host and device) * transparent packing of data across blocks (to reduce/hide kernel launch latency) - * direct device-to-device communication via asynchronous, one-sided MPI communication + * direct device-to-device communication via asynchronous MPI communication * Intermediate abstraction layer to hide complexity of device kernel launches * Flexible, plug-in package system * Abstract variables controlled via metadata flags From c41eee70eb421a209c6d521e49ff96fa25a1c438 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 11:08:52 -0600 Subject: [PATCH 45/58] fix index.rst --- doc/sphinx/index.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/sphinx/index.rst b/doc/sphinx/index.rst index d78ca4d422dc..924cc3f7ce5e 100644 --- a/doc/sphinx/index.rst +++ b/doc/sphinx/index.rst @@ -15,11 +15,12 @@ Key Features * Device first/device resident approach (work data only in device memory to prevent expensive transfers between host and device) * Transparent packing of data across blocks (to reduce/hide kernel launch latency) -* Direct device-to-device communication via asynchronous, one-sided MPI communication +* Direct device-to-device communication via asynchronous MPI communication * Intermediate abstraction layer to hide complexity of device kernel launches * Flexible, plug-in package system * Abstract variables controlled via metadata flags * Support for particles +* Support for cell-, node-, face-, and edge-centered fields * Multi-stage drivers/integrators with support for task-based parallelism Community From deac3bc6bf188463c9edcec6cf3c0637fb084cdc Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 12:12:37 -0600 Subject: [PATCH 46/58] xdmf fix for node centered data hopefully --- 
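Note (illustrative, not part of the commit message): node-centered data spans
the cell corners, so it has one more entry per direction than the cell count;
the XDMF extents therefore have to be re-read from the per-variable dims[]
filled by FillShape instead of reusing the mesh-wide nx1/nx2/nx3. A quick
sketch of the difference, with assumed block dimensions:

  // Sketch only: 16^3 cells per block.
  const int ncells = 16;
  const int nnodes = ncells + 1;  // node-centered data lives on cell corners
  // cell-centered field: dims321 = "16 16 16"
  // node-centered field: dims321 = "17 17 17"
  std::string dims321_node = std::to_string(nnodes) + " " +
                             std::to_string(nnodes) + " " +
                             std::to_string(nnodes);  // "17 17 17"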
src/outputs/parthenon_xdmf.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 60fdb4b9498e..9c4937157c56 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -161,6 +161,12 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int } ndim = vinfo.FillShape(domain, &(dims[1])) + 1; const int num_components = vinfo.num_components; + nx3 = dims[ndim - 3]; + nx2 = dims[ndim - 2]; + nx1 = dims[ndim - 1]; + dims321 = + std::to_string(nx3) + " " + std::to_string(nx2) + " " + std::to_string(nx1); + writeXdmfSlabVariableRef(xdmf, vinfo.label, vinfo.component_labels, hdfFile, ib, num_components, ndim, dims, dims321, vinfo.is_vector, vinfo.where); From c87de366792a9023e44c3c2516485fafb88c3873 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 12:19:13 -0600 Subject: [PATCH 47/58] changelog for breaking, update CC --- CHANGELOG.md | 1 + src/outputs/parthenon_xdmf.cpp | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8efc2556ea2..6526638d6ae6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,6 +55,7 @@ Date: 2024-03-21 - [[PR 982]](https://github.com/parthenon-hpc-lab/parthenon/pull/982) add some gut check testing for parthenon-VIBE ### Incompatibilities (i.e. breaking changes) +- [[PR1019](https://github.com/parthenon-hpc-lab/parthenon/pull/1019) Remove support for file formats < 3 - [[PR 987]](https://github.com/parthenon-hpc-lab/parthenon/pull/987) Change the API for what was IterativeTasks - [[PR 974]](https://github.com/parthenon-hpc-lab/parthenon/pull/974) Change GetParentPointer to always return T* - [[PR 996]](https://github.com/parthenon-hpc-lab/parthenon/pull/996) Remove dynamic allocations from swarm particle creation diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 9c4937157c56..7d4ddc4d7857 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -3,7 +3,7 @@ // Copyright(C) 2023 The Parthenon collaboration // Licensed under the 3-clause BSD License, see LICENSE file for details //======================================================================================== -// (C) (or copyright) 2020-2023. Triad National Security, LLC. All rights reserved. +// (C) (or copyright) 2020-2024. Triad National Security, LLC. All rights reserved. // // This program was produced under U.S. 
Government contract 89233218CNA000001 for Los // Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC From a68a7821bbcad9a4b415ad1ca0228eaaeec47bfb Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 12:27:50 -0600 Subject: [PATCH 48/58] disable vector tag in xdmf if not cell-centered --- src/outputs/parthenon_xdmf.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 7d4ddc4d7857..692564fb0e2f 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -285,7 +285,7 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name const std::string prefix = " "; for (int i = 0; i < nentries; i++) { fid << prefix << R"( Date: Thu, 28 Mar 2024 12:40:29 -0600 Subject: [PATCH 49/58] actually short-circuit special casing for vector logic --- src/outputs/parthenon_xdmf.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 692564fb0e2f..0d27d2594573 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -247,7 +247,8 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name std::vector names; int nentries = 1; // TODO(JMM): this is not generic - if (num_components == 1 || isVector) { + bool is_cell_vector = isVector && (where == MetadataFlag({Metadata::Cell})); + if (num_components == 1 || is_cell_vector) { // we only make one entry, because either num_components == 1, or we write this as a // vector names.push_back(name); @@ -285,7 +286,7 @@ static void writeXdmfSlabVariableRef(std::ofstream &fid, const std::string &name const std::string prefix = " "; for (int i = 0; i < nentries; i++) { fid << prefix << R"( Date: Thu, 28 Mar 2024 12:44:52 -0600 Subject: [PATCH 50/58] caveat edge locations --- doc/sphinx/src/outputs.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/sphinx/src/outputs.rst b/doc/sphinx/src/outputs.rst index 170c83ebac7e..997f4352c06e 100644 --- a/doc/sphinx/src/outputs.rst +++ b/doc/sphinx/src/outputs.rst @@ -398,6 +398,12 @@ capable of opening and visualizing Parthenon graphics dumps. In both cases, the ``.xdmf`` files should be opened. In ParaView, select the “XDMF Reader” when prompted. +.. warning:: + + Currently parthenon face- and edge- centered data is not supported + for ParaView and VisIt. However, our python tooling does support + all mesh locations. + Preparing outputs for ``yt`` ---------------------------- From 45a896d3806ccf159c815851adfb3a546b928cae Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 12:45:33 -0600 Subject: [PATCH 51/58] warning --- doc/sphinx/src/outputs.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/sphinx/src/outputs.rst b/doc/sphinx/src/outputs.rst index 997f4352c06e..2c274f04c078 100644 --- a/doc/sphinx/src/outputs.rst +++ b/doc/sphinx/src/outputs.rst @@ -399,7 +399,6 @@ cases, the ``.xdmf`` files should be opened. In ParaView, select the “XDMF Reader” when prompted. .. warning:: - Currently parthenon face- and edge- centered data is not supported for ParaView and VisIt. However, our python tooling does support all mesh locations. 
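As a companion to the documentation caveat above, here is a minimal sketch of reading a non-cell-centered field through the python tooling, which does handle all mesh locations. File and variable names are hypothetical, and the `average_to_cell_centers` keyword is the name this series settles on in its final patch:

    # Hypothetical file and variable names; assumes the parthenon_tools
    # package from scripts/python/packages is installed.
    from parthenon_tools.phdf import phdf

    data = phdf("parthenon.out0.00010.phdf")

    # By default, node/face/edge data is averaged to cell centers, so the
    # trailing (nz, ny, nx) shape per block matches a cell-centered field.
    v_avg = data.Get("my_node_var", flatten=False)

    # Opt out to get the raw node-centered array instead, which carries
    # one extra point per active dimension.
    v_raw = data.Get("my_node_var", flatten=False,
                     average_to_cell_centers=False)
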
From 492cb7061cc60779fda3f838320884aec4866bc1 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 15:21:21 -0600 Subject: [PATCH 52/58] more verbose error in phdf --- .../python/packages/parthenon_tools/parthenon_tools/phdf.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py index 2cde89426c4f..2e2e48e49fb4 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py @@ -635,7 +635,11 @@ def Get(self, variable, flatten=True, interior=False, average=True): + v[..., 1:, :-1, :-1] ) else: - raise ValueError("This topology cannot be averaged") + raise ValueError( + "Topology {} for var {} cannot be averaged".format( + self.varTopology[variable], variable + ) + ) v = vnew self.varData[variable] = v From 242207f540e48bbf5b18f24b81649af96a72e603 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 17:30:20 -0600 Subject: [PATCH 53/58] stupid python UTF-8 nonsense --- .../packages/parthenon_tools/parthenon_tools/phdf.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py index 2e2e48e49fb4..f44b44c383e2 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py @@ -546,6 +546,9 @@ def Get(self, variable, flatten=True, interior=False, average=True): self.varTopology[variable] = self.fid[variable].attrs["TopologicalLocation"] except: self.varTopology[variable] = "Cell" + if type(self.varTopology[variable]) != str: + # This is stupid and seems to be python version dependent. + self.varTopology[variable] = self.varTopology[variable].decode("UTF-8") average_able = (self.varTopology[variable] != "Cell") and ( self.varTopology[variable] != "None" ) @@ -635,11 +638,12 @@ def Get(self, variable, flatten=True, interior=False, average=True): + v[..., 1:, :-1, :-1] ) else: - raise ValueError( - "Topology {} for var {} cannot be averaged".format( + print( + "WARNING: Topology {} for var {} cannot be averaged. Resetting.".format( self.varTopology[variable], variable ) ) + vnew = v.copy() v = vnew self.varData[variable] = v From ff1c609387bcec2eb7e18a0dde2b0f23eb139766 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Thu, 28 Mar 2024 17:45:06 -0600 Subject: [PATCH 54/58] reset nghost to not break other unit tests --- tst/unit/test_output_utils.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index 5fc51f71bfe7..cd637168f720 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -59,6 +59,9 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti constexpr auto interior = parthenon::IndexDomain::interior; constexpr auto entire = parthenon::IndexDomain::entire; + + // JMM: This needs to be reset to 0 when we're done, because other + // tests assume it's unset, thus zero-initialized. 
parthenon::Globals::nghost = NG; auto pkg = std::make_shared("Test package"); @@ -270,5 +273,9 @@ TEST_CASE("The VarInfo object produces appropriate ranges", "[VarInfo][OutputUti } } } + + // JMM: This needs to be reset to 0 when we're done, because other + // tests assume it's unset, thus zero-initialized. + parthenon::Globals::nghost = 0; } } From d5aba21219d326f6c3e7b3872177936ed204676d Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Fri, 29 Mar 2024 09:48:49 -0600 Subject: [PATCH 55/58] get ascent to behave itself --- src/outputs/ascent.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/outputs/ascent.cpp b/src/outputs/ascent.cpp index 13af3b50f613..bab4632485f1 100644 --- a/src/outputs/ascent.cpp +++ b/src/outputs/ascent.cpp @@ -175,7 +175,7 @@ void AscentOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, SimTime *tm, if (!var->IsSet(Metadata::Cell)) { continue; } - const auto var_info = VarInfo(var); + const auto var_info = VarInfo(var, bounds); for (int icomp = 0; icomp < var_info.num_components; ++icomp) { auto const data = Kokkos::subview(var->data, 0, 0, icomp, Kokkos::ALL(), From 3f9fb86ae36fd749260d219ccb9702902542b960 Mon Sep 17 00:00:00 2001 From: Jonah Miller Date: Mon, 1 Apr 2024 08:45:53 -0600 Subject: [PATCH 56/58] Update src/outputs/output_utils.cpp Co-authored-by: Adam M. Dempsey --- src/outputs/output_utils.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index 69330c7da4d4..b864c521ee79 100644 --- a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -60,7 +60,7 @@ Triple_t VarInfo::GetPaddedBoundsKJI(const IndexDomain domain) const je = std::max(je, jb.e); ie = std::max(ie, ib.e); } - IndexRange kb{ks, ke}, jb{is, ie}, ib{is, ie}; + IndexRange kb{ks, ke}, jb{js, je}, ib{is, ie}; return std::make_tuple(kb, jb, ib); } From 62bce90ec1569fce67ea59055981537f2178ccae Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 1 Apr 2024 15:08:48 -0600 Subject: [PATCH 57/58] fix exceptions --- .../parthenon_tools/movie2d.py | 89 +++++++++++-------- 1 file changed, 53 insertions(+), 36 deletions(-) diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py index c08700a2fe8f..0b3f68b41c38 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py @@ -21,7 +21,12 @@ from argparse import ArgumentParser from pathlib import Path -from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor +from concurrent.futures import ( + ThreadPoolExecutor, + ProcessPoolExecutor, + wait, + ALL_COMPLETED, +) import matplotlib as mpl import matplotlib.pyplot as plt @@ -211,14 +216,16 @@ def plot_dump( ntensors = len(q.shape[1:-3]) if components: if len(components) != ntensors: - print("value error!", len(components), ntensors, q.shape) raise ValueError( - "Tensor rank not the same as number of specified components" + "Tensor rank not the same as number of specified components: {}, {}, {}".format( + len(components), ntensors, q.shape + ) ) for c in components: if c > (q.shape[1] - 1): - print("Value error!", c, q.shape) - raise ValueError("Component out of bounds") + raise ValueError( + "Component {} out of bounds. 
Shape = {}".format(c, q.shape) + ) q = q[:, c] # move to 2d q = q[..., 0, :, :] @@ -311,6 +318,7 @@ def plot_dump( _x = ProcessPoolExecutor if args.worker_type == "process" else ThreadPoolExecutor with _x(max_workers=args.workers) as pool: + futures = [] for frame_id, file_name in enumerate(args.files): data = phdf(file_name) @@ -379,40 +387,49 @@ def plot_dump( # NOTE: After doing 5 test on different precision, keeping 2 looks more promising current_time = format(round(data.Time, 2), ".2f") if args.debug_plot: - pool.submit( - plot_dump, - data.xg, - data.yg, - q, - current_time, - output_file, - True, - data.gid, - data.xig, - data.yig, - data.xeg, - data.yeg, - components, - swarmx, - swarmy, - swarmcolor, - particlesize, + futures.append( + pool.submit( + plot_dump, + data.xg, + data.yg, + q, + current_time, + output_file, + True, + data.gid, + data.xig, + data.yig, + data.xeg, + data.yeg, + components, + swarmx, + swarmy, + swarmcolor, + particlesize, + ) ) else: - pool.submit( - plot_dump, - data.xng, - data.yng, - q, - current_time, - output_file, - True, - components=components, - swarmx=swarmx, - swarmy=swarmy, - swarmcolor=swarmcolor, - particlesize=particlesize, + futures.append( + pool.submit( + plot_dump, + data.xng, + data.yng, + q, + current_time, + output_file, + True, + components=components, + swarmx=swarmx, + swarmy=swarmy, + swarmcolor=swarmcolor, + particlesize=particlesize, + ) ) + wait(futures, return_when=ALL_COMPLETED) + for future in futures: + exception = future.exception() + if exception: + raise exception if not ERROR_FLAG: logger.info("All frames produced.") From 5d91dd9ca7f7be436238f8237cdec8e98cdb01a4 Mon Sep 17 00:00:00 2001 From: Jonah Maxwell Miller Date: Mon, 1 Apr 2024 16:13:12 -0600 Subject: [PATCH 58/58] pgrete comments --- .../parthenon_tools/movie2d.py | 4 ++ .../parthenon_tools/parthenon_tools/phdf.py | 59 +++++++++---------- src/outputs/parthenon_hdf5_types.hpp | 3 +- src/outputs/parthenon_xdmf.cpp | 5 +- src/outputs/restart_hdf5.cpp | 6 +- src/parthenon_manager.cpp | 13 ++-- tst/unit/test_output_utils.cpp | 4 +- 7 files changed, 51 insertions(+), 43 deletions(-) diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py index 0b3f68b41c38..363f4d00eb66 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/movie2d.py @@ -221,6 +221,10 @@ def plot_dump( len(components), ntensors, q.shape ) ) + # The first index of q is block index. Here we walk through + # the tensor components, slowest-first and, iteratively, fix + # q's slowest moving non-block index to the fixed tensor + # component. Then we move to the next index. for c in components: if c > (q.shape[1] - 1): raise ValueError( diff --git a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py index f44b44c383e2..4e75439791b4 100644 --- a/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py +++ b/scripts/python/packages/parthenon_tools/parthenon_tools/phdf.py @@ -99,21 +99,20 @@ class phdf: BlockBounds[NumBlocks]: Bounds of all the blocks Class Functions: - Get(variable, flatten=True, interior=False, average=True): + Get(variable, flatten=True, interior=False, average_to_cell_centers=True): Reads data for the named variable from file. Returns None if variable is not found in the file or the data if found. 
- If variable is a vector, each element of the returned numpy - array is a vector of that length. - - Default is to return a flat array of length TotalCells. - However if flatten is set to False, a 4D (or 5D if vector) - array is returned that has dimensions [NumBlocks, Nz, Ny, Nx] - where NumBlocks is the total number of blocks, and Nz, Ny, and - Nx are the number of cells in the z, y, and x directions - respectively. + Default is to return a flat array of length + TotalCells*total tensor components. However if flatten is + set to False, a 4D (or 5D, or 6D if tensorial) array is + returned that has dimensions + [NumBlocks, tensor_components, Nz, Ny, Nx] + where NumBlocks is the total number of blocks, and Nz, Ny, + and Nx are the number of cells in the z, y, and x + directions respectively. If flatten is False and interior is True, only non-ghost data will be returned. This array will correspond to the coordinates @@ -121,7 +120,7 @@ class phdf: By default, node, face, and edge centered variables are averaged to cell centers for visualization. This can be - disabled by setting average = False. + disabled by setting average_to_cell_centers = False. ToLocation(index): Returns the location as an array [ib, bidx, iz, iy, ix] which convets the index into a block, index within that @@ -502,21 +501,20 @@ def findBlockIdxInOther(self, other, ib, tol=1e-10, verbose=False): print(f"Block id: {ib} with bounds {myibBounds} not found in {other.file}") return None # block index not found - def Get(self, variable, flatten=True, interior=False, average=True): + def Get(self, variable, flatten=True, interior=False, average_to_cell_centers=True): """Reads data for the named variable from file. Returns None if variable is not found in the file or the data if found. - If variable is a vector, each element of the returned numpy - array is a vector of that length. - - If flatten is true, returns a flat array of length TotalCells. - If flatten is set to False, a 4D (or 5D if vector) - array is returned that has dimensions [NumBlocks, Nz, Ny, Nx] - where NumBlocks is the total number of blocks, and Nz, Ny, and - Nx are the number of cells in the z, y, and x directions - respectively. + Default is to return a flat array of length + TotalCells*total tensor components. However if flatten is + set to False, a 4D (or 5D, or 6D if tensorial) array is + returned that has dimensions + [NumBlocks, tensor_components, Nz, Ny, Nx] + where NumBlocks is the total number of blocks, and Nz, Ny, + and Nx are the number of cells in the z, y, and x + directions respectively. If flatten is False and interior is True, only non-ghost data will be returned. This array will correspond to the coordinates @@ -524,7 +522,7 @@ def Get(self, variable, flatten=True, interior=False, average=True): By default, node, face, and edge centered variables are averaged to cell centers for visualization. This can be - disabled by setting average = False. + disabled by setting average_to_cell_centers = False. """ try: if self.varData.get(variable) is None: @@ -543,27 +541,28 @@ def Get(self, variable, flatten=True, interior=False, average=True): return None try: - self.varTopology[variable] = self.fid[variable].attrs["TopologicalLocation"] + self.varTopology[variable] = ( + self.fid[variable].attrs["TopologicalLocation"].astype(str) + ) except: self.varTopology[variable] = "Cell" - if type(self.varTopology[variable]) != str: - # This is stupid and seems to be python version dependent. 
- self.varTopology[variable] = self.varTopology[variable].decode("UTF-8") average_able = (self.varTopology[variable] != "Cell") and ( self.varTopology[variable] != "None" ) vShape = self.varData[variable].shape simdim = (vShape[-1] > 1) + (vShape[-2] > 1) + (vShape[-3] > 1) - if average and average_able: + if average_to_cell_centers and average_able: v = self.varData[variable] vnew = v.copy() + # Make vnew the proper shape to be averaged into vnew = vnew[..., :-1] if simdim >= 2: vnew = vnew[..., :-1, :] if simdim >= 3: vnew = vnew[..., :-1, :, :] + # Average... if self.varTopology[variable] == "Face": if simdim == 1: vnew[:, 0] = 0.5 * (v[:, 0, ..., 1:] + v[:, 0, ..., :-1]) @@ -638,12 +637,11 @@ def Get(self, variable, flatten=True, interior=False, average=True): + v[..., 1:, :-1, :-1] ) else: - print( - "WARNING: Topology {} for var {} cannot be averaged. Resetting.".format( + raise ValueError( + "Topology {} for var {} cannot be averaged.".format( self.varTopology[variable], variable ) ) - vnew = v.copy() v = vnew self.varData[variable] = v @@ -659,7 +657,6 @@ def Get(self, variable, flatten=True, interior=False, average=True): if self.IncludesGhost and interior: nghost = self.NGhost - print(vShape) if vShape[-1] == 1: return self.varData[variable][:] elif vShape[-2] == 1: diff --git a/src/outputs/parthenon_hdf5_types.hpp b/src/outputs/parthenon_hdf5_types.hpp index 55514e221876..6a3454b868d3 100644 --- a/src/outputs/parthenon_hdf5_types.hpp +++ b/src/outputs/parthenon_hdf5_types.hpp @@ -40,7 +40,8 @@ namespace parthenon { namespace HDF5 { -// Number of dimension of HDF5 field data sets (block x nv x nu x nt x nz x ny x nx) +// Number of dimension of HDF5 field data sets: +// (block x n_topological_elements x nv x nu x nt x nz x ny x nx) static constexpr size_t H5_NDIM = MAX_VARIABLE_DIMENSION + 1; static constexpr int OUTPUT_VERSION_FORMAT = 4; diff --git a/src/outputs/parthenon_xdmf.cpp b/src/outputs/parthenon_xdmf.cpp index 0d27d2594573..d16f3507ea00 100644 --- a/src/outputs/parthenon_xdmf.cpp +++ b/src/outputs/parthenon_xdmf.cpp @@ -117,9 +117,6 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int // Now write Grid for each block dims[0] = pm->nbtotal; - std::string dims321 = - std::to_string(nx3) + " " + std::to_string(nx2) + " " + std::to_string(nx1); - for (int ib = 0; ib < pm->nbtotal; ib++) { xdmf << " " << std::endl; xdmf << blockTopology; @@ -164,7 +161,7 @@ void genXDMF(std::string hdfFile, Mesh *pm, SimTime *tm, IndexDomain domain, int nx3 = dims[ndim - 3]; nx2 = dims[ndim - 2]; nx1 = dims[ndim - 1]; - dims321 = + std::string dims321 = std::to_string(nx3) + " " + std::to_string(nx2) + " " + std::to_string(nx1); writeXdmfSlabVariableRef(xdmf, vinfo.label, vinfo.component_labels, hdfFile, ib, diff --git a/src/outputs/restart_hdf5.cpp b/src/outputs/restart_hdf5.cpp index 43f0880127ef..a5016bc68e8d 100644 --- a/src/outputs/restart_hdf5.cpp +++ b/src/outputs/restart_hdf5.cpp @@ -201,10 +201,14 @@ void RestartReaderHDF5::ReadBlocks(const std::string &name, IndexRange range, count[0] = static_cast(range.e - range.s + 1); const IndexDomain domain = hasGhost ? IndexDomain::entire : IndexDomain::interior; + // Currently supports versions 3 and 4. if (file_output_format_version >= HDF5::OUTPUT_VERSION_FORMAT - 1) { total_dim = info.FillShape(domain, &(count[1])) + 1; } else { - PARTHENON_THROW("Unknown output format version in restart file.") + std::stringstream msg; + msg << "File format version " << file_output_format_version << " not supported. 
" + << "Current format is " << HDF5::OUTPUT_VERSION_FORMAT << std::endl; + PARTHENON_THROW(msg) } hsize_t total_count = 1; diff --git a/src/parthenon_manager.cpp b/src/parthenon_manager.cpp index d7cf79b95023..91a87c386e6c 100644 --- a/src/parthenon_manager.cpp +++ b/src/parthenon_manager.cpp @@ -239,11 +239,13 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { std::cout << "Blocks assigned to rank " << Globals::my_rank << ": " << nbs << ":" << nbe << std::endl; + // Currently supports versions 3 and 4. const auto file_output_format_ver = resfile.GetOutputFormatVersion(); if (file_output_format_ver < HDF5::OUTPUT_VERSION_FORMAT - 1) { - // Being extra stringent here so that we don't forget to update the machinery when - // another change happens. - PARTHENON_THROW("Deprecated file format"); + std::stringstream msg; + msg << "File format version " << file_output_format_ver << " not supported. " + << "Current format is " << HDF5::OUTPUT_VERSION_FORMAT << std::endl; + PARTHENON_THROW(msg) } // Get an iterator on block 0 for variable listing @@ -341,7 +343,10 @@ void ParthenonManager::RestartPackages(Mesh &rm, RestartReader &resfile) { v_h(topo, t, u, v, k, j, i) = tmp[index]; }); } else { - PARTHENON_THROW("Unsupported output format version in restart file.") + std::stringstream msg; + msg << "File format version " << file_output_format_ver << " not supported. " + << "Current format is " << HDF5::OUTPUT_VERSION_FORMAT << std::endl; + PARTHENON_THROW(msg) } v->data.DeepCopy(v_h); diff --git a/tst/unit/test_output_utils.cpp b/tst/unit/test_output_utils.cpp index cd637168f720..4b04f3851eba 100644 --- a/tst/unit/test_output_utils.cpp +++ b/tst/unit/test_output_utils.cpp @@ -1,6 +1,6 @@ //======================================================================================== -// Athena++ astrophysical MHD code -// Copyright(C) 2014 James M. Stone and other code contributors +// Parthenon performance portable AMR framework +// Copyright(C) 2024 The Parthenon collaboration // Licensed under the 3-clause BSD License, see LICENSE file for details //======================================================================================== // (C) (or copyright) 2024. Triad National Security, LLC. All rights reserved.