From 039aa30b395dd455824df79561b2f55ea13970fe Mon Sep 17 00:00:00 2001 From: deegan Date: Fri, 6 Feb 2026 14:51:41 +0100 Subject: [PATCH 1/7] 3d... --- pyphare/pyphare/pharesee/run/utils.py | 3 +++ src/core/data/grid/gridlayout.hpp | 23 +++++++++++++++++++++ src/diagnostic/detail/vtkh5_type_writer.hpp | 1 - tests/simulator/test_diagnostics.py | 4 ++-- 4 files changed, 28 insertions(+), 3 deletions(-) diff --git a/pyphare/pyphare/pharesee/run/utils.py b/pyphare/pyphare/pharesee/run/utils.py index b36477706..6e0a79086 100644 --- a/pyphare/pyphare/pharesee/run/utils.py +++ b/pyphare/pyphare/pharesee/run/utils.py @@ -12,6 +12,9 @@ from pyphare.core.gridlayout import yee_centering +from pyphare.core.gridlayout import yee_centering + + def _current1d(by, bz, xby, xbz): # jx = 0 # jy = -dxBz diff --git a/src/core/data/grid/gridlayout.hpp b/src/core/data/grid/gridlayout.hpp index 5dc1442a3..57be4ad26 100644 --- a/src/core/data/grid/gridlayout.hpp +++ b/src/core/data/grid/gridlayout.hpp @@ -971,6 +971,29 @@ namespace core + /** + * @brief BxToMoments return the indexes and associated coef to compute the linear + * interpolation necessary to project Bx onto moments. + */ + NO_DISCARD auto static constexpr BxToMoments() { return GridLayoutImpl::BxToMoments(); } + + + /** + * @brief ByToMoments return the indexes and associated coef to compute the linear + * interpolation necessary to project By onto moments. + */ + NO_DISCARD auto static constexpr ByToMoments() { return GridLayoutImpl::ByToMoments(); } + + + /** + * @brief BzToMoments return the indexes and associated coef to compute the linear + * interpolation necessary to project Bz onto moments. + */ + NO_DISCARD auto static constexpr BzToMoments() { return GridLayoutImpl::BzToMoments(); } + + + + /** * @brief ExToMoments return the indexes and associated coef to compute the linear * interpolation necessary to project Ex onto moments. 
diff --git a/src/diagnostic/detail/vtkh5_type_writer.hpp b/src/diagnostic/detail/vtkh5_type_writer.hpp index ffc865700..aac881c61 100644 --- a/src/diagnostic/detail/vtkh5_type_writer.hpp +++ b/src/diagnostic/detail/vtkh5_type_writer.hpp @@ -472,7 +472,6 @@ void H5TypeWriter::VTKFileInitializer::resize_boxes(int const ilvl) for (int i = 0; i < core::mpi::rank(); ++i) box_offset += rank_boxes[i].size(); - PHARE_LOG_SCOPE(3, "VTKFileInitializer::resize_boxes::3"); amrbox_ds.select({box_offset, 0}, {rank_boxes[core::mpi::rank()].size(), dimension * 2}) .write(hier_data.flattened_lcl_level_boxes[ilvl]); diff --git a/tests/simulator/test_diagnostics.py b/tests/simulator/test_diagnostics.py index 46a4b3e05..ede9376e6 100644 --- a/tests/simulator/test_diagnostics.py +++ b/tests/simulator/test_diagnostics.py @@ -257,9 +257,9 @@ def test_dump_elapsed_time_diags(self, dim=1, interp=1): simInput = copy.deepcopy(simArgs) # configure simulation dim sized values for key in ["cells", "dl", "boundary_types"]: - simInput[key] = [simInput[key] for d in range(dim)] + simInput[key] = [simInput[key] for d in range(ndim)] - b0 = [[10 for i in range(dim)], [19 for i in range(dim)]] + b0 = [[10 for i in range(ndim)], [19 for i in range(ndim)]] simInput["refinement_boxes"] = {"L0": {"B0": b0}} diag_path = self.unique_diag_dir_for_test_case(f"{out}/test", dim, interp) From a30d74e0807dafaa5f6888f0a13e477a3f290d94 Mon Sep 17 00:00:00 2001 From: deegan Date: Sat, 7 Feb 2026 15:18:52 +0100 Subject: [PATCH 2/7] hlld post rebase --- pyphare/pyphare/pharein/diagnostics.py | 2 +- .../pyphare/pharein/maxwellian_fluid_model.py | 2 +- pyphare/pyphare/pharein/restarts.py | 4 ++-- pyphare/pyphare/pharein/simulation.py | 20 ++++++++++++++-- pyphare/pyphare/pharein/uniform_model.py | 2 +- res/sim/all.txt | 2 ++ src/core/data/grid/gridlayout.hpp | 23 ------------------- src/diagnostic/detail/vtkh5_type_writer.hpp | 1 + .../test_ion_population_fixtures.hpp | 2 +- 
.../functional/mhd_convergence/convergence.py | 1 - tests/simulator/test_initialization.py | 8 +++---- 11 files changed, 31 insertions(+), 36 deletions(-) diff --git a/pyphare/pyphare/pharein/diagnostics.py b/pyphare/pyphare/pharein/diagnostics.py index 90664b7e3..4d2a08bd1 100644 --- a/pyphare/pyphare/pharein/diagnostics.py +++ b/pyphare/pyphare/pharein/diagnostics.py @@ -277,7 +277,7 @@ def to_dict(self): def population_in_model(population): - return population in [p for p in global_vars.sim.model.populations] + return population in [p for p in global_vars.sim.maxwellian_fluid_model.populations] class FluidDiagnostics_(Diagnostics): diff --git a/pyphare/pyphare/pharein/maxwellian_fluid_model.py b/pyphare/pyphare/pharein/maxwellian_fluid_model.py index 4a0f38d39..2d747bb6f 100644 --- a/pyphare/pyphare/pharein/maxwellian_fluid_model.py +++ b/pyphare/pyphare/pharein/maxwellian_fluid_model.py @@ -93,7 +93,7 @@ def __init__(self, bx=None, by=None, bz=None, **kwargs): self.validate(global_vars.sim) self.validated = True - global_vars.sim.set_model(self) + global_vars.sim.set_maxwellian_fluid_model(self) # ------------------------------------------------------------------------------ diff --git a/pyphare/pyphare/pharein/restarts.py b/pyphare/pyphare/pharein/restarts.py index aa6076031..1c5a1006c 100644 --- a/pyphare/pyphare/pharein/restarts.py +++ b/pyphare/pyphare/pharein/restarts.py @@ -59,7 +59,7 @@ def restart_time(restart_options): if restart_options["restart_time"] == "auto": return find_latest_time_from_restarts(restart_options) return restart_options["restart_time"] - return None + return 0 def find_latest_time_from_restarts(restart_options): @@ -73,7 +73,7 @@ def find_latest_time_from_restarts(restart_options): except ValueError: ... 
# skipped - return None if len(dirs) == 0 else sorted(dirs)[-1] + return 0 if len(dirs) == 0 else sorted(dirs)[-1] # ------------------------------------------------------------------------------ diff --git a/pyphare/pyphare/pharein/simulation.py b/pyphare/pyphare/pharein/simulation.py index 649d4a0dc..ed9503071 100644 --- a/pyphare/pyphare/pharein/simulation.py +++ b/pyphare/pyphare/pharein/simulation.py @@ -1070,7 +1070,9 @@ def __init__(self, **kwargs): self.ndim = compute_dimension(self.cells) self.diagnostics = {} - self.model = None + self.uniform_model = None + self.maxwellian_fluid_model = None + self.mhd_model = None self.electrons = None self.load_balancer = None @@ -1185,7 +1187,21 @@ def set_model(self, model): :meta private: """ - self.model = model + self.uniform_model = mhd_model + + def set_maxwellian_fluid_model(self, maxwellian_fluid_model): + """ + + :meta private: + """ + self.maxwellian_fluid_model = maxwellian_fluid_model + + def set_mhd_model(self, mhd_model): + """ + + :meta private: + """ + self.mhd_model = mhd_model def set_electrons(self, electrons): """ diff --git a/pyphare/pyphare/pharein/uniform_model.py b/pyphare/pyphare/pharein/uniform_model.py index 9d079d012..055fcf754 100644 --- a/pyphare/pyphare/pharein/uniform_model.py +++ b/pyphare/pyphare/pharein/uniform_model.py @@ -11,7 +11,7 @@ def __init__(self, b=(1.0, 0.0, 0.0), e=(0.0, 0.0, 0.0), **kwargs): if global_vars.sim.model is not None: raise RuntimeError("A model is already created") - global_vars.sim.set_model(self) + global_vars.sim.set_uniform_model(self) if len(b) != 3 or (not isinstance(b, tuple) and not isinstance(b, list)): raise ValueError("invalid B") diff --git a/res/sim/all.txt b/res/sim/all.txt index 629616c93..c56b56ded 100644 --- a/res/sim/all.txt +++ b/res/sim/all.txt @@ -31,3 +31,5 @@ 3,3,6 3,3,12 3,3,27 +2,2,4,TVDRK2,Linear,VanLeer,HLLD,false,false,false +2,2,4,TVDRK3,WENOZ,None,HLLD,false,false,false diff --git a/src/core/data/grid/gridlayout.hpp 
b/src/core/data/grid/gridlayout.hpp index 57be4ad26..5dc1442a3 100644 --- a/src/core/data/grid/gridlayout.hpp +++ b/src/core/data/grid/gridlayout.hpp @@ -971,29 +971,6 @@ namespace core - /** - * @brief BxToMoments return the indexes and associated coef to compute the linear - * interpolation necessary to project Bx onto moments. - */ - NO_DISCARD auto static constexpr BxToMoments() { return GridLayoutImpl::BxToMoments(); } - - - /** - * @brief ByToMoments return the indexes and associated coef to compute the linear - * interpolation necessary to project By onto moments. - */ - NO_DISCARD auto static constexpr ByToMoments() { return GridLayoutImpl::ByToMoments(); } - - - /** - * @brief BzToMoments return the indexes and associated coef to compute the linear - * interpolation necessary to project Bz onto moments. - */ - NO_DISCARD auto static constexpr BzToMoments() { return GridLayoutImpl::BzToMoments(); } - - - - /** * @brief ExToMoments return the indexes and associated coef to compute the linear * interpolation necessary to project Ex onto moments. 
diff --git a/src/diagnostic/detail/vtkh5_type_writer.hpp b/src/diagnostic/detail/vtkh5_type_writer.hpp index aac881c61..ffc865700 100644 --- a/src/diagnostic/detail/vtkh5_type_writer.hpp +++ b/src/diagnostic/detail/vtkh5_type_writer.hpp @@ -472,6 +472,7 @@ void H5TypeWriter::VTKFileInitializer::resize_boxes(int const ilvl) for (int i = 0; i < core::mpi::rank(); ++i) box_offset += rank_boxes[i].size(); + PHARE_LOG_SCOPE(3, "VTKFileInitializer::resize_boxes::3"); amrbox_ds.select({box_offset, 0}, {rank_boxes[core::mpi::rank()].size(), dimension * 2}) .write(hier_data.flattened_lcl_level_boxes[ilvl]); diff --git a/tests/core/data/ion_population/test_ion_population_fixtures.hpp b/tests/core/data/ion_population/test_ion_population_fixtures.hpp index 7314c9b5d..8330ae735 100644 --- a/tests/core/data/ion_population/test_ion_population_fixtures.hpp +++ b/tests/core/data/ion_population/test_ion_population_fixtures.hpp @@ -23,7 +23,7 @@ struct UsableIonsDefaultTypes public: auto static constexpr dim = ParticleArray_::dimension; auto static constexpr interp = interp_; - SimOpts static constexpr opts{dim, interp_}; + SimOpts<> static constexpr opts{dim, interp_}; using PHARE_Types = PHARE::core::PHARE_Types; using ParticleArray_t = ParticleArray_; diff --git a/tests/functional/mhd_convergence/convergence.py b/tests/functional/mhd_convergence/convergence.py index d56d8175f..5873519a8 100644 --- a/tests/functional/mhd_convergence/convergence.py +++ b/tests/functional/mhd_convergence/convergence.py @@ -152,4 +152,3 @@ def main(): if __name__ == "__main__": main() - diff --git a/tests/simulator/test_initialization.py b/tests/simulator/test_initialization.py index f5e5c82e2..d7c772965 100644 --- a/tests/simulator/test_initialization.py +++ b/tests/simulator/test_initialization.py @@ -267,7 +267,7 @@ def _test_B_is_as_provided_by_user(self, dim, interp_order, ppc=100, **kwargs): from pyphare.pharein import global_vars - model = global_vars.sim.model + model = 
global_vars.sim.maxwellian_fluid_model bx_fn = model.model_dict["bx"] by_fn = model.model_dict["by"] @@ -361,7 +361,7 @@ def _test_bulkvel_is_as_provided_by_user( from pyphare.pharein import global_vars - model = global_vars.sim.model + model = global_vars.sim.maxwellian_fluid_model # protons and beam have same bulk vel here so take only proton func. vx_fn = model.model_dict["protons"]["vx"] vy_fn = model.model_dict["protons"]["vy"] @@ -430,7 +430,7 @@ def _test_density_is_as_provided_by_user(self, ndim, interp_order, **kwargs): from pyphare.pharein import global_vars - model = global_vars.sim.model + model = global_vars.sim.maxwellian_fluid_model proton_density_fn = model.model_dict["protons"]["density"] beam_density_fn = model.model_dict["beam"]["density"] @@ -503,7 +503,7 @@ def _test_density_decreases_as_1overSqrtN( from pyphare.pharein import global_vars - model = global_vars.sim.model + model = global_vars.sim.maxwellian_fluid_model density_fn = model.model_dict["protons"]["density"] patch = hier.level(0).patches[0] From 99c4652129a107baecbd6b70a7880daa74fd94fa Mon Sep 17 00:00:00 2001 From: Ivan Girault Date: Wed, 10 Dec 2025 15:10:15 +0100 Subject: [PATCH 3/7] WIP Squashed commits WIP scalar -> scalar or tensor Add documentation Remove attributes relative to boundary location and physical quantity in field boundary condition definition. 
fix some merging issues WIP move some definitions, cosmetic changes WIP --- .gitignore | 1 + pyphare/pyphare/pharein/initialize/general.py | 7 + .../pyphare/pharein/maxwellian_fluid_model.py | 2 +- pyphare/pyphare/pharein/simulation.py | 45 ++-- pyphare/pyphare/pharein/uniform_model.py | 2 +- res/cmake/test.cmake | 3 + src/amr/CMakeLists.txt | 4 + src/amr/data/field/field_data.hpp | 4 +- src/amr/data/field/field_data_traits.hpp | 41 ++++ .../refine/field_refine_patch_strategy.hpp | 190 +++++++++++++++ .../refine/magnetic_refine_patch_strategy.hpp | 127 +++++----- .../data/tensorfield/tensor_field_data.hpp | 38 ++- .../tensorfield/tensor_field_data_traits.hpp | 98 ++++++++ .../tensorfield/tensor_field_geometry.hpp | 5 +- .../hybrid_hybrid_messenger_strategy.hpp | 68 ++++-- src/amr/messengers/messenger_factory.hpp | 15 +- src/amr/messengers/mhd_messenger.hpp | 220 ++++++++---------- src/amr/physical_models/hybrid_model.hpp | 11 + src/amr/physical_models/mhd_model.hpp | 20 +- src/amr/wrappers/hierarchy.hpp | 10 +- src/core/CMakeLists.txt | 5 + src/core/boundary/boundary.hpp | 139 +++++++++++ src/core/boundary/boundary_defs.hpp | 147 ++++++++++++ src/core/boundary/boundary_factory.hpp | 151 ++++++++++++ src/core/boundary/boundary_manager.hpp | 108 +++++++++ src/core/data/field/field_traits.hpp | 45 ++++ src/core/data/grid/gridlayout.hpp | 83 ++++++- src/core/data/grid/gridlayout_traits.hpp | 102 ++++++++ src/core/data/grid/gridlayoutdefs.hpp | 7 +- .../data/tensorfield/tensorfield_traits.hpp | 115 +++++++++ src/core/data/vecfield/vecfield_traits.hpp | 11 + ...field_antisymmetric_boundary_condition.hpp | 110 +++++++++ .../field_boundary_condition.hpp | 91 ++++++++ .../field_boundary_condition_dispatcher.hpp | 132 +++++++++++ .../field_boundary_condition_factory.hpp | 94 ++++++++ .../field_dirichlet_boundary_condition.hpp | 111 +++++++++ ..._transverse_neumann_boundary_condition.hpp | 162 +++++++++++++ .../field_neumann_boundary_condition.hpp | 101 ++++++++ 
.../field_symmetric_boundary_condition.hpp | 109 +++++++++ ...oundary_condition.cpp => no_boundary.hpp;} | 0 src/core/utilities/box/box.hpp | 90 +++++++ src/core/utilities/meta/meta_utilities.hpp | 43 ++++ src/core/utilities/point/point.hpp | 14 ++ .../amr/data/field/field_data/CMakeLists.txt | 31 +++ .../data/field/field_data/test_field_data.cpp | 14 ++ .../tensor_field_data/CMakeLists.txt | 31 +++ .../test_tensor_field_data.cpp | 26 +++ tests/core/data/field/CMakeLists.txt | 31 +++ tests/core/data/field/test_field.cpp | 8 + tests/core/data/gridlayout/CMakeLists.txt | 1 + .../data/gridlayout/gridlayout_traits.cpp | 0 .../test_ion_population_fixtures.hpp | 2 +- tests/core/data/tensorfield/CMakeLists.txt | 31 +++ .../data/tensorfield/test_tensorfield.cpp | 27 +++ tests/core/utilities/box/test_box.cpp | 115 ++++++++- tests/core/utilities/point/test_point.cpp | 13 +- 56 files changed, 2950 insertions(+), 261 deletions(-) create mode 100644 src/amr/data/field/field_data_traits.hpp create mode 100644 src/amr/data/field/refine/field_refine_patch_strategy.hpp create mode 100644 src/amr/data/tensorfield/tensor_field_data_traits.hpp create mode 100644 src/core/boundary/boundary.hpp create mode 100644 src/core/boundary/boundary_defs.hpp create mode 100644 src/core/boundary/boundary_factory.hpp create mode 100644 src/core/boundary/boundary_manager.hpp create mode 100644 src/core/data/field/field_traits.hpp create mode 100644 src/core/data/grid/gridlayout_traits.hpp create mode 100644 src/core/data/tensorfield/tensorfield_traits.hpp create mode 100644 src/core/data/vecfield/vecfield_traits.hpp create mode 100644 src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp create mode 100644 src/core/numerics/boundary_condition/field_boundary_condition.hpp create mode 100644 src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp create mode 100644 src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp create mode 
100644 src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp create mode 100644 src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp create mode 100644 src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp create mode 100644 src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp rename src/core/numerics/boundary_condition/{boundary_condition.cpp => no_boundary.hpp;} (100%) create mode 100644 tests/amr/data/field/field_data/CMakeLists.txt create mode 100644 tests/amr/data/field/field_data/test_field_data.cpp create mode 100644 tests/amr/data/tensorfield/tensor_field_data/CMakeLists.txt create mode 100644 tests/amr/data/tensorfield/tensor_field_data/test_tensor_field_data.cpp create mode 100644 tests/core/data/field/CMakeLists.txt create mode 100644 tests/core/data/field/test_field.cpp create mode 100644 tests/core/data/gridlayout/gridlayout_traits.cpp create mode 100644 tests/core/data/tensorfield/CMakeLists.txt create mode 100644 tests/core/data/tensorfield/test_tensorfield.cpp diff --git a/.gitignore b/.gitignore index ef8557e08..d9169a9b8 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,5 @@ PHARE_REPORT.zip .cache .gdbinit .phlop +pyrightconfig.json diff --git a/pyphare/pyphare/pharein/initialize/general.py b/pyphare/pyphare/pharein/initialize/general.py index dd917a721..98417356a 100644 --- a/pyphare/pyphare/pharein/initialize/general.py +++ b/pyphare/pyphare/pharein/initialize/general.py @@ -105,6 +105,13 @@ def populateDict(sim): add_double("simulation/grid/meshsize/z", sim.dl[2]) add_string("simulation/grid/boundary_type/z", sim.boundary_types[2]) + directions = "x", "y", "z" + sides = "lower", "upper" + for direction in directions[:sim.ndim]: + for side in sides: + add_string(f"simulation/grid/boundary_conditions/{direction}{ + side}/type", sim.boundary_conditions[f"{direction}{side}"]["type"]) + add_int("simulation/interp_order", 
sim.interp_order) add_int("simulation/refined_particle_nbr", sim.refined_particle_nbr) add_double("simulation/time_step", sim.time_step) diff --git a/pyphare/pyphare/pharein/maxwellian_fluid_model.py b/pyphare/pyphare/pharein/maxwellian_fluid_model.py index 2d747bb6f..4a0f38d39 100644 --- a/pyphare/pyphare/pharein/maxwellian_fluid_model.py +++ b/pyphare/pyphare/pharein/maxwellian_fluid_model.py @@ -93,7 +93,7 @@ def __init__(self, bx=None, by=None, bz=None, **kwargs): self.validate(global_vars.sim) self.validated = True - global_vars.sim.set_maxwellian_fluid_model(self) + global_vars.sim.set_model(self) # ------------------------------------------------------------------------------ diff --git a/pyphare/pyphare/pharein/simulation.py b/pyphare/pyphare/pharein/simulation.py index ed9503071..546b5350d 100644 --- a/pyphare/pyphare/pharein/simulation.py +++ b/pyphare/pyphare/pharein/simulation.py @@ -240,7 +240,7 @@ def check_path(**kwargs): def check_boundaries(ndim, **kwargs): - valid_boundary_types = ("periodic",) + valid_boundary_types = ("periodic","physical") boundary_types = kwargs.get("boundary_types", ["periodic"] * ndim) phare_utilities.check_iterables(boundary_types) @@ -267,6 +267,27 @@ def check_boundaries(ndim, **kwargs): return boundary_types +# ------------------------------------------------------------------------------ + +def check_boundary_conditions(ndim, **kwargs): + valid_bc_types = ("open", "reflective") + directions = "x", "y", "z" + sides = "lower", "upper" + valid_boundary_names = [f"{directions[i]}{side}" for side in sides for i in range(ndim)] + default_boundary_conditions = {name: {"type": "open"} + for name in valid_boundary_names} + boundary_conditions = kwargs.get( + "boundary_conditions", default_boundary_conditions) + for name, condition in boundary_conditions.items(): + if not name in valid_boundary_names: + raise ValueError(f"Wrong boundary name {name}: should belong to {valid_boundary_names}") + condition_type = condition["type"] + 
if not condition_type in valid_bc_types: + raise ValueError( + f"Non-existing boundary condition type {condition_type}.") + return boundary_conditions + + # ------------------------------------------------------------------------------ @@ -716,6 +737,7 @@ def wrapper(simulation_object, **kwargs_in): "layout", "interp_order", "boundary_types", + "boundary_conditions", "refined_particle_nbr", "path", "nesting_buffer", @@ -786,6 +808,7 @@ def wrapper(simulation_object, **kwargs_in): ndim = compute_dimension(cells) kwargs["diag_options"] = check_diag_options(**kwargs) kwargs["boundary_types"] = check_boundaries(ndim, **kwargs) + kwargs["boundary_conditions"] = check_boundary_conditions(ndim, **kwargs) kwargs["refined_particle_nbr"] = check_refined_particle_nbr(ndim, **kwargs) kwargs["diag_export_format"] = kwargs.get("diag_export_format", "hdf5") @@ -1070,9 +1093,7 @@ def __init__(self, **kwargs): self.ndim = compute_dimension(self.cells) self.diagnostics = {} - self.uniform_model = None - self.maxwellian_fluid_model = None - self.mhd_model = None + self.model = None self.electrons = None self.load_balancer = None @@ -1187,21 +1208,7 @@ def set_model(self, model): :meta private: """ - self.uniform_model = mhd_model - - def set_maxwellian_fluid_model(self, maxwellian_fluid_model): - """ - - :meta private: - """ - self.maxwellian_fluid_model = maxwellian_fluid_model - - def set_mhd_model(self, mhd_model): - """ - - :meta private: - """ - self.mhd_model = mhd_model + self.model = model def set_electrons(self, electrons): """ diff --git a/pyphare/pyphare/pharein/uniform_model.py b/pyphare/pyphare/pharein/uniform_model.py index 055fcf754..9d079d012 100644 --- a/pyphare/pyphare/pharein/uniform_model.py +++ b/pyphare/pyphare/pharein/uniform_model.py @@ -11,7 +11,7 @@ def __init__(self, b=(1.0, 0.0, 0.0), e=(0.0, 0.0, 0.0), **kwargs): if global_vars.sim.model is not None: raise RuntimeError("A model is already created") - global_vars.sim.set_uniform_model(self) + 
global_vars.sim.set_model(self) if len(b) != 3 or (not isinstance(b, tuple) and not isinstance(b, list)): raise ValueError("invalid B") diff --git a/res/cmake/test.cmake b/res/cmake/test.cmake index 28f3181e3..26bd60283 100644 --- a/res/cmake/test.cmake +++ b/res/cmake/test.cmake @@ -11,6 +11,7 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests add_subdirectory(tests/core/data/grid) add_subdirectory(tests/core/data/gridlayout) add_subdirectory(tests/core/data/vecfield) + add_subdirectory(tests/core/data/tensorfield) add_subdirectory(tests/core/data/particles) add_subdirectory(tests/core/data/ions) add_subdirectory(tests/core/data/electrons) @@ -19,6 +20,7 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests add_subdirectory(tests/core/data/particle_initializer) add_subdirectory(tests/core/data/mhd_state) add_subdirectory(tests/core/utilities/box) + add_subdirectory(tests/core/utilities/point) add_subdirectory(tests/core/utilities/range) add_subdirectory(tests/core/utilities/index) add_subdirectory(tests/core/utilities/indexer) @@ -48,6 +50,7 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests add_subdirectory(tests/amr/models) add_subdirectory(tests/amr/multiphysics_integrator) add_subdirectory(tests/amr/tagging) + add_subdirectory(tests/amr/data/tensorfield/tensor_field_data) add_subdirectory(tests/diagnostic) diff --git a/src/amr/CMakeLists.txt b/src/amr/CMakeLists.txt index 786ed1a87..7c90075aa 100644 --- a/src/amr/CMakeLists.txt +++ b/src/amr/CMakeLists.txt @@ -15,6 +15,7 @@ set( SOURCES_INC data/field/coarsening/mhd_flux_coarsener.hpp data/field/field_data.hpp data/field/field_data_factory.hpp + data/field/field_data_traits.hpp data/field/field_geometry.hpp data/field/field_overlap.hpp data/field/field_variable.hpp @@ -25,10 +26,13 @@ set( SOURCES_INC data/field/refine/magnetic_field_regrider.hpp data/field/refine/electric_field_refiner.hpp data/field/refine/mhd_field_refiner.hpp + 
data/field/refine/field_refine_patch_strategy.hpp data/field/refine/mhd_flux_refiner.hpp data/field/refine/linear_weighter.hpp data/field/refine/field_refine_operator.hpp data/field/time_interpolate/field_linear_time_interpolate.hpp + data/tensorfield/tensor_field_data.hpp + data/tensorfield/tensor_field_data_traits.hpp resources_manager/field_resource.hpp resources_manager/particle_resource.hpp resources_manager/amr_utils.hpp diff --git a/src/amr/data/field/field_data.hpp b/src/amr/data/field/field_data.hpp index dcd0dbeca..765ea410e 100644 --- a/src/amr/data/field/field_data.hpp +++ b/src/amr/data/field/field_data.hpp @@ -41,6 +41,8 @@ namespace amr static constexpr std::size_t interp_order = GridLayoutT::interp_order; using Geometry = FieldGeometry; using gridlayout_type = GridLayoutT; + using grid_type = Grid_t; + using physical_quantity_type = PhysicalQuantity; static constexpr auto NO_ROTATE = SAMRAI::hier::Transformation::NO_ROTATE; @@ -300,7 +302,7 @@ namespace amr return patchData->gridLayout; } - + /// @warning this name is weird, as we are return a Grid and not a Field static Grid_t& getField(SAMRAI::hier::Patch const& patch, int id) { auto const& patchData = patch.getPatchData(id); diff --git a/src/amr/data/field/field_data_traits.hpp b/src/amr/data/field/field_data_traits.hpp new file mode 100644 index 000000000..304b28b8c --- /dev/null +++ b/src/amr/data/field/field_data_traits.hpp @@ -0,0 +1,41 @@ +#ifndef PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP +#define PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP + +#include +#include +#include + +namespace PHARE +{ +namespace amr +{ + /** + * @brief Concept ensuring a type satisfies the PHARE FieldData interface. 
+ */ + template + concept IsFieldData + = std::derived_from + && requires(T a, T const ca, SAMRAI::hier::Patch const& patch) { + // Type aliases + typename T::gridlayout_type; + typename T::grid_type; + typename T::physical_quantity_type; + + // Static constexpr variables + requires std::same_as; + requires std::same_as; + + // Public member variables + requires std::same_as; + requires std::same_as; + + // API requirements + { a.getPointer() } -> std::same_as; + { T::getLayout(patch, 0) } -> std::same_as; + { T::getField(patch, 0) } -> std::same_as; + }; + +} // namespace amr +} // namespace PHARE + +#endif // PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP diff --git a/src/amr/data/field/refine/field_refine_patch_strategy.hpp b/src/amr/data/field/refine/field_refine_patch_strategy.hpp new file mode 100644 index 000000000..6c4431f24 --- /dev/null +++ b/src/amr/data/field/refine/field_refine_patch_strategy.hpp @@ -0,0 +1,190 @@ +#ifndef PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP +#define PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP + +#include "SAMRAI/geom/CartesianPatchGeometry.h" +#include "SAMRAI/hier/PatchGeometry.h" +#include "core/boundary/boundary_defs.hpp" +#include "core/boundary/boundary.hpp" +#include "core/utilities/constants.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition.hpp" + +#include "amr/data/field/field_data_traits.hpp" +#include "amr/data/tensorfield/tensor_field_data_traits.hpp" + +#include "SAMRAI/xfer/RefinePatchStrategy.h" +#include "SAMRAI/hier/BoundaryBox.h" +#include "SAMRAI/hier/Box.h" +#include "SAMRAI/hier/IntVector.h" +#include "SAMRAI/hier/PatchGeometry.h" +#include "SAMRAI/geom/CartesianPatchGeometry.h" + +#include +#include +#include + +namespace PHARE::amr +{ +using core::dirX; +using core::dirY; +using core::dirZ; + +/** + * @brief Strategy for filling physical boundary conditions and customizing patch refinment. 
+ * + * This class implements the SAMRAI::xfer::RefinePatchStrategy interface to + * specify how physical boundary conditions must be enforced for patches that touch + * the domain boundaries. Refinement customization is deferred to child classes. + * + * @tparam ResMan The resources manager type. + * @tparam ScalarOrTensorFieldDataT The data type for fields or tensor fields. + * @tparam BoundaryManagerT Manager responsible for providing boundary condition objects. + */ +template + requires(IsFieldData || IsTensorFieldData) +class FieldRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy +{ +public: + static constexpr bool is_scalar = IsFieldData; + static constexpr bool is_tensor = !is_scalar; + + using field_geometry_type = FieldGeometrySelector::type; + using gridlayout_type = ScalarOrTensorFieldDataT::gridlayout_type; + using grid_type = ScalarOrTensorFieldDataT::grid_type; + using field_type = grid_type::field_type; + using scalar_or_tensor_field_type + = ScalarOrTensorFieldSelector::type; + + using patch_geometry_type = SAMRAI::hier::PatchGeometry; + using cartesian_patch_geometry_type = SAMRAI::geom::CartesianPatchGeometry; + + using boundary_type = BoundaryManagerT::boundary_type; + using boundary_condition_type + = core::IFieldBoundaryCondition; + + static constexpr std::size_t dimension = ScalarOrTensorFieldDataT::dimension; + + /** + * @brief Constructor. + * @param resources_manager Simulation resources manager. + * @param boundary_manager Manager handling boundary conditions. + */ + FieldRefinePatchStrategy(ResMan& resourcesManager, BoundaryManagerT& boundaryManager) + : rm_{resourcesManager} + , boundaryManager_{boundaryManager} + , data_id_{-1} + { + } + + /** + * @brief Check that the patch data identifier is registered. + */ + void assertIDsSet() const + { + assert(data_id_ >= 0 && "FieldRefinePatchStrategy: IDs must be registered before use"); + } + + /** + * @brief Register the SAMRAI patch data identifier. 
+ * @param field_id Integer ID from the SAMRAI variable database. + */ + void registerIDs(int const field_id) { data_id_ = field_id; } + + /** + * @brief Apply physical boundary conditions via SAMRAI callback. + * + * Iterate over patch boundaries that touch a physical domain boundary and apply the appropriate + * PHARE boundary condition to ghost regions. + * + * @param patch The fine patch being refined. + * @param fill_time Simulation time for BC application. + * @param ghost_width_to_fill Width of ghost cell layer to be filled. + */ + void setPhysicalBoundaryConditions(SAMRAI::hier::Patch& patch, double const fill_time, + SAMRAI::hier::IntVector const& ghost_width_to_fill) override + { + gridlayout_type const& gridLayout = ScalarOrTensorFieldDataT::getLayout(patch, data_id_); + + // consistency check on the number of ghosts + // SAMRAI::hier::IntVector dataGhostWidths = patchData->getGhostCellWidth(); + if (ghost_width_to_fill != gridLayout.nbrGhosts()) + throw std::runtime_error("Error - inconsistent ghost cell widths"); + + // no check this is a valid cast + std::shared_ptr patchGeom + = std::static_pointer_cast(patch.getPatchGeometry()); + + std::vector const& boundaries + = patchGeom->getCodimensionBoundaries(static_cast(core::BoundaryCodim::One)); + + auto scalarOrTensorField = [&]() { + if constexpr (is_scalar) + { + return *(&(ScalarOrTensorFieldDataT::getField(patch, data_id_))); + } + else + { + return ScalarOrTensorFieldDataT::getTensorField(patch, data_id_); + }; + }(); + + // must be retrieved to pass as argument to patchGeom->getBoundaryFillBox later + SAMRAI::hier::Box const& patch_box = patch.getBox(); + + for (SAMRAI::hier::BoundaryBox const& bBox : boundaries) + { + // Boundary definitions in PHARE matches those of SAMRAI + core::BoundaryLocation const bLoc + = static_cast(bBox.getLocationIndex()); + + SAMRAI::hier::Box samraiBoxToFill + = patchGeom->getBoundaryFillBox(bBox, patch_box, ghost_width_to_fill); + auto localBox = 
gridLayout.AMRToLocal(phare_box_from(samraiBoxToFill)); + + std::shared_ptr boundary = boundaryManager_.getBoundary(bLoc); + if (!boundary) + throw std::runtime_error("boundary not found."); + std::shared_ptr bc + = boundary->getFieldCondition(scalarOrTensorField.physicalQuantity()); + if (!bc) + throw std::runtime_error("boundary condition not found."); + + bc->apply(scalarOrTensorField, bLoc, localBox, gridLayout, fill_time); + }; + } + + SAMRAI::hier::IntVector + getRefineOpStencilWidth(SAMRAI::tbox::Dimension const& dim) const override + { + return SAMRAI::hier::IntVector{dim, 1}; + } + + + void preprocessRefine(SAMRAI::hier::Patch& fine, SAMRAI::hier::Patch const& coarse, + SAMRAI::hier::Box const& fine_box, + SAMRAI::hier::IntVector const& ratio) override + { + } + + + void postprocessRefine(SAMRAI::hier::Patch& fine, SAMRAI::hier::Patch const& coarse, + SAMRAI::hier::Box const& fine_box, + SAMRAI::hier::IntVector const& ratio) override + { + } + + + static auto isNewFineFace(auto const& amrIdx, auto const dir) {} + + +protected: + /// Reference to the resources manager. + ResMan& rm_; + /// Reference to the boundary manager. + BoundaryManagerT& boundaryManager_; + /// SAMRAI patch data identifier. 
+ int data_id_; +}; + +} // namespace PHARE::amr + +#endif // PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP diff --git a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp index 73741707d..c9705edb8 100644 --- a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp +++ b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp @@ -4,18 +4,11 @@ #include "core/utilities/types.hpp" #include "core/utilities/constants.hpp" -#include "amr/data/field/field_geometry.hpp" #include "amr/utilities/box/amr_box.hpp" #include "amr/data/field/field_geometry.hpp" #include "amr/resources_manager/amr_utils.hpp" - - -#include "SAMRAI/xfer/RefinePatchStrategy.h" -#include "core/utilities/types.hpp" - -#include -#include -#include +#include "amr/data/tensorfield/tensor_field_data_traits.hpp" +#include "amr/data/field/refine/field_refine_patch_strategy.hpp" namespace PHARE::amr { @@ -23,66 +16,61 @@ using core::dirX; using core::dirY; using core::dirZ; -template -class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy +/** + * @brief Strategy for magnetic field refinement in AMR patches. + * + * Implement Divergence-Preserving refinement (Toth and Roe, 2002) to + * ensure the null divergence property of the magnetic field across refinement levels. + * + * @tparam ResManT Resource manager type. + * @tparam VecFieldDataT Vector field data type. + * @tparam BoundaryManagerT Boundary manager type. 
+ */ +template +class MagneticRefinePatchStrategy + : public FieldRefinePatchStrategy { public: - using Geometry = TensorFieldDataT::Geometry; - using gridlayout_type = TensorFieldDataT::gridlayout_type; - - static constexpr std::size_t N = TensorFieldDataT::N; - static constexpr std::size_t dimension = TensorFieldDataT::dimension; - - MagneticRefinePatchStrategy(ResMan& resourcesManager) - : rm_{resourcesManager} - , b_id_{-1} - { - } - - void assertIDsSet() const - { - assert(b_id_ >= 0 && "MagneticRefinePatchStrategy: IDs must be registered before use"); - } - - void registerIDs(int const b_id) { b_id_ = b_id; } - - void setPhysicalBoundaryConditions(SAMRAI::hier::Patch& patch, double const fill_time, - SAMRAI::hier::IntVector const& ghost_width_to_fill) override - { - } - - SAMRAI::hier::IntVector - getRefineOpStencilWidth(SAMRAI::tbox::Dimension const& dim) const override - { - return SAMRAI::hier::IntVector(dim, 1); // hard-coded 0th order base interpolation - } - - - void preprocessRefine(SAMRAI::hier::Patch& fine, SAMRAI::hier::Patch const& coarse, - SAMRAI::hier::Box const& fine_box, - SAMRAI::hier::IntVector const& ratio) override + using geometry_type = VecFieldDataT::Geometry; + using field_geometry_type = geometry_type::FieldGeometry_t; + using gridlayout_type = VecFieldDataT::gridlayout_type; + using Super = FieldRefinePatchStrategy; + + static constexpr size_t dimension = VecFieldDataT::dimension; + static constexpr size_t N = VecFieldDataT::N; + + using Super::data_id_; + + /** + * @brief Construct the magnetic refinement strategy. + * @param resources_manager Simulation resources manager. + * @param boundary_manager Manager handling boundary conditions. + */ + MagneticRefinePatchStrategy(ResManT& resourcesManager, BoundaryManagerT& boundaryManager) + : Super(resourcesManager, boundaryManager) { } - - // We compute the values of the new fine magnetic faces using what was already refined, ie - // the values on the old coarse faces. 
+ /** + * @brief Compute fine magnetic face values using refined coarse faces. + * + * We compute the values of the new fine magnetic faces using what was already refined, ie + * the values on the old coarse faces. + */ void postprocessRefine(SAMRAI::hier::Patch& fine, SAMRAI::hier::Patch const& coarse, SAMRAI::hier::Box const& fine_box, SAMRAI::hier::IntVector const& ratio) override { - assertIDsSet(); + Super::assertIDsSet(); - auto& fields = TensorFieldDataT::getFields(fine, b_id_); + auto& fields = VecFieldDataT::getFields(fine, data_id_); auto& [bx, by, bz] = fields; auto layout = PHARE::amr::layoutFromPatch(fine); - auto fineBoxLayout = Geometry::layoutFromBox(fine_box, layout); + auto fineBoxLayout = geometry_type::layoutFromBox(fine_box, layout); auto const fine_field_box = core::for_N_make_array([&](auto i) { - using PhysicalQuantity = std::decay_t; - - return FieldGeometry::toFieldBox( - fine_box, fields[i].physicalQuantity(), fineBoxLayout); + return field_geometry_type::toFieldBox(fine_box, fields[i].physicalQuantity(), + fineBoxLayout); }); if constexpr (dimension == 1) @@ -161,8 +149,8 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy // modifying, but dual for the field we are indexing to compute // second and third order terms, then the formula reduces to offset // = 1 - int const xoffset = 1; - int const yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; + int xoffset = 1; + int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; bx(ix, iy) = 0.5 * (bx(ix - 1, iy) + bx(ix + 1, iy)) + 0.25 @@ -184,8 +172,8 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy // | if (isNewFineFace(idx, dirY)) { - int const xoffset = (idx[dirX] % 2 == 0) ? 0 : 1; - int const yoffset = 1; + int xoffset = (idx[dirX] % 2 == 0) ? 
0 : 1; + int yoffset = 1; by(ix, iy) = 0.5 * (by(ix, iy - 1) + by(ix, iy + 1)) + 0.25 @@ -210,9 +198,9 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy if (isNewFineFace(idx, dirX)) { - int const xoffset = 1; - int const yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; - int const zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1; + int xoffset = 1; + int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; + int zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1; bx(ix, iy, iz) = 0.5 * (bx(ix - 1, iy, iz) + bx(ix + 1, iy, iz)) @@ -269,9 +257,9 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy if (isNewFineFace(idx, dirY)) { - int const xoffset = (idx[dirX] % 2 == 0) ? 0 : 1; - int const yoffset = 1; - int const zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1; + int xoffset = (idx[dirX] % 2 == 0) ? 0 : 1; + int yoffset = 1; + int zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1; by(ix, iy, iz) = 0.5 * (by(ix, iy - 1, iz) + by(ix, iy + 1, iz)) @@ -328,9 +316,9 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy if (isNewFineFace(idx, dirZ)) { - int const xoffset = (idx[dirX] % 2 == 0) ? 0 : 1; - int const yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; - int const zoffset = 1; + int xoffset = (idx[dirX] % 2 == 0) ? 0 : 1; + int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1; + int zoffset = 1; bz(ix, iy, iz) = 0.5 * (bz(ix, iy, iz - 1) + bz(ix, iy, iz + 1)) @@ -387,9 +375,6 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy // different offset for indexing and applying the +-1 factor to the // third order terms. That's the job of the ijk_factor_ array. 
static constexpr std::array ijk_factor_{-1, 1}; - - ResMan& rm_; - int b_id_; }; } // namespace PHARE::amr diff --git a/src/amr/data/tensorfield/tensor_field_data.hpp b/src/amr/data/tensorfield/tensor_field_data.hpp index 2c50728b1..0e41f799b 100644 --- a/src/amr/data/tensorfield/tensor_field_data.hpp +++ b/src/amr/data/tensorfield/tensor_field_data.hpp @@ -5,16 +5,20 @@ #include "core/logger.hpp" #include "core/data/field/field_box.hpp" +#include "core/data/grid/gridlayoutdefs.hpp" #include "core/data/tensorfield/tensorfield.hpp" #include "amr/data/field/field_geometry.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/data/tensorfield/tensor_field_overlap.hpp" +#include "amr/resources_manager/amr_utils.hpp" +#include "amr/data/field/field_overlap.hpp" #include "amr/data/tensorfield/tensor_field_geometry.hpp" #include #include +#include #include @@ -43,19 +47,19 @@ class TensorFieldData : public SAMRAI::hier::PatchData [&](auto i) { return Grid_t{compNames[i], qts[i], layout.allocSize(qts[i])}; }); } -public: - using value_type = Grid_t::value_type; - -private: - using SetEqualOp = core::Equals; + using SetEqualOp = core::Equals; public: static constexpr std::size_t dimension = GridLayoutT::dimension; static constexpr std::size_t interp_order = GridLayoutT::interp_order; static constexpr auto N = core::detail::tensor_field_dim_from_rank(); - using Geometry = TensorFieldGeometry; - using gridlayout_type = GridLayoutT; + using Geometry = TensorFieldGeometry; + using gridlayout_type = GridLayoutT; + using grid_type = Grid_t; + using value_type = Grid_t::value_type; + using field_type = typename Grid_t::field_type; + using tensor_field_type = core::TensorField; /*** \brief Construct a TensorFieldData from information associated to a patch * @@ -69,6 +73,7 @@ class TensorFieldData : public SAMRAI::hier::PatchData , gridLayout{layout} , grids{make_grids(core::detail::tensor_field_names(name), layout, qty)} , quantity_{qty} + , name_{name} { } @@ -336,6 
+341,22 @@ class TensorFieldData : public SAMRAI::hier::PatchData return patchData->grids; } + /** + * @brief Get a TensorField associated to data with @p id on @p patch. + * + * @param patch the AMR patch + * @param id the resource index of the data + * @return a tensor field + **/ + static tensor_field_type getTensorField(SAMRAI::hier::Patch const& patch, int const id) + { + auto const& patchData = std::dynamic_pointer_cast(patch.getPatchData(id)); + if (!patchData) + throw std::runtime_error("cannot cast to TensorFieldData"); + tensor_field_type tensorField{patchData->name_, patchData->quantity_}; + tensorField.setBuffer(&patchData->grids); + return tensorField; + } template void operate(SAMRAI::hier::PatchData const& src, SAMRAI::hier::BoxOverlap const& overlap); @@ -344,11 +365,13 @@ class TensorFieldData : public SAMRAI::hier::PatchData SAMRAI::hier::BoxOverlap const& overlap); + GridLayoutT gridLayout; std::array grids; private: tensor_t quantity_; ///! PhysicalQuantity used for this field data + std::string name_; @@ -483,6 +506,7 @@ class TensorFieldData : public SAMRAI::hier::PatchData + template template void TensorFieldData::unpackStreamAnd( diff --git a/src/amr/data/tensorfield/tensor_field_data_traits.hpp b/src/amr/data/tensorfield/tensor_field_data_traits.hpp new file mode 100644 index 000000000..5e372f3d7 --- /dev/null +++ b/src/amr/data/tensorfield/tensor_field_data_traits.hpp @@ -0,0 +1,98 @@ +#ifndef PHARE_SRC_AMR_TENSOR_FIELD_TENSOR_FIELD_DATA_TRAITS_HPP +#define PHARE_SRC_AMR_TENSOR_FIELD_TENSOR_FIELD_DATA_TRAITS_HPP + +#include +#include + +#include "SAMRAI/hier/PatchData.h" +#include "SAMRAI/hier/Patch.h" + +namespace PHARE +{ +namespace amr +{ + /** + * @brief Concept ensuring a type satisfies the PHARE FieldData interface. 
+ */ + template + concept IsTensorFieldData + = std::derived_from + && requires(T a, T const ca, SAMRAI::hier::Patch const& patch) { + // Type aliases + typename T::gridlayout_type; + typename T::grid_type; + typename T::tensor_field_type; + + // Static constexpr variables + requires std::same_as; + requires std::same_as; + requires std::same_as; + + // Public member variables + requires std::same_as; + requires std::same_as>; + + // API requirements + { a.getPointer() } -> std::same_as*>; + { T::getLayout(patch, 0) } -> std::same_as; + { + T::getFields(patch, 0) + } -> std::same_as&>; + { T::getTensorField(patch, 0) } -> std::same_as; + }; + + template + concept IsVecFieldData = IsTensorFieldData && (T::N == 3); + + /** + * @brief Compile-time utility to select the correct Geometry type. + * @tparam ScalarOrTensorFieldDataT The data structure representing the field data. + * @tparam is_scalar Boolean flag; true if the field is a scalar, false if it is a tensor. + */ + template + struct FieldGeometrySelector; + /** + * @brief Specialization for scalar field data + */ + template + struct FieldGeometrySelector + { + using type = ScalarOrTensorFieldDataT::Geometry; + }; + /** + * @brief Specialization for tensor field data + */ + template + struct FieldGeometrySelector + { + using type = ScalarOrTensorFieldDataT::Geometry::FieldGeometry_t; + }; + + /** + * @brief Compile-time utility to select the Field or TensorField type. + * @tparam ScalarOrTensorFieldDataT The data structure representing the field data. + * @tparam is_scalar Boolean flag; true if the field is a scalar, false if it is a tensor. 
+ */ + template + struct ScalarOrTensorFieldSelector; + /** + * @brief Specialization for scalar field data + */ + template + struct ScalarOrTensorFieldSelector + { + using type = ScalarOrTensorFieldDataT::grid_type::field_type; + }; + /** + * @brief Specialization for tensor field data + */ + template + struct ScalarOrTensorFieldSelector + { + using type = ScalarOrTensorFieldDataT::tensor_field_type; + }; + +} // namespace amr +} // namespace PHARE + +#endif // PHARE_SRC_AMR_TENSOR_FIELD_TENSOR_FIELD_DATA_TRAITS_HPP diff --git a/src/amr/data/tensorfield/tensor_field_geometry.hpp b/src/amr/data/tensorfield/tensor_field_geometry.hpp index 328c0ec6e..ab6d56230 100644 --- a/src/amr/data/tensorfield/tensor_field_geometry.hpp +++ b/src/amr/data/tensorfield/tensor_field_geometry.hpp @@ -69,9 +69,12 @@ class TensorFieldGeometryBase : public SAMRAI::hier::BoxGeometry template class TensorFieldGeometry : public TensorFieldGeometryBase { - using tensor_t = typename PhysicalQuantity::template TensorType; +public: using FieldGeometry_t = FieldGeometry; +private: + using tensor_t = typename PhysicalQuantity::template TensorType; + auto static make_geoms(SAMRAI::hier::Box const& box, GridLayoutT const& layout, tensor_t const qty) { diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp index a808da472..b846002fa 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp @@ -1,26 +1,35 @@ #ifndef PHARE_HYBRID_HYBRID_MESSENGER_STRATEGY_HPP #define PHARE_HYBRID_HYBRID_MESSENGER_STRATEGY_HPP - -#include "core/def.hpp" // IWYU pragma: keep +#include "core/def.hpp" #include "core/logger.hpp" #include "core/def/phare_mpi.hpp" // IWYU pragma: keep #include "core/utilities/types.hpp" #include "core/hybrid/hybrid_quantities.hpp" #include "core/numerics/interpolator/interpolator.hpp" +#include "core/utilities/types.hpp" + +#include 
"core/utilities/types.hpp" +#include "refiner_pool.hpp" +#include "synchronizer_pool.hpp" +#include "amr/data/field/coarsening/moments_coarsener.hpp" #include "amr/types/amr_types.hpp" #include "amr/messengers/messenger_info.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/data/field/refine/field_refiner.hpp" +#include "amr/data/field/refine/field_moments_refiner.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" #include "amr/messengers/hybrid_messenger_strategy.hpp" +#include "amr/data/field/coarsening/electric_field_coarsener.hpp" #include "amr/data/field/field_variable_fill_pattern.hpp" #include "amr/data/field/coarsening/moments_coarsener.hpp" #include "amr/data/field/refine/field_moments_refiner.hpp" #include "amr/data/field/refine/field_refine_operator.hpp" #include "amr/data/field/refine/electric_field_refiner.hpp" +#include "amr/data/field/refine/magnetic_field_init_refiner.hpp" #include "amr/data/field/refine/magnetic_field_refiner.hpp" +#include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" #include "amr/data/field/refine/magnetic_field_regrider.hpp" #include "amr/data/field/coarsening/field_coarsen_operator.hpp" #include "amr/data/field/refine/magnetic_field_init_refiner.hpp" @@ -30,15 +39,21 @@ #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" #include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include "refiner_pool.hpp" -#include "synchronizer_pool.hpp" +#include "core/utilities/index/index.hpp" +#include "core/numerics/interpolator/interpolator.hpp" +#include "core/hybrid/hybrid_quantities.hpp" +#include "core/data/particles/particle_array.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/utilities/point/point.hpp" + +#include "SAMRAI/xfer/RefineAlgorithm.h" +#include "SAMRAI/xfer/RefineSchedule.h" +#include "SAMRAI/xfer/CoarsenAlgorithm.h" +#include "SAMRAI/xfer/CoarsenSchedule.h" +#include "SAMRAI/xfer/BoxGeometryVariableFillPattern.h" +#include 
"SAMRAI/hier/CoarseFineBoundary.h" +#include "SAMRAI/hier/IntVector.h" -#include -#include -#include -#include -#include -#include #include #include @@ -74,6 +89,7 @@ namespace amr using FieldT = VecFieldT::field_type; using VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::HybridQuantity>; using ResourcesManagerT = HybridModel::resources_manager_type; + using BoundaryManagerT = HybridModel::boundary_manager_type; using IPhysicalModel = HybridModel::Interface; static constexpr std::size_t dimension = GridLayoutT::dimension; @@ -119,10 +135,12 @@ namespace amr static constexpr std::size_t rootLevelNumber = 0; - HybridHybridMessengerStrategy(std::shared_ptr const& manager, + HybridHybridMessengerStrategy(std::shared_ptr const& resourcesManager, + std::shared_ptr const& boundaryManager, int const firstLevel) : HybridMessengerStrategy{stratName} - , resourcesManager_{manager} + , resourcesManager_{resourcesManager} + , boundaryManager_{boundaryManager} , firstLevel_{firstLevel} { resourcesManager_->registerResources(Jold_); @@ -385,7 +403,6 @@ namespace amr magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); } - void fillElectricGhosts(VecFieldT& E, level_t const& level, double const fillTime) override { PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillElectricGhosts"); @@ -576,7 +593,6 @@ namespace amr // velLevelGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); // } - /** * @brief firstStep : in the HybridHybridMessengerStrategy, the firstStep method is * used to get level border ghost particles from the next coarser level. 
These @@ -730,7 +746,6 @@ namespace amr ionBulkVelSynchronizers_.sync(levelNumber); } - // this function coarsens the fluxSum onto the corresponding coarser fluxes (E in hybrid), // and fills the patch ghosts, making it ready for the faraday in the solver.reflux() void reflux(int const coarserLevelNumber, int const fineLevelNumber, @@ -776,8 +791,8 @@ namespace amr // we need a separate patch strategy for each refiner so that each one can register // their required ids magneticPatchStratPerGhostRefiner_ = [&]() { - std::vector>> + std::vector>> result; result.reserve(info->ghostMagnetic.size()); @@ -786,9 +801,9 @@ namespace amr { auto&& [id] = resourcesManager_->getIDsList(key); - auto patch_strat = std::make_shared< - MagneticRefinePatchStrategy>( - *resourcesManager_); + auto patch_strat = std::make_shared>(*resourcesManager_, + *boundaryManager_); patch_strat->registerIDs(id); @@ -869,11 +884,13 @@ namespace amr for (auto const& vecfield : info->ghostFlux) + { popFluxBorderSumRefiners_.emplace_back(resourcesManager_) .addStaticRefiner( sumVec_.name(), vecfield, nullptr, sumVec_.name(), std::make_shared< TensorFieldGhostInterpOverlapFillPattern>()); + } for (auto const& field : info->sumBorderFields) popDensityBorderSumRefiners_.emplace_back(resourcesManager_) @@ -1023,6 +1040,8 @@ namespace amr //! 
ResourceManager shared with other objects (like the HybridModel) std::shared_ptr resourcesManager_; + std::shared_ptr boundaryManager_; + int const firstLevel_; std::unordered_map beforePushCoarseTime_; @@ -1036,7 +1055,6 @@ namespace amr // these refiners are used to initialize electromagnetic fields when creating // a new level (initLevel) or regridding (regrid) - using InitRefinerPool = RefinerPool; using GhostRefinerPool = RefinerPool; using InitDomPartRefinerPool = RefinerPool; @@ -1167,11 +1185,11 @@ namespace amr std::make_shared()}; CoarsenOperator_ptr electricFieldCoarseningOp_{std::make_shared()}; - MagneticRefinePatchStrategy - magneticRefinePatchStrategy_{*resourcesManager_}; + MagneticRefinePatchStrategy + magneticRefinePatchStrategy_{*resourcesManager_, *boundaryManager_}; - std::vector< - std::shared_ptr>> + std::vector>> magneticPatchStratPerGhostRefiner_; }; diff --git a/src/amr/messengers/messenger_factory.hpp b/src/amr/messengers/messenger_factory.hpp index e92efd27b..83cbd5a94 100644 --- a/src/amr/messengers/messenger_factory.hpp +++ b/src/amr/messengers/messenger_factory.hpp @@ -84,10 +84,12 @@ class MessengerFactory { if (messengerName == HybridHybridMessengerStrategy_t::stratName) { - auto& resourcesManager = dynamic_cast(coarseModel).resourcesManager; + auto hybridCoarseModel = dynamic_cast(coarseModel); + auto& resourcesManager = hybridCoarseModel.resourcesManager; + auto& boundaryManager = hybridCoarseModel.boundaryManager; - auto messengerStrategy - = std::make_unique(resourcesManager, firstLevel); + auto messengerStrategy = std::make_unique( + resourcesManager, boundaryManager, firstLevel); return std::make_unique>(std::move(messengerStrategy)); } @@ -116,9 +118,12 @@ class MessengerFactory else if (messengerName == MHDMessenger::stratName) { - auto& mhdResourcesManager = dynamic_cast(coarseModel).resourcesManager; + auto mhdCoarseModel = dynamic_cast(coarseModel); + auto& mhdResourcesManager = mhdCoarseModel.resourcesManager; + auto& 
mhdBoundaryManager = mhdCoarseModel.boundaryManager; - return std::make_unique>(mhdResourcesManager, firstLevel); + return std::make_unique>(mhdResourcesManager, mhdBoundaryManager, + firstLevel); } else return {}; diff --git a/src/amr/messengers/mhd_messenger.hpp b/src/amr/messengers/mhd_messenger.hpp index ffacaa24f..2320b1960 100644 --- a/src/amr/messengers/mhd_messenger.hpp +++ b/src/amr/messengers/mhd_messenger.hpp @@ -17,17 +17,15 @@ #include "amr/messengers/messenger.hpp" #include "amr/messengers/messenger_info.hpp" #include "amr/messengers/mhd_messenger_info.hpp" +#include "amr/data/field/refine/field_refine_patch_strategy.hpp" #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" #include "amr/data/field/field_variable_fill_pattern.hpp" -#include "core/data/vecfield/vecfield.hpp" #include "core/mhd/mhd_quantities.hpp" -#include "core/def/phare_mpi.hpp" #include "SAMRAI/hier/CoarsenOperator.h" #include "SAMRAI/hier/PatchLevel.h" #include "SAMRAI/hier/RefineOperator.h" -#include "SAMRAI/hier/CoarseFineBoundary.h" #include #include @@ -38,9 +36,6 @@ #include -#include -#include - namespace PHARE { namespace amr @@ -60,6 +55,8 @@ namespace amr using GridLayoutT = MHDModel::gridlayout_type; using GridT = MHDModel::grid_type; using ResourcesManagerT = MHDModel::resources_manager_type; + using BoundaryManagerT = MHDModel::boundary_manager_type; + using FieldDataT = FieldData; using VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::MHDQuantity>; static constexpr auto dimension = MHDModel::dimension; @@ -68,9 +65,10 @@ namespace amr static constexpr std::size_t rootLevelNumber = 0; static inline std::string const stratName = "MHDModel-MHDModel"; - MHDMessenger(std::shared_ptr resourcesManager, - int const firstLevel) + MHDMessenger(std::shared_ptr resourcesManager, + std::shared_ptr boundaryManager, int const firstLevel) : resourcesManager_{std::move(resourcesManager)} + , boundaryManager_{std::move(boundaryManager)} , 
firstLevel_{firstLevel} { // moment ghosts are primitive quantities @@ -135,8 +133,8 @@ namespace amr "MHDMessengerStrategy: missing electric field variable IDs"); } - // EalgoPatchGhost.registerRefine(*e_id, *e_id, *e_id, EfieldRefineOp_, - // nonOverwriteInteriorTFfillPattern); + EalgoPatchGhost.registerRefine(*e_id, *e_id, *e_id, EfieldRefineOp_, + nonOverwriteInteriorTFfillPattern); // refluxing // we first want to coarsen the flux sum onto the coarser level @@ -292,10 +290,10 @@ namespace amr { auto const level = hierarchy->getPatchLevel(levelNumber); - // magPatchGhostsRefineSchedules[levelNumber] - // = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); + magPatchGhostsRefineSchedules[levelNumber] + = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); - // elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); + elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); EpatchGhostRefluxedSchedules[levelNumber] = EpatchGhostRefluxedAlgo.createSchedule(level); @@ -321,8 +319,6 @@ namespace amr magFluxesZGhostRefiners_.registerLevel(hierarchy, level); magGhostsRefiners_.registerLevel(hierarchy, level); - magMaxRefiners_.registerLevel(hierarchy, level); - magMaxModelRefiners_.registerLevel(hierarchy, level); if (levelNumber != rootLevelNumber) { @@ -358,8 +354,6 @@ namespace amr bool isRegriddingL0 = levelNumber == 0 and oldLevel; magneticRegriding_(hierarchy, level, oldLevel, initDataTime); - magMaxModelRefiners_.fill(mhdModel.state.B, level->getLevelNumber(), initDataTime); - densityInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); momentumInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); totalEnergyInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); @@ -503,7 +497,6 @@ namespace amr setNaNsOnVecfieldGhosts(B, level); magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); - magMaxRefiners_.fill(B, 
level.getLevelNumber(), fillTime); } void fillCurrentGhosts(VecFieldT& J, level_t const& level, double const fillTime) @@ -517,102 +510,95 @@ namespace amr private: + using rm_t = typename MHDModel::resources_manager_type; + using InitRefinerPool = RefinerPool; + using GhostRefinerPool = RefinerPool; + using InitDomPartRefinerPool = RefinerPool; + using FieldRefinePatchStrategyT + = FieldRefinePatchStrategy; + using VectorFieldRefinePatchStrategyT + = FieldRefinePatchStrategy; + using MagneticRefinePatchStrategyT + = MagneticRefinePatchStrategy; + using FieldRefinePatchStrategyList + = std::vector>; + using VectorFieldRefinePatchStrategyList + = std::vector>; + using MagneticRefinePatchStrategyList + = std::vector>; + + // Maybe we also need conservative ghost refiners for amr operations, actually quite // likely void registerGhostComms_(std::unique_ptr const& info) { - // static refinement for J and E because in MHD they are temporaries, so keeping there + // static refinement for J because in MHD it is a temporary, so keeping its // state updated after each regrid is not a priority. However if we do not correctly // refine on regrid, the post regrid state is not up to date (in our case it will be nan // since we nan-initialise) and thus is is better to rely on static refinement, which // uses the state after computation of ampere or CT. 
- elecGhostsRefiners_.addStaticRefiners(info->ghostElectric, EfieldRefineOp_, - info->ghostElectric, - nonOverwriteInteriorTFfillPattern); - - currentGhostsRefiners_.addStaticRefiners(info->ghostCurrent, EfieldRefineOp_, - info->ghostCurrent, - nonOverwriteInteriorTFfillPattern); - - - rhoGhostsRefiners_.addTimeRefiners(info->ghostDensity, info->modelDensity, - rhoOld_.name(), mhdFieldRefineOp_, fieldTimeOp_, - nonOverwriteFieldFillPattern); - - - // velGhostsRefiners_.addTimeRefiners(info->ghostVelocity, info->modelVelocity, - // Vold_.name(), mhdVecFieldRefineOp_, - // vecFieldTimeOp_, - // nonOverwriteInteriorTFfillPattern); - // - // pressureGhostsRefiners_.addTimeRefiners(info->ghostPressure, info->modelPressure, - // Pold_.name(), mhdFieldRefineOp_, - // fieldTimeOp_, nonOverwriteFieldFillPattern); - - momentumGhostsRefiners_.addTimeRefiners( - info->ghostMomentum, info->modelMomentum, rhoVold_.name(), mhdVecFieldRefineOp_, - vecFieldTimeOp_, nonOverwriteInteriorTFfillPattern); - - totalEnergyGhostsRefiners_.addTimeRefiners( - info->ghostTotalEnergy, info->modelTotalEnergy, EtotOld_.name(), mhdFieldRefineOp_, - fieldTimeOp_, nonOverwriteFieldFillPattern); - - magFluxesXGhostRefiners_.addStaticRefiners( - info->ghostMagneticFluxesX, mhdVecFluxRefineOp_, info->ghostMagneticFluxesX, - nonOverwriteInteriorTFfillPattern); - - magFluxesYGhostRefiners_.addStaticRefiners( - info->ghostMagneticFluxesY, mhdVecFluxRefineOp_, info->ghostMagneticFluxesY, - nonOverwriteInteriorTFfillPattern); - - magFluxesZGhostRefiners_.addStaticRefiners( - info->ghostMagneticFluxesZ, mhdVecFluxRefineOp_, info->ghostMagneticFluxesZ, - nonOverwriteInteriorTFfillPattern); - - // we need a separate patch strategy for each refiner so that each one can register - // their required ids - magneticPatchStratPerGhostRefiner_ = [&]() { - std::vector>> - result; - - result.reserve(info->ghostMagnetic.size()); - - for (auto const& key : info->ghostMagnetic) - { - auto&& [id] = 
resourcesManager_->getIDsList(key); - - auto patch_strat = std::make_shared< - MagneticRefinePatchStrategy>( - *resourcesManager_); - - patch_strat->registerIDs(id); - - result.push_back(patch_strat); - } - return result; - }(); + registerGhostRefinePatchStrategies_(currentPatchStratPerGhostRefiner_, + info->ghostCurrent); + for (size_t i = 0; i < info->ghostCurrent.size(); ++i) + currentGhostsRefiners_.addStaticRefiner( + info->ghostCurrent[i], EfieldRefineOp_, info->ghostCurrent[i], + nonOverwriteInteriorTFfillPattern, currentPatchStratPerGhostRefiner_[i]); + + + registerGhostRefinePatchStrategies_(rhoPatchStratPerGhostRefiner_, info->ghostDensity); + for (size_t i = 0; i < info->ghostDensity.size(); ++i) + rhoGhostsRefiners_.addTimeRefiner( + info->ghostDensity[i], info->modelDensity, rhoOld_.name(), mhdFieldRefineOp_, + fieldTimeOp_, info->ghostDensity[i], nonOverwriteFieldFillPattern, + rhoPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(momentumPatchStratPerGhostRefiner_, + info->ghostMomentum); + for (size_t i = 0; i < info->ghostMomentum.size(); ++i) + momentumGhostsRefiners_.addTimeRefiner( + info->ghostMomentum[i], info->modelMomentum, rhoVold_.name(), + mhdVecFieldRefineOp_, vecFieldTimeOp_, info->ghostMomentum[i], + nonOverwriteInteriorTFfillPattern, momentumPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(totalEnergyPatchStratPerGhostRefiner_, + info->ghostTotalEnergy); + for (size_t i = 0; i < info->ghostTotalEnergy.size(); ++i) + totalEnergyGhostsRefiners_.addTimeRefiner( + info->ghostTotalEnergy[i], info->modelTotalEnergy, EtotOld_.name(), + mhdFieldRefineOp_, fieldTimeOp_, info->ghostTotalEnergy[i], + nonOverwriteFieldFillPattern, totalEnergyPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(magPatchStratPerGhostRefiner_, info->ghostMagnetic); for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) - { magGhostsRefiners_.addStaticRefiner( info->ghostMagnetic[i], BfieldRegridOp_, 
info->ghostMagnetic[i], - nonOverwriteInteriorTFfillPattern, magneticPatchStratPerGhostRefiner_[i]); - - magMaxRefiners_.addStaticRefiner( - info->ghostMagnetic[i], info->ghostMagnetic[i], nullptr, info->ghostMagnetic[i], - std::make_shared< - TensorFieldGhostInterpOverlapFillPattern>()); - } - - magMaxModelRefiners_.addStaticRefiner( - info->modelMagnetic, info->modelMagnetic, nullptr, info->modelMagnetic, - std::make_shared< - TensorFieldGhostInterpOverlapFillPattern>()); + nonOverwriteInteriorTFfillPattern, magPatchStratPerGhostRefiner_[i]); } - + /** + * @brief Register a list of refine patch strategy pointers corresponding to a list of keys. + * + * @tparam RefinePatchStrategyT type inheriting from SAMRAI's `RefinePatchStrategy` + * @param patchStrategies the list of refine patch strategy pointers. + * @param keys the list of keys. + */ + template + void registerGhostRefinePatchStrategies_( + std::vector>& patchStrategies, + std::vector const& keys) + { + patchStrategies.reserve(keys.size()); + for (auto const& key : keys) + { + auto&& [id] = resourcesManager_->getIDsList(key); + auto patchStrat + = std::make_shared(*resourcesManager_, *boundaryManager_); + patchStrat->registerIDs(id); + patchStrategies.push_back(patchStrat); + } + } // should this use conservative quantities ? When should we do the initial conversion ? 
// Maybe mhd_init @@ -636,7 +622,6 @@ namespace amr auto magSchedule = BregridAlgo.createSchedule( level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy, &magneticRefinePatchStrategy_); - magSchedule->fillData(initDataTime); } @@ -700,28 +685,20 @@ namespace amr VecFieldT Jold_{stratName + "Jold", core::MHDQuantity::Vector::J}; - - using rm_t = typename MHDModel::resources_manager_type; std::shared_ptr resourcesManager_; + std::shared_ptr boundaryManager_; int const firstLevel_; - using InitRefinerPool = RefinerPool; - using GhostRefinerPool = RefinerPool; - using InitDomPartRefinerPool = RefinerPool; - using VecFieldGhostMaxRefinerPool = RefinerPool; - - SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; // + SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; SAMRAI::xfer::RefineAlgorithm BalgoInit; SAMRAI::xfer::RefineAlgorithm BregridAlgo; - SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; // + SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; std::map> magInitRefineSchedules; - std::map> magGhostsRefineSchedules; // - std::map> - magPatchGhostsRefineSchedules; // + std::map> magGhostsRefineSchedules; + std::map> magPatchGhostsRefineSchedules; std::map> elecPatchGhostsRefineSchedules; - std::map> - magSharedNodeRefineSchedules; // + std::map> magSharedNodeRefineSchedules; SAMRAI::xfer::CoarsenAlgorithm ErefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; SAMRAI::xfer::CoarsenAlgorithm HydroXrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; @@ -758,8 +735,6 @@ namespace amr GhostRefinerPool magFluxesZGhostRefiners_{resourcesManager_}; GhostRefinerPool magGhostsRefiners_{resourcesManager_}; - VecFieldGhostMaxRefinerPool magMaxRefiners_{resourcesManager_}; - VecFieldGhostMaxRefinerPool magMaxModelRefiners_{resourcesManager_}; InitRefinerPool densityInitRefiners_{resourcesManager_}; InitRefinerPool momentumInitRefiners_{resourcesManager_}; @@ -836,12 +811,15 @@ namespace amr CoarsenOp_ptr mhdVecFluxCoarseningOp_{std::make_shared()}; CoarsenOp_ptr 
electricFieldCoarseningOp_{std::make_shared()}; - MagneticRefinePatchStrategy - magneticRefinePatchStrategy_{*resourcesManager_}; + MagneticRefinePatchStrategyT magneticRefinePatchStrategy_{*resourcesManager_, + *boundaryManager_}; + + FieldRefinePatchStrategyList rhoPatchStratPerGhostRefiner_; + FieldRefinePatchStrategyList totalEnergyPatchStratPerGhostRefiner_; + VectorFieldRefinePatchStrategyList momentumPatchStratPerGhostRefiner_; + MagneticRefinePatchStrategyList magPatchStratPerGhostRefiner_; - std::vector< - std::shared_ptr>> - magneticPatchStratPerGhostRefiner_; + VectorFieldRefinePatchStrategyList currentPatchStratPerGhostRefiner_; }; } // namespace amr diff --git a/src/amr/physical_models/hybrid_model.hpp b/src/amr/physical_models/hybrid_model.hpp index 853a2583a..e0da95b4c 100644 --- a/src/amr/physical_models/hybrid_model.hpp +++ b/src/amr/physical_models/hybrid_model.hpp @@ -6,6 +6,8 @@ #include "core/def/phare_mpi.hpp" // IWYU pragma: keep #include "core/utilities/mpi_utils.hpp" #include "core/models/hybrid_state.hpp" +#include "core/hybrid/hybrid_quantities.hpp" +#include "core/boundary/boundary_manager.hpp" #include "core/data/ions/particle_initializers/particle_initializer_factory.hpp" #include "initializer/data_provider.hpp" @@ -18,6 +20,7 @@ #include #include #include +#include namespace PHARE::solver { @@ -47,6 +50,8 @@ class HybridModel : public IPhysicalModel using ions_type = Ions; using particle_array_type = Ions::particle_array_type; using resources_manager_type = amr::ResourcesManager; + using boundary_manager_type + = core::BoundaryManager; using ParticleInitializerFactory = core::ParticleInitializerFactory; @@ -56,6 +61,7 @@ class HybridModel : public IPhysicalModel core::HybridState state; std::shared_ptr resourcesManager; + std::shared_ptr boundaryManager; void initialize(level_t& level) override; @@ -92,6 +98,11 @@ class HybridModel : public IPhysicalModel , state{dict} , resourcesManager{std::move(_resourcesManager)} { + 
std::initializer_list scalarQuantities = {}; + std::initializer_list vectorQuantities = {}; + + boundaryManager = std::make_shared( + dict["grid"]["boundary_conditions"], scalarQuantities, vectorQuantities); } diff --git a/src/amr/physical_models/mhd_model.hpp b/src/amr/physical_models/mhd_model.hpp index 6fc406804..fca7a8fb5 100644 --- a/src/amr/physical_models/mhd_model.hpp +++ b/src/amr/physical_models/mhd_model.hpp @@ -3,7 +3,9 @@ #include "core/def.hpp" #include "core/def/phare_mpi.hpp" // IWYU pragma: keep +#include "core/mhd/mhd_quantities.hpp" #include "core/models/mhd_state.hpp" +#include "core/boundary/boundary_manager.hpp" #include "amr/messengers/mhd_messenger_info.hpp" #include "amr/physical_models/physical_model.hpp" @@ -11,6 +13,8 @@ #include +#include +#include #include #include @@ -28,19 +32,22 @@ class MHDModel : public IPhysicalModel using level_t = amr_types::level_t; using Interface = IPhysicalModel; - using physical_quantity_type = core::MHDQuantity; using vecfield_type = VecFieldT; using field_type = vecfield_type::field_type; using state_type = core::MHDState; using gridlayout_type = GridLayoutT; using grid_type = Grid_t; using resources_manager_type = amr::ResourcesManager; + using physical_quantity_type = core::MHDQuantity; + using boundary_manager_type + = core::BoundaryManager; static constexpr std::string_view model_type_name = "MHDModel"; static inline std::string const model_name{model_type_name}; state_type state; std::shared_ptr resourcesManager; + std::shared_ptr boundaryManager; // diagnostics buffers vecfield_type V_diag_{"diagnostics_V_", core::MHDQuantity::Vector::V}; @@ -81,6 +88,17 @@ class MHDModel : public IPhysicalModel resourcesManager->registerResources(P_diag_); resourcesManager->registerResources(tmpField_); resourcesManager->registerResources(tmpVec_); + + std::vector scalarQuantities + = {core::MHDQuantity::Scalar::rho, core::MHDQuantity::Scalar::Etot}; + std::vector vectorQuantities = { + 
core::MHDQuantity::Vector::B, + core::MHDQuantity::Vector::J, + core::MHDQuantity::Vector::E, + core::MHDQuantity::Vector::rhoV, + }; + boundaryManager = std::make_shared( + dict["grid"]["boundary_conditions"], scalarQuantities, vectorQuantities); } ~MHDModel() override = default; diff --git a/src/amr/wrappers/hierarchy.hpp b/src/amr/wrappers/hierarchy.hpp index fa966c6f7..b4ba5cecd 100644 --- a/src/amr/wrappers/hierarchy.hpp +++ b/src/amr/wrappers/hierarchy.hpp @@ -343,7 +343,15 @@ auto griddingAlgorithmDatabase(PHARE::initializer::PHAREDict const& grid) } int periodicity[dimension]; - std::fill_n(periodicity, dimension, 1); // 1==periodic, hardedcoded for all dims for now. + auto boundary_types = parseDimXYZType(grid, "boundary_type"); + for (size_t i = 0; i < dimension; ++i) { + if (boundary_types[i] == "periodic") + periodicity[i] = 1; + else if (boundary_types[i] == "physical") + periodicity[i] = 0; + else + throw std::runtime_error("Error: wrong boundary type " + boundary_types[i]); + } db->putIntegerArray("periodic_dimension", periodicity, dimension); return db; } diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 10273d690..1984498f7 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -4,8 +4,10 @@ project(phare_core) set( SOURCES_INC data/electromag/electromag.hpp data/field/field.hpp + data/field/field_traits.hpp data/grid/gridlayoutdefs.hpp data/grid/gridlayout.hpp + data/grid/gridlayout_traits.hpp data/grid/gridlayout_impl.hpp data/grid/gridlayoutimplyee.hpp data/grid/gridlayout_utils.hpp @@ -26,6 +28,9 @@ set( SOURCES_INC data/vecfield/vecfield_initializer.hpp hybrid/hybrid_quantities.hpp numerics/boundary_condition/boundary_condition.hpp + numerics/boundary_condition/field_boundary_condition.hpp + numerics/boundary_condition/field_boundary_condition_dispatcher.hpp + numerics/boundary_condition/field_neumann_boundary_condition.hpp numerics/interpolator/interpolator.hpp numerics/pusher/boris.hpp 
numerics/pusher/pusher.hpp
diff --git a/src/core/boundary/boundary.hpp b/src/core/boundary/boundary.hpp
new file mode 100644
index 000000000..aa05a3506
--- /dev/null
+++ b/src/core/boundary/boundary.hpp
@@ -0,0 +1,139 @@
+#ifndef PHARE_CORE_BOUNDARY_BOUNDARY_HPP
+#define PHARE_CORE_BOUNDARY_BOUNDARY_HPP
+
+#include "core/boundary/boundary_defs.hpp"
+#include "core/data/field/field_traits.hpp"
+#include "core/data/vecfield/vecfield.hpp"
+#include "core/data/grid/gridlayout_traits.hpp"
+#include "core/numerics/boundary_condition/field_boundary_condition_factory.hpp"
+
+#include
+#include
+#include
+#include
+
+namespace PHARE::core
+{
+/**
+ * @brief A Boundary is associated with one of the physical boundaries (XLower, XUpper) and manages
+ * the collection of boundary conditions associated with each physical quantity that requires one.
+ * This class is not polymorphic itself, but the different types of boundaries are obtained thanks
+ * to the polymorphism of the boundary conditions. Which runtime type to choose for a boundary
+ * condition applied to a physical quantity is controlled by the @c BoundaryFactory following the
+ * desired @c BoundaryType.
+ *
+ * @tparam PhysicalQuantityT The model category of physical quantities (MHDQuantity or
+ * HybridQuantity).
+ * @tparam FieldT The type for scalar fields.
+ * @tparam GridLayoutT The type for the grid layout.
+ */ +template +class Boundary +{ +public: + using scalar_quantity_type = FieldT::physical_quantity_type; + static_assert(std::same_as); + using vector_quantity_type = PhysicalQuantityT::Vector; + using vector_field_type = VecField; + using scalar_field_condition_type = IFieldBoundaryCondition; + using vector_field_condition_type = IFieldBoundaryCondition; + + Boundary() = delete; + Boundary(BoundaryType type, BoundaryLocation location) + : type_{type} + , location_{location} {}; + ~Boundary() = default; + + inline BoundaryType getType() const { return type_; }; + inline BoundaryLocation getLocation() const { return location_; }; + + /** + * @brief Retrieve the registered field boundary condition corresponding to a physical quantity. + * + * @tparam TensorPhysicalQuantityT Type of the physical quantity, expected to be either @c + * PhysicalQuantity::Scalar, or @c PhysicalQuantityT::Vector. + * + * @param quantity The physical quantity whose field boundary condition is wanted + * @return A shared pointer to the boundary condition if a one has been previously registered + * for the physical quantity, nullptr otherwise. + */ + template + auto getFieldCondition(TensorPhysicalQuantityT quantity) const + { + if constexpr (std::same_as) + { + auto it = scalar_field_conditions_.find(quantity); + return (it != scalar_field_conditions_.end()) ? it->second : nullptr; + } + else if constexpr (std::same_as) + { + auto it = vector_field_conditions_.find(quantity); + return (it != vector_field_conditions_.end()) ? it->second : nullptr; + } + else + { + static_assert(dependant_false_, + "Tensoriality of the physical quantity not supported."); + } + } + + /** + * @brief Register a field boundary condition for a quantity to the boundary. + * + * @tparam type The corresponding value of the @c BoundaryType enum corresponding to the desired + * boundary type. 
+ * @tparam TensorPhysicalQuantityT Type of the physical quantity, expected to be either @c + * PhysicalQuantity::Scalar, or @c PhysicalQuantityT::Vector. + * @tparam Args Types of the arguments for the FieldConditionT constructor. + * + * @param quantity The physical quantity (scalar or vector) to which the field condition should + * apply. + * @param args The arguments for the field BC constructor, passed by perfect forwarding to the + * field BC factory. + */ + template + void registerFieldCondition(TensorPhysicalQuantityT quantity, Args&&... args) + { + if constexpr (std::same_as) + { + scalar_field_conditions_[quantity] + = FieldBoundaryConditionFactory::create( + std::forward(args)...); + } + else if constexpr (std::same_as) + { + vector_field_conditions_[quantity] + = FieldBoundaryConditionFactory::create( + std::forward(args)...); + } + else + { + static_assert(dependant_false_, + "Tensoriality of the physical quantity not supported."); + } + } + +private: + using _scalar_field_condition_map_type + = std::unordered_map>; + using _vector_field_condition_map_type + = std::unordered_map>; + + /** Utility to make compilation fail in certain conditions. */ + template + static constexpr bool dependant_false_ = false; + + /** The type of the boundary (open, inflow, reflective ...) */ + BoundaryType type_; + /** The location of the boundary (XLower, XUpper, ...) 
*/
+    BoundaryLocation location_;
+    /** The list of registered scalar field boundary conditions on the boundary */
+    _scalar_field_condition_map_type scalar_field_conditions_;
+    /** The list of registered vector field boundary conditions on the boundary */
+    _vector_field_condition_map_type vector_field_conditions_;
+};
+
+} // namespace PHARE::core
+
+#endif // PHARE_CORE_BOUNDARY_BOUNDARY_HPP
diff --git a/src/core/boundary/boundary_defs.hpp b/src/core/boundary/boundary_defs.hpp
new file mode 100644
index 000000000..ed45e4277
--- /dev/null
+++ b/src/core/boundary/boundary_defs.hpp
@@ -0,0 +1,147 @@
+#ifndef PHARE_CORE_BOUNDARY_BOUNDARY_DEFS_HPP
+#define PHARE_CORE_BOUNDARY_BOUNDARY_DEFS_HPP
+
+#include "core/data/grid/gridlayoutdefs.hpp"
+
+#include <unordered_map>
+
+namespace PHARE::core
+{
+/**
+ * @brief Physical behavior of a boundary.
+ */
+enum class BoundaryType { Reflective, Inflow, Outflow, Open };
+
+/**
+ * @brief Possible codimension of a boundary.
+ */
+enum class BoundaryCodim { One = 1, Two = 2, Three = 3 };
+
+//@{
+//! @name Definitions for boundary array sizes in 1d, 2d, or 3d:
+int const NUM_1D_NODES = 2;
+
+int const NUM_2D_EDGES = 4;
+int const NUM_2D_NODES = 4;
+
+int const NUM_3D_FACES = 6;
+int const NUM_3D_EDGES = 12;
+int const NUM_3D_NODES = 8;
+//@}
+
+/**
+ * @brief Possible locations of 1-codimensional boundary (a face in 3D, an edge in 2D, an extremity
+ * in 1D).
+ */
+enum class BoundaryLocation {
+    XLower = 0,
+    XUpper = 1,
+    YLower = 2,
+    YUpper = 3,
+    ZLower = 4,
+    ZUpper = 5
+};
+
+/// @brief Return the side of a boundary location.
+/// @param boundaryLoc The boundary location.
+/// @return The boundary side.
+constexpr Side getSide(BoundaryLocation boundaryLoc)
+{
+    switch (boundaryLoc)
+    {
+        case BoundaryLocation::XLower:
+        case BoundaryLocation::YLower:
+        case BoundaryLocation::ZLower: return Side::Lower; break;
+
+        case BoundaryLocation::XUpper:
+        case BoundaryLocation::YUpper:
+        case BoundaryLocation::ZUpper: return Side::Upper; break;
+
+        default: throw std::runtime_error("Invalid BoundaryLocation.");
+    }
+};
+
+/// @brief Return the direction of a boundary location.
+/// @param boundaryLoc The boundary location.
+/// @return The boundary direction.
+constexpr Direction getDirection(BoundaryLocation boundaryLoc)
+{
+    switch (boundaryLoc)
+    {
+        case BoundaryLocation::XLower:
+        case BoundaryLocation::XUpper: return Direction::X; break;
+
+        case BoundaryLocation::YLower:
+        case BoundaryLocation::YUpper: return Direction::Y; break;
+
+        case BoundaryLocation::ZLower:
+        case BoundaryLocation::ZUpper: return Direction::Z; break;
+
+        default: throw std::runtime_error("Invalid BoundaryLocation.");
+    }
+};
+
+/**
+ * @brief Possible locations of a 2-codimensional boundary (an edge in 3D, a corner in 2D)
+ */
+enum class Codim2BoundaryLocation {
+    XLower_YLower = 0,
+    XHI_YLower = 1,
+    XLower_YUpper = 2,
+    XHI_YUpper = 3
+};
+
+/**
+ * @brief Possible locations of a 3-codimensional boundary (a corner in 3D)
+ */
+enum class Codim3BoundaryLocation {
+    XLower_YLower_ZLower = 0,
+    XHI_YLower_ZLower = 1,
+    XLower_YUpper_ZLower = 2,
+    XHI_YUpper_ZLower = 3,
+    XLower_YLower_ZUpper = 4,
+    XHI_YLower_ZUpper = 5,
+    XLower_YUpper_ZUpper = 6,
+    XHI_YUpper_ZUpper = 7
+};
+
+/**
+ * @brief Get the BoundaryType from input keyword, and throw an error if the keyword does not
+ * correspond to any known boundary type.
+ */
+inline BoundaryType getBoundaryTypeFromString(std::string const& name)
+{
+    static std::unordered_map const typeMap_ = {
+        {"open", BoundaryType::Open},
+        {"inflow", BoundaryType::Inflow},
+        {"reflective", BoundaryType::Reflective},
+        {"outflow", BoundaryType::Outflow},
+    };
+
+    auto it = typeMap_.find(name);
+    if (it == typeMap_.end())
+        throw std::runtime_error("Wrong boundary type name = " + name);
+    return it->second;
+}
+
+/**
+ * @brief Get the BoundaryLocation from input keyword, and throw an error if the keyword does not
+ * correspond to any known boundary location.
+ */
+inline BoundaryLocation getBoundaryLocationFromString(std::string const& name)
+{
+    static std::unordered_map const typeMap_ = {
+        {"xlower", BoundaryLocation::XLower}, {"xupper", BoundaryLocation::XUpper},
+        {"ylower", BoundaryLocation::YLower}, {"yupper", BoundaryLocation::YUpper},
+        {"zlower", BoundaryLocation::ZLower}, {"zupper", BoundaryLocation::ZUpper},
+    };
+
+    auto it = typeMap_.find(name);
+    if (it == typeMap_.end())
+        throw std::runtime_error("Wrong boundary location name = " + name);
+    return it->second;
+}
+
+} // namespace PHARE::core
+
+#endif /* PHARE_CORE_BOUNDARY_BOUNDARY_DEFS_HPP */
diff --git a/src/core/boundary/boundary_factory.hpp b/src/core/boundary/boundary_factory.hpp
new file mode 100644
index 000000000..675d2fd1c
--- /dev/null
+++ b/src/core/boundary/boundary_factory.hpp
@@ -0,0 +1,151 @@
+#ifndef PHARE_CORE_BOUNDARY_BOUNDARY_FACTORY
+#define PHARE_CORE_BOUNDARY_BOUNDARY_FACTORY
+
+#include "core/boundary/boundary_defs.hpp"
+#include "core/boundary/boundary.hpp"
+#include "core/data/field/field_traits.hpp"
+#include "core/data/grid/gridlayout_traits.hpp"
+
+#include "initializer/data_provider.hpp"
+
+#include
+#include
+#include
+
+namespace PHARE::core
+{
+/**
+ * @brief This class contains all the recipes to create a boundary object according to the desired
+ * type of physical boundary (reflective, open, ...).
It can extract all the necessary data from
+ * the input data dict associated to the boundary (value of physical quantities on the boundary for
+ * an Inflow condition for instance), and create the right boundary conditions associated to each
+ * physical quantity that requires one.
+ *
+ * @tparam PhysicalQuantityT The model category of physical quantities (MHDQuantity or
+ * HybridQuantity).
+ * @tparam FieldT The type for scalar fields.
+ * @tparam GridLayoutT The type for the grid layout.
+ */
+template
+class BoundaryFactory
+{
+public:
+    using boundary_type = Boundary;
+    using boundary_ptr_type = std::unique_ptr;
+    using scalar_quantity_list_type = std::vector;
+    using vector_quantity_list_type = std::vector;
+
+    BoundaryFactory() = delete;
+
+    /**
+     * @brief Create a boundary with the type indicated in the input dict, and register to it all
+     * corresponding field boundary conditions.
+     *
+     * @param location The location of the boundary.
+     * @param dict Input dictionary related to the boundary.
+     * @param scalars Scalar quantities for which it is necessary to register a field boundary
+     * condition.
+     * @param vectors Vector quantities for which it is necessary to register a field boundary
+     * condition.
+     *
+     * @return A unique pointer to the created @c Boundary object.
+     */
+    static boundary_ptr_type create(BoundaryLocation location, initializer::PHAREDict dict,
+                                    scalar_quantity_list_type const& scalars,
+                                    vector_quantity_list_type const& vectors)
+    {
+        std::string typeName = dict["type"].to();
+        BoundaryType type = getBoundaryTypeFromString(typeName);
+        _model_menu_type const quantities{scalars, vectors};
+        initializer::PHAREDict const data
+            = (dict.contains("data")) ?
dict["data"] : initializer::PHAREDict{}; + + // initialize the boundary + boundary_ptr_type boundary = std::make_unique(type, location); + + // register the right boundary condition per physical quantity following the boundary type + switch (type) + { + case BoundaryType::Reflective: + register_reflective_conditions_(boundary, data, quantities); + break; + case BoundaryType::Inflow: + throw std::runtime_error("Inflow boundary type not implemented."); + case BoundaryType::Outflow: + throw std::runtime_error("Outflow boundary type not implemented."); + case BoundaryType::Open: register_open_conditions_(boundary, data, quantities); break; + + default: throw std::runtime_error("Boundary type not implemented."); + } + return boundary; + } + +private: + /** @brief Utility struct to group scalar and vector quantities together */ + struct _model_menu_type + { + scalar_quantity_list_type const& scalars; + vector_quantity_list_type const& vectors; + }; + + /** @brief Register boundary conditions to make a reflective boundary */ + static void register_reflective_conditions_(boundary_ptr_type& boundary, + PHARE::initializer::PHAREDict const& data, + _model_menu_type const& quantities) + { + for (auto const quantity : quantities.scalars) + { + boundary->template registerFieldCondition( + quantity); + } + for (auto const quantity : quantities.vectors) + { + switch (quantity) + { + case (PhysicalQuantityT::Vector::B): + boundary->template registerFieldCondition< + FieldBoundaryConditionType::DivergenceFreeTransverseNeumann>(quantity); + break; + case (PhysicalQuantityT::Vector::J): + boundary->template registerFieldCondition< + FieldBoundaryConditionType::AntiSymmetric>(quantity); + break; + default: + boundary + ->template registerFieldCondition( + quantity); + break; + } + } + } + + /** @brief Register boundary conditions to make an open boundary */ + static void register_open_conditions_(boundary_ptr_type& boundary, + initializer::PHAREDict const& data, + _model_menu_type 
const& quantities) + { + for (auto const quantity : quantities.scalars) + { + boundary->template registerFieldCondition( + quantity); + } + for (auto const quantity : quantities.vectors) + { + switch (quantity) + { + case (PhysicalQuantityT::Vector::B): + boundary->template registerFieldCondition< + FieldBoundaryConditionType::DivergenceFreeTransverseNeumann>(quantity); + break; + default: + boundary->template registerFieldCondition( + quantity); + break; + } + } + } +}; + +} // namespace PHARE::core + +#endif // PHARE_CORE_BOUNDARY_BOUNDARY_FACTORY diff --git a/src/core/boundary/boundary_manager.hpp b/src/core/boundary/boundary_manager.hpp new file mode 100644 index 000000000..4262494fa --- /dev/null +++ b/src/core/boundary/boundary_manager.hpp @@ -0,0 +1,108 @@ +#ifndef PHARE_CORE_BOUNDARY_BOUNDARY_MANAGER +#define PHARE_CORE_BOUNDARY_BOUNDARY_MANAGER + +#include "core/boundary/boundary.hpp" +#include "core/boundary/boundary_defs.hpp" +#include "core/boundary/boundary_factory.hpp" +#include "core/data/field/field_traits.hpp" +#include "core/data/grid/gridlayout_traits.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_factory.hpp" + +#include "initializer/data_provider.hpp" + +#include +#include +#include +#include +#include +#include + +namespace PHARE::core +{ +/** + * @brief Manage the lifecycle and retrieval of physical boundary conditions. + * + * Store and provide access to boundary condition objects for both + * scalar and vector fields based on the boundary location and physical quantity. + * + * @tparam PhysicalQuantityT Type defining scalar and vector quantities (MHDQuantity or + * HybridQuantity). + * @tparam FieldT The scalar field type. + * @tparam GridLayoutT The grid layout type. 
+ */ +template +class BoundaryManager +{ +public: + using boundary_type = Boundary; + using boundary_factory_type = BoundaryFactory; + using scalar_quantity_type = FieldT::physical_quantity_type; + static_assert(std::same_as); + using vector_field_type = VecField; + using scalar_condition_type = IFieldBoundaryCondition; + using vector_condition_type = IFieldBoundaryCondition; + + BoundaryManager() = delete; + + /** + * @brief Constructor. Register boundary conditions based on inputfile data. + * @param dict Configuration dictionary. + * @param scalar_quantities List of scalar quantities to manage. + * @param vector_quantities List of vector quantities to manage. + */ + BoundaryManager(PHARE::initializer::PHAREDict const& dict, + std::vector const& scalarQuantities, + std::vector const& vectorQuantities) + { + dict.visit(cppdict::visit_all_nodes, + [&](std::string const& locationName, initializer::PHAREDict::data_t _) { + /// @todo I don't do anything with the second argument because it cannot be + /// transformed back into a dict. Maybe add the corresponding constructor to + /// cppdict, or add the possibility to have a lambda with the second arg + /// being a dict ? + BoundaryLocation location = getBoundaryLocationFromString(locationName); + boundaries_[location] = boundary_factory_type::create( + location, dict[locationName], scalarQuantities, vectorQuantities); + }); + } + + + /** + * @brief Retrieve the boundary for a specific location. + * + * @param location The location of the desired boundary. + * @return Shared pointer to the matching boundary, or nullptr if not found. + * + */ + std::shared_ptr getBoundary(BoundaryLocation location) const + { + auto it = boundaries_.find(location); + return (it != boundaries_.end()) ? 
it->second : nullptr; + } + + +private: + using _boundary_map_type = std::unordered_map>; + + /** + * @brief Utility struct to group scalar and vector quantities together + * + */ + struct SimulationMenu + { + std::vector const& scalars; + std::vector const& vectors; + }; + + /** + * @brief List of boundaries mapped by their location + * + */ + _boundary_map_type boundaries_; +}; + +} // namespace PHARE::core + +#endif // PHARE_CORE_BOUNDARY_BOUNDARY_MANAGER diff --git a/src/core/data/field/field_traits.hpp b/src/core/data/field/field_traits.hpp new file mode 100644 index 000000000..86db2f12d --- /dev/null +++ b/src/core/data/field/field_traits.hpp @@ -0,0 +1,45 @@ +#ifndef PHARE_CORE_DATA_FIELD_FIELD_TRAITS +#define PHARE_CORE_DATA_FIELD_FIELD_TRAITS + +#include +#include +#include + +namespace PHARE::core +{ + +/** + * @brief Define the requirements for a Field type. + * + * A Field must provide static metadata, identification methods, + * memory access, and dimension-specific indexing operators. 
+ */ +template +concept IsField = requires(T field) { + { T::dimension } -> std::convertible_to; + typename T::value_type; + typename T::physical_quantity_type; + + { field.name() } -> std::convertible_to; + { field.physicalQuantity() } -> std::same_as; + + { field.isUsable() } -> std::same_as; + { field.data() } -> std::same_as; // Inherited from NdArrayView + + requires((T::dimension == 1 && requires(T f) { + { f(std::declval()) } -> std::same_as; + }) || (T::dimension == 2 && requires(T f) { + { + f(std::declval(), std::declval()) + } -> std::same_as; + }) || (T::dimension == 3 && requires(T f) { + { + f(std::declval(), std::declval(), + std::declval()) + } -> std::same_as; + })); +}; + +} // namespace PHARE::core + +#endif // PHARE_CORE_DATA_FIELD_FIELD_TRAITS diff --git a/src/core/data/grid/gridlayout.hpp b/src/core/data/grid/gridlayout.hpp index 5dc1442a3..e592ffdc0 100644 --- a/src/core/data/grid/gridlayout.hpp +++ b/src/core/data/grid/gridlayout.hpp @@ -336,7 +336,7 @@ namespace core NO_DISCARD auto physicalEndIndex(QtyCentering centering) const { std::uint32_t icentering = static_cast(centering); - return physicalStartIndexTable_[icentering]; + return physicalEndIndexTable_[icentering]; } /** @@ -788,6 +788,66 @@ namespace core return result; } + /** + * @brief Returns the mirrored index of @p index with respect to a boundary. + * + * @tparam direction The direction normal to the boundary. + * @tparam side Whether we are reflecting across the Lower or Upper boundary. + * @tparam centering The staggering of the data (Primal cells or Dual nodes) along @p + * direction + * + * @param index The directional index to be reflected. + * + * @return The reflected directional index. 
+ * + */ + template + NO_DISCARD inline constexpr std::uint32_t boundaryMirrored(std::uint32_t const index) const + { + int32_t constexpr s = static_cast(side); + size_t constexpr iCentering = static_cast(centering); + size_t constexpr iDir = static_cast(direction); + + int32_t const i = static_cast(index); + + uint32_t const boundaryLimitIndex = (side == Side::Lower) + ? physicalStartIndexTable_[iCentering][iDir] + : physicalEndIndexTable_[iCentering][iDir]; + + int32_t const b = static_cast(boundaryLimitIndex); + + if constexpr (centering == QtyCentering::primal) + { + return static_cast(i - 2 * (i - b)); + } + else // if constexpr (centering == QtyCentering::dual) + { + return static_cast(i - 2 * (i - b) + s); + }; + } + + /** + * @brief Mirrors a multidimensional @p point across a boundary plane + * + * @tparam dimension The number of spatial dimensions + * @tparam direction The axis along which to mirror (X, Y, or Z) + * @tparam side Upper or Lower boundary + * @tparam centering Primal or Dual centering along @p direction + * + * @param point The input point to be mirrored + * + * @return A new Point with the mirrored coordinate in the @p direction axis + * + */ + template + NO_DISCARD inline constexpr Point + boundaryMirrored(Point const point) const + { + constexpr std::size_t iDir = static_cast(direction); + auto mirroredPoint = point; + mirroredPoint[iDir] = boundaryMirrored(point[iDir]); + return mirroredPoint; + } // ---------------------------------------------------------------------- // LAYOUT SPECIFIC METHODS // @@ -927,6 +987,27 @@ namespace core return newCentering; } + /** + * @brief toFieldBox takes a local cell-centered box and creates a box + * that is adequate for the specified quantity. The layout is used to know + * the centering, nbr of ghosts of the specified quantity. 
+ * + * @see FieldGeometry::toFieldBox + * + * */ + NO_DISCARD Box toFieldBox(Box box, + Quantity::Scalar qty) const + { + auto const centerings = centering(qty); + core::for_N([&](auto i) { + auto const is_primal = (centerings[i] == core::QtyCentering::primal) ? 1 : 0; + box.upper[i] = box.upper[i] + is_primal; + } // + ); + + return box; + } + /** * @brief momentsToEx return the indexes and associated coef to compute the linear * interpolation necessary to project moments onto Ex. diff --git a/src/core/data/grid/gridlayout_traits.hpp b/src/core/data/grid/gridlayout_traits.hpp new file mode 100644 index 000000000..df355d332 --- /dev/null +++ b/src/core/data/grid/gridlayout_traits.hpp @@ -0,0 +1,102 @@ +#ifndef PHARE_CORE_GRID_GRIDLAYOUT_TRAITS_HPP +#define PHARE_CORE_GRID_GRIDLAYOUT_TRAITS_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/utilities/box/box.hpp" +#include "core/utilities/point/point.hpp" + +#include +#include +#include + +namespace PHARE::core +{ +/** + * @brief Define the requirements for a grid layout type. 
+ * + * @see GridLayout + * + */ +template +concept IsGridLayout + = requires(GridLayoutT g, Direction d, QtyCentering c, int idx, std::uint32_t u_idx, + typename GridLayoutT::Quantity::Scalar qty, + typename GridLayoutT::Quantity::Vector v_qty, + Point p_int, Box b_int) { + { GridLayoutT::dimension } -> std::convertible_to; + { GridLayoutT::interp_order } -> std::convertible_to; + { GridLayoutT::nbrParticleGhosts() } -> std::convertible_to; + + { g.origin() } -> std::same_as>; + { g.meshSize() } -> std::same_as const&>; + { g.inverseMeshSize(d) } -> std::convertible_to; + { g.inverseMeshSize() } -> std::same_as>; + { + g.nbrCells() + } -> std::convertible_to const&>; + { g.cellVolume() } -> std::convertible_to; + { g.layoutName() } -> std::same_as; + { g.levelNumber() } -> std::convertible_to; + + { g.AMRBox() } -> std::same_as const&>; + { g.localToAMR(p_int) } -> std::same_as>; + { g.localToAMR(b_int) } -> std::same_as>; + { g.AMRToLocal(p_int) } -> std::same_as>; + { g.AMRToLocal(b_int) } -> std::same_as>; + + { + GridLayoutT::centering(qty) + } -> std::same_as>; + { + GridLayoutT::centering(v_qty) + } -> std::same_as, 3>>; + { GridLayoutT::changeCentering(c) } -> std::same_as; + { GridLayoutT::nextIndex(c, u_idx) } -> std::convertible_to; + { GridLayoutT::prevIndex(c, u_idx) } -> std::convertible_to; + + { g.physicalStartIndex(c, d) } -> std::convertible_to; + { g.physicalEndIndex(c, d) } -> std::convertible_to; + { g.ghostStartIndex(c, d) } -> std::convertible_to; + { g.ghostEndIndex(c, d) } -> std::convertible_to; + { g.ghostStartToEnd(c, d) } -> std::same_as>; + { g.physicalStartToEnd(c, d) } -> std::same_as>; + + { g.allocSize(qty) } -> std::same_as>; + { + g.allocSizeDerived(qty, d) + } -> std::same_as>; + { + g.nbrPhysicalNodes(qty) + } -> std::same_as>; + { GridLayoutT::nbrGhosts(c) } -> std::convertible_to; + + { + g.cellCenteredCoordinates(p_int) + } -> std::same_as>; + + // --- 8. 
Projection Helpers (Static Methods) --- + // { GridLayoutT::momentsToEx() }; + // { GridLayoutT::momentsToEy() }; + // { GridLayoutT::momentsToEz() }; + // { GridLayoutT::ExToMoments() }; + // { GridLayoutT::EyToMoments() }; + // { GridLayoutT::EzToMoments() }; + // { GridLayoutT::JxToMoments() }; + // { GridLayoutT::JyToMoments() }; + // { GridLayoutT::JzToMoments() }; + // { GridLayoutT::BxToEx() }; + // { GridLayoutT::ByToEx() }; + // { GridLayoutT::BzToEx() }; + // { GridLayoutT::JxToEx() }; + // { GridLayoutT::JyToEy() }; + // { GridLayoutT::JzToEz() }; + // { GridLayoutT::faceXToCellCenter() }; + // { GridLayoutT::edgeXToCellCenter() }; + + { g.amr_lcl_idx(b_int) }; + { g.amr_lcl_idx() }; + }; + +} // namespace PHARE::core + +#endif // PHARE_CORE_GRID_GRIDLAYOUT_TRAITS_HPP diff --git a/src/core/data/grid/gridlayoutdefs.hpp b/src/core/data/grid/gridlayoutdefs.hpp index 02d715e97..6a1f166b2 100644 --- a/src/core/data/grid/gridlayoutdefs.hpp +++ b/src/core/data/grid/gridlayoutdefs.hpp @@ -12,7 +12,12 @@ namespace PHARE { namespace core { - enum class Direction { X, Y, Z }; + enum class Direction { X = 0, Y = 1, Z = 2 }; + + /** + *@brief To indicate the side of the domain. + */ + enum class Side { Lower = -1, Upper = 1 }; enum class QtyCentering : std::uint16_t { primal = 0, dual = 1 }; diff --git a/src/core/data/tensorfield/tensorfield_traits.hpp b/src/core/data/tensorfield/tensorfield_traits.hpp new file mode 100644 index 000000000..cb370693c --- /dev/null +++ b/src/core/data/tensorfield/tensorfield_traits.hpp @@ -0,0 +1,115 @@ +#ifndef PHARE_CORE_DATA_TENSOR_FIELD_TRAITS +#define PHARE_CORE_DATA_TENSOR_FIELD_TRAITS + +#include "core/data/field/field_traits.hpp" +#include "core/data/vecfield/vecfield_component.hpp" + +#include +#include + +namespace PHARE::core +{ +/** + * @brief Define the requirements for a tensor field type. 
+ * + * @see TensorField + */ +template +concept IsTensorField = requires(T tf, T const ctf, T const& crtf, Component component, size_t i) { + requires IsField; + typename T::value_type; + typename T::tensor_t; + + requires std::same_as; + requires std::same_as; + { T::size() } -> std::convertible_to; + requires std::bool_constant<(T::size() >= 0)>::value; + + { tf.name() } -> std::same_as; + { tf.getComponent(component) } -> std::same_as; + { ctf.getComponent(component) } -> std::same_as; + { tf(component) } -> std::same_as; + { tf.getComponentName(component) } -> std::same_as; + { tf[i] } -> std::same_as; + // missing 'components' overloads + { tf.copyData(crtf) } -> std::same_as; + + { + tf.begin() + } -> std::same_as::iterator>; + { tf.end() } -> std::same_as; + { + ctf.begin() + } -> std::same_as::const_iterator>; + { ctf.end() } -> std::same_as; + + { ctf.componentNames() } -> std::same_as const&>; + { ctf.physicalQuantity() } -> std::same_as; +}; + +/** + * @brief A type verifying this concept is either a Field or a TensorField. + */ +template +concept IsScalarOrTensorField = IsField || IsTensorField; + + +/** + * @brief Select the physical quantity type based on tensoriality. + */ +template +struct PhysicalQuantityTypeSelector; +/** @brief Specialization for scalar fields */ +template +struct PhysicalQuantityTypeSelector +{ + using type = ScalarOrTensorFieldT::physical_quantity_type; +}; +/** @brief Specialization for tensor fields */ +template +struct PhysicalQuantityTypeSelector +{ + using type = ScalarOrTensorFieldT::tensor_t; +}; + + +/** + * @brief Select the underlying field type based on tensoriality. 
+ */ +template +struct FieldTypeSelector; +/** @brief Specialization for scalar fields */ +template +struct FieldTypeSelector +{ + using type = ScalarOrTensorFieldT; +}; +/** @brief Specialization for tensor fields */ +template +struct FieldTypeSelector +{ + using type = ScalarOrTensorFieldT::field_type; +}; + + +/** + * @brief Select the underlying field type based on tensoriality. + */ +template +struct NumberOfComponentsSelector; +/** @brief Specialization for scalar fields */ +template +struct NumberOfComponentsSelector +{ + static constexpr size_t value = 1; +}; +/** @brief Specialization for tensor fields */ +template +struct NumberOfComponentsSelector +{ + static constexpr size_t value = ScalarOrTensorFieldT::size(); +}; + +} // namespace PHARE::core + +#endif // PHARE_CORE_DATA_TENSOR_FIELD_TRAITS diff --git a/src/core/data/vecfield/vecfield_traits.hpp b/src/core/data/vecfield/vecfield_traits.hpp new file mode 100644 index 000000000..18ce8c4c0 --- /dev/null +++ b/src/core/data/vecfield/vecfield_traits.hpp @@ -0,0 +1,11 @@ +#ifndef PHARE_CORE_DATA_VECFIELD_TRAITS_HPP +#define PHARE_CORE_DATA_VECFIELD_TRAITS_HPP + +#include "core/data/tensorfield/tensorfield_traits.hpp" + +namespace PHARE::core +{ +template +concept IsVecField = PHARE::core::IsTensorField || (T::N == 3); +} +#endif // PHARE_CORE_DATA_VECFIELD_TRAITS_HPP diff --git a/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp new file mode 100644 index 000000000..c7c311814 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp @@ -0,0 +1,110 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_ANTISYMMETRIC_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_ANTISYMMETRIC_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include 
"core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" +#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" + +namespace PHARE::core +{ +/** + * @brief Anti-symmetric boundary condition for scalar and vector fields. + * + * For scalars, this class imposes a zero value on the boundary. For vectors, it imposes a null + * value for tangential components, and a Neumann (zero-gradient) condition for the normal + * component. + * + * @tparam ScalarOrTensorFieldT Type of the field or tensor field. + * @tparam GridLayoutT Grid layout configuration. + * + */ +template +class FieldAntiSymmetricBoundaryCondition + : public FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldAntiSymmetricBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldAntiSymmetricBoundaryCondition>; + using physical_quantity_type = Super::physical_quantity_type; + using field_type = Super::field_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr size_t N = Super::N; + static constexpr bool is_scalar = Super::is_scalar; + + FieldAntiSymmetricBoundaryCondition() = default; + + FieldAntiSymmetricBoundaryCondition(FieldAntiSymmetricBoundaryCondition const&) = default; + FieldAntiSymmetricBoundaryCondition& operator=(FieldAntiSymmetricBoundaryCondition const&) + = default; + FieldAntiSymmetricBoundaryCondition(FieldAntiSymmetricBoundaryCondition&&) = default; + FieldAntiSymmetricBoundaryCondition& operator=(FieldAntiSymmetricBoundaryCondition&&) = default; + + virtual ~FieldAntiSymmetricBoundaryCondition() = default; + + + /** @brief Implement getType to return Anti-symmetric. 
*/ + FieldBoundaryConditionType getType() const override + { + return FieldBoundaryConditionType::AntiSymmetric; + } + + + /** + * @brief Apply the anti-symmetric condition using compile-time specialized parameters. + * + * @tparam direction Normal direction of the boundary. + * @tparam side Boundary side (Lower or Upper). + * @tparam Centerings Component-wise centerings. + * + */ + template + void apply_specialized(ScalarOrTensorFieldT& scalarOrTensorField, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + constexpr std::array centerings = {Centerings...}; + + // no other way than using a lambda builder + auto fields = [&]() { + if constexpr (is_scalar) + return std::make_tuple(scalarOrTensorField); + else + return scalarOrTensorField.components(); + }(); + + for_N([&](auto i) { + constexpr QtyCentering centering = centerings[i]; + field_type& field = std::get(fields); + if constexpr ((i != static_cast(direction)) || is_scalar) + // if the component is tangent to the boundary, or if we are handling a scalar + { + scalar_dirichlet_condition_.template apply_specialized( + field, localGhostBox, gridLayout, time); + } + else + // if the component is normal to the boundary + { + scalar_neumann_condition_.template apply_specialized( + field, localGhostBox, gridLayout, time); + } + }); + } + +private: + using _scalar_neumann_condition_type = FieldNeumannBoundaryCondition; + using _scalar_dirichlet_condition_type + = FieldDirichletBoundaryCondition; + + _scalar_neumann_condition_type scalar_neumann_condition_{}; + _scalar_dirichlet_condition_type scalar_dirichlet_condition_{}; + +}; // class FieldAntiSymmetricBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_ANTISYMMETRIC_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/field_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_boundary_condition.hpp new file mode 100644 index 
000000000..fe33492d1 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_boundary_condition.hpp @@ -0,0 +1,91 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP + +#include "core/boundary/boundary_defs.hpp" +#include "core/data/field/field_traits.hpp" +#include "core/data/tensorfield/tensorfield_traits.hpp" +#include "core/data/grid/gridlayout_traits.hpp" +#include "core/utilities/box/box.hpp" + +#include + +namespace PHARE::core +{ + +/** + * @brief Supported types of field boundary conditions. + * + * @note The enum fields are ordered from lowest to highest priority at edges/corners. + * + */ +enum class FieldBoundaryConditionType : int { + Dirichlet, + AntiSymmetric, + Symmetric, + Neumann, + DivergenceFreeTransverseNeumann +}; + + +/** + * @brief Interface for applying boundary conditions to scalar or tensor fields. + * + * Concrete field boundary conditions are provided by implementing this interface. + * + * @tparam ScalarOrTensorFieldT The type of the scalarOrTensorField (must satisfy IsField or + * IsTensorField). + * @tparam GridLayoutT The grid layout type (must satisfy IsGridLayout). + * + */ +template + requires(IsField || IsTensorField) +class IFieldBoundaryCondition +{ +public: + static constexpr bool is_scalar = IsField; + static constexpr size_t dimension = GridLayoutT::dimension; + static constexpr size_t N = NumberOfComponentsSelector::value; + + using This = IFieldBoundaryCondition; + using physical_quantity_type + = PhysicalQuantityTypeSelector::type; + using field_type = FieldTypeSelector::type; + + + /** @brief Return the type of the boundary condition.
*/ + virtual FieldBoundaryConditionType getType() const = 0; + + virtual ~IFieldBoundaryCondition() = default; + + + /** + * @brief Enforce the boundary condition on the provided scalar/tensor @p scalarOrTensorField, + * by filling accordingly the ghost cells contained in the local box @p localGhostBox, at the + * physical time @p time, and considering that the boundary is located at @p boundaryLocation. + * + * @param scalarOrTensorField The scalar or tensor to which we apply the boundary condition. + * @param boundaryLocation The location of the physical boundary. + * @param localGhostBox The box containing the ghost cells/nodes to fill. + * @param gridLayout The grid layout. + * @param time The physical time, useful for time-dependent boundary conditions. + * + */ + virtual void apply(ScalarOrTensorFieldT& scalarOrTensorField, + BoundaryLocation const boundaryLocation, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + = 0; + + + /** + * @brief Define comparison of field boundary conditions based on the enum @c + * FieldBoundaryConditionType.
+ */ + std::strong_ordering operator<=>(This const& other) const + { + return this->getType() <=> other.getType(); + } +}; + +} // namespace PHARE::core +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp b/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp new file mode 100644 index 000000000..048fd808c --- /dev/null +++ b/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp @@ -0,0 +1,132 @@ +#ifndef PHARE_CORE_DATA_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_DISPATCHER +#define PHARE_CORE_DATA_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_DISPATCHER + +#include "core/boundary/boundary_defs.hpp" +#include "core/data/grid/gridlayout_traits.hpp" +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition.hpp" + + +namespace PHARE::core +{ +/** + * @brief Intermediate dispatcher for scalarOrTensorField boundary conditions class, inheriting from + * @link PHARE::core::IFieldBoundaryCondition @endlink, and from which concrete implementations + * must inherit. + * + * Provides a mechanism to dispatch runtime boundary information (location, centering) + * to compile-time specialized methods in concrete implementations. It implements the Curiously + * Recurring Template Pattern so the complicated dispatching code is not duplicated in concrete + * implementations of scalarOrTensorField boundary conditions. Actual implementations are expected + * to implement an @c apply_specialized templated function with the following interface: + * + * @code + * template + * void apply_specialized( + * ScalarOrTensorFieldT& scalarOrTensorField, + * Box const& local_ghost_box, + * GridLayoutT const& grid_layout, + * double const& time + * ); + * @endcode + * + * @tparam ScalarOrTensorFieldT Type of scalarOrTensorField managed.
+ * @tparam GridLayoutT Grid layout configuration. + * @tparam Derived The concrete class inheriting from this dispatcher. + * + */ +template +class FieldBoundaryConditionDispatcher + : public IFieldBoundaryCondition +{ +public: + using Super = IFieldBoundaryCondition; + using typename Super::field_type; + using typename Super::physical_quantity_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr bool is_scalar = Super::is_scalar; + static constexpr size_t N = Super::N; + + /** + * @brief Implements the @link PHARE::core::IFieldBoundaryCondition::apply @endlink abstract + * function. + * + * Triggers the recursive dispatching of centerings, directions, and sides to + * specialized implementations. + */ + void apply(ScalarOrTensorFieldT& scalarOrTensorField, + BoundaryLocation const boundaryLocation, + Box const& localGhostBox, GridLayoutT const& gridLayout, + double const time) override + { + dispatch_centerings<>(scalarOrTensorField, boundaryLocation, localGhostBox, gridLayout, + time); + } + +protected: + /** + * @brief Helper that recursively promote runtime centerings, direction and side to compile-time + * tags. + * + * The recursive character of this helper is necessary because we need to promote the centering + * of all components of the (tensor) scalarOrTensorField in the direction normal to the + * boundary. 
+ */ + template + void dispatch_centerings(ScalarOrTensorFieldT& scalarOrTensorField, + BoundaryLocation const boundaryLocation, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + Direction direction = getDirection(boundaryLocation); + Side side = getSide(boundaryLocation); + physical_quantity_type quantity = scalarOrTensorField.physicalQuantity(); + + std::array centerings; + if constexpr (is_scalar) + { + centerings[0] = GridLayoutT::centering(quantity)[static_cast(direction)]; + } + else + { + auto full_centerings = GridLayoutT::centering(quantity); + for (size_t i = 0; i < N; ++i) + centerings[i] = full_centerings[i][static_cast(direction)]; + } + + constexpr size_t nAlreadyPromoted = sizeof...(AlreadyPromoted); + + if constexpr (nAlreadyPromoted == N) + // base case: all directional centerings have been promoted + { + auto d_v = promote(direction); + auto s_v = promote(side); + + std::visit( + [&](auto d_tag, auto s_tag) { + static_cast(this) + ->template apply_specialized( + scalarOrTensorField, localGhostBox, gridLayout, time); + }, + d_v, s_v); + } + else + // we grow the list of promoted centerings to call the next recursive version of the + // function + { + auto c_v + = promote(centerings[nAlreadyPromoted]); + + std::visit( + [&](auto c_tag) { + this->dispatch_centerings( + scalarOrTensorField, boundaryLocation, localGhostBox, gridLayout, time); + }, + c_v); + }; + } +}; +} // namespace PHARE::core + +#endif // PHARE_CORE_DATA_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_DISPATCHER diff --git a/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp new file mode 100644 index 000000000..5f0250da1 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp @@ -0,0 +1,94 @@ +#ifndef PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY +#define 
PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY + +#include "core/data/tensorfield/tensorfield_traits.hpp" +#include "core/data/vecfield/vecfield_traits.hpp" + +#include "core/numerics/boundary_condition/field_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp" + +#include +#include + +namespace PHARE::core +{ +/** + * @brief Factory for creating field boundary condition objects. + * + */ +class FieldBoundaryConditionFactory +{ +public: + /** + * @brief Main function to create field boundary conditions + * + * It passes the required arguments for the constructor of the actual field boundary condition + * by perfect forwarding. + * + * @tparam type The value of the enum @c FieldBoundaryConditionType. + * @tparam ScalarOrTensorFieldT Field or TensorField. + * @tparam GridLayoutT The type of grid layout. + * @tparam Args The types of the arguments for the constructor of the boundary condition + * corresponding to @p type. + * + * @param args Arguments passed by perfect forwarding to the boundary condition constructor + * corresponding to @p type. + * + * @return A unique pointer to the created field boundary condition. + */ + template + static std::unique_ptr> + create(Args&&... 
args) + { + if constexpr (type == FieldBoundaryConditionType::Neumann) + { + return std::make_unique< + FieldNeumannBoundaryCondition>( + std::forward(args)...); + } + else if constexpr (type == FieldBoundaryConditionType::Dirichlet) + { + return std::make_unique< + FieldDirichletBoundaryCondition>( + std::forward(args)...); + } + else if constexpr (type == FieldBoundaryConditionType::Symmetric) + { + return std::make_unique< + FieldSymmetricBoundaryCondition>( + std::forward(args)...); + } + else if constexpr (type == FieldBoundaryConditionType::AntiSymmetric) + { + return std::make_unique< + FieldAntiSymmetricBoundaryCondition>( + std::forward(args)...); + } + else if constexpr (type == FieldBoundaryConditionType::DivergenceFreeTransverseNeumann) + { + if constexpr (IsVecField) + { + return std::make_unique>(std::forward(args)...); + } + else + { + throw std::runtime_error("Divergence-free transverse Neumann condition only " + "applies to vector fields."); + } + } + else + { + throw std::runtime_error("Unhandled FieldBoundaryConditionType"); + }; + } +}; + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY diff --git a/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp new file mode 100644 index 000000000..01f2aed28 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp @@ -0,0 +1,111 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIRICHLET_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIRICHLET_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" + +#include +#include + +namespace PHARE::core +{ +/** + * @brief Dirichlet boundary condition for scalar and vector fields. 
+ * + * Impose a constant value on the boundary by linearly extrapolating the (tensor) field in the ghost + * cells. + * + * @tparam ScalarOrTensorFieldT Type of the field or tensor field. + * @tparam GridLayoutT Grid layout configuration. + * + */ +template +class FieldDirichletBoundaryCondition + : public FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldDirichletBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldDirichletBoundaryCondition>; + using physical_quantity_type = Super::physical_quantity_type; + using field_type = Super::field_type; + using value_type = field_type::value_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr size_t N = Super::N; + static constexpr bool is_scalar = Super::is_scalar; + + FieldDirichletBoundaryCondition() = default; + + FieldDirichletBoundaryCondition(value_type value) + : value_{value} {}; + + FieldDirichletBoundaryCondition(std::array value) + : value_{value} {}; + + FieldDirichletBoundaryCondition(FieldDirichletBoundaryCondition const&) = default; + FieldDirichletBoundaryCondition& operator=(FieldDirichletBoundaryCondition const&) = default; + FieldDirichletBoundaryCondition(FieldDirichletBoundaryCondition&&) = default; + FieldDirichletBoundaryCondition& operator=(FieldDirichletBoundaryCondition&&) = default; + + virtual ~FieldDirichletBoundaryCondition() = default; + + + /** @brief Implement getType to return Dirichlet. */ + FieldBoundaryConditionType getType() const override + { + return FieldBoundaryConditionType::Dirichlet; + } + + + /** + * @brief Apply the Dirichlet condition using compile-time specialized parameters. + * + * @tparam direction Normal direction of the boundary. + * @tparam side Boundary side (Lower or Upper). + * @tparam Centerings Component-wise centerings.
+ * + */ + template + void apply_specialized(ScalarOrTensorFieldT& scalarOrTensorField, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + constexpr std::array centerings = {Centerings...}; + + auto fields = [&]() { + if constexpr (is_scalar) + return std::make_tuple(scalarOrTensorField); + else + return scalarOrTensorField.components(); + }(); + + for_N([&](auto i) { + constexpr QtyCentering centering = centerings[i]; + field_type& field = std::get(fields); + auto fieldBox = gridLayout.toFieldBox(localGhostBox, field.physicalQuantity()); + for (_index_type const& index : fieldBox) + { + _index_type mirrorIndex + = gridLayout.template boundaryMirrored( + index); + // if the ghost is on the boundary (possible if primal), set to value, + // else set with a linear extrapolation + field(index) = (mirrorIndex[i] == index[i]) ? value_[i] + : 2.0 * value_[i] - field(mirrorIndex); + } + }); + } + +private: + using _index_type = Point; + + std::array value_{0}; /**< Value to impose on the boundary, zero by default. 
*/ + +}; // class FieldDirichletBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIRICHLET_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp new file mode 100644 index 000000000..6d384d703 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp @@ -0,0 +1,162 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIVERGENCE_FREE_TRANSVERSE_NEUMANN_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIVERGENCE_FREE_TRANSVERSE_NEUMANN_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" +#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" + +#include + +namespace PHARE::core +{ +/** + * @brief Boundary condition implementation for vector fields, that enforces the normal derivative + * of the tangential components to be zero, and sets the normal component in the ghost cells such + * that its numerical divergence is zero. + * + * First tangential components are mirrored, then the normal component is filled on ghost cells to + * have a null divergence. + * + * @warning This condition only makes sense for a vector field with same centerings than the + * magnetic vector field. + * + * @tparam VecFieldT Type of the vector field. + * @tparam GridLayoutT Grid layout configuration. 
+ * + */ +template +class FieldDivergenceFreeTransverseNeumannBoundaryCondition + : public FieldBoundaryConditionDispatcher< + VecFieldT, GridLayoutT, + FieldDivergenceFreeTransverseNeumannBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + VecFieldT, GridLayoutT, + FieldDivergenceFreeTransverseNeumannBoundaryCondition>; + using physical_quantity_type = Super::physical_quantity_type; + using field_type = Super::field_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr size_t N = Super::N; + static_assert( + N == 3, + "Divergence-free transverse Neumann boundary condition only applies to vector fields."); + + FieldDivergenceFreeTransverseNeumannBoundaryCondition() = default; + + FieldDivergenceFreeTransverseNeumannBoundaryCondition( + FieldDivergenceFreeTransverseNeumannBoundaryCondition const&) + = default; + FieldDivergenceFreeTransverseNeumannBoundaryCondition& + operator=(FieldDivergenceFreeTransverseNeumannBoundaryCondition const&) + = default; + FieldDivergenceFreeTransverseNeumannBoundaryCondition( + FieldDivergenceFreeTransverseNeumannBoundaryCondition&&) + = default; + FieldDivergenceFreeTransverseNeumannBoundaryCondition& + operator=(FieldDivergenceFreeTransverseNeumannBoundaryCondition&&) + = default; + + virtual ~FieldDivergenceFreeTransverseNeumannBoundaryCondition() = default; + + + /** @brief Implements getType. */ + FieldBoundaryConditionType getType() const override + { + return FieldBoundaryConditionType::DivergenceFreeTransverseNeumann; + } + + + /** + * @brief Apply the condition using compile-time specialized parameters. + * + * @tparam direction Normal direction of the boundary. + * @tparam side Boundary side (Lower or Upper). + * @tparam Centerings Component-wise centerings. 
+ * + */ + template + void apply_specialized(VecFieldT& vecField, Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + constexpr std::array centerings = {Centerings...}; + + auto fields = vecField.components(); + + // here we check the condition that the vector field has same staggering than the magnetic + // field + assert(gridLayout.centering(vecField) == gridLayout.centering(physical_quantity_type::B)); + + // handle transverse components + for_N([&](auto iTransverse) { + if constexpr (iTransverse != static_cast(direction)) + { + constexpr QtyCentering centering = centerings[iTransverse]; + field_type& tField = std::get(fields); + scalar_neumann_condition_.template apply_specialized( + tField, localGhostBox, gridLayout, time); + } + }); + + // handle normal component, by iterating on ghost cells from closest to farthest to the + // physical domain. + constexpr size_t iNormal = static_cast(direction); + field_type& nField = std::get(fields); + + // define a lambda for iterating in different orders + auto apply_loop_for_normal_component = [&](auto begin, auto end) { + for (auto it = begin; it != end; ++it) + { + _index const& index = *it; + + // compute the "transverse divergence" in the cell + double transverseDiv = 0.0; + for_N([&](auto iTransverse) { + if constexpr (iTransverse != iNormal) + { + field_type& tField = std::get(fields); + _index const& upper_index = index.template neighbor(); + transverseDiv += tField(upper_index) - tField(index); + } + }); + + // set the last unset normal component using the disrete form of 'div = 0' + if constexpr (side == Side::Upper) + { + _index const& index_to_set = index.template neighbor(); + _index const& index_already_set = index; + nField(index_to_set) = nField(index_already_set) - transverseDiv; + } + else // if constexpr (side == Side::Lower) + { + _index const& index_to_set = index; // to continue + _index const& index_already_set = index.template neighbor(); + nField(index_to_set) = 
nField(index_already_set) + transverseDiv; + } + } + }; + + // apply the loop in the required following which side of the box we are on + if constexpr (side == Side::Upper) + { + apply_loop_for_normal_component(localGhostBox.begin(), localGhostBox.end()); + } + else // if constexpr (side == Side::Lower) + { + apply_loop_for_normal_component(localGhostBox.rbegin(), localGhostBox.rend()); + } + } + +private: + using _scalar_neumann_boundary_condition_type + = FieldNeumannBoundaryCondition; + using _index = Point; + + _scalar_neumann_boundary_condition_type scalar_neumann_condition_; + +}; // class FieldDivergenceFreeTransverseNeumannBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIVERGENCE_FREE_TRANSVERSE_NEUMANN_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp new file mode 100644 index 000000000..f3ba31f08 --- /dev/null +++ b/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp @@ -0,0 +1,101 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NEUMANN_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NEUMANN_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" + +#include +#include + +namespace PHARE::core +{ +/** + * @brief Neumann boundary condition implementation for fields and tensor fields. + * + * This class implements a zero-gradient boundary condition by mirroring values + * from the physical domain into the ghost regions. + * + * @tparam ScalarOrTensorFieldT Type of the field or tensor field. + * @tparam GridLayoutT Grid layout configuration. 
+ * + */ +template +class FieldNeumannBoundaryCondition + : public FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldNeumannBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldNeumannBoundaryCondition>; + using physical_quantity_type = Super::physical_quantity_type; + using field_type = Super::field_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr size_t N = Super::N; + static constexpr bool is_scalar = Super::is_scalar; + + FieldNeumannBoundaryCondition() = default; + + FieldNeumannBoundaryCondition(FieldNeumannBoundaryCondition const&) = default; + FieldNeumannBoundaryCondition& operator=(FieldNeumannBoundaryCondition const&) = default; + FieldNeumannBoundaryCondition(FieldNeumannBoundaryCondition&&) = default; + FieldNeumannBoundaryCondition& operator=(FieldNeumannBoundaryCondition&&) = default; + + virtual ~FieldNeumannBoundaryCondition() = default; + + + /** @brief Implement getType to return Neumann. */ + FieldBoundaryConditionType getType() const override + { + return FieldBoundaryConditionType::Neumann; + } + + + /** + * @brief Apply the Neumann condition using compile-time specialized parameters. + * + * @tparam direction Normal direction of the boundary. + * @tparam side Boundary side (Lower or Upper). + * @tparam Centerings Component-wise centerings. 
+ * + */ + template + void apply_specialized(ScalarOrTensorFieldT& scalarOrTensorField, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + using Index = Point; + + constexpr std::array centerings = {Centerings...}; + + // no other way than using a lambda builder + auto fields = [&]() { + if constexpr (is_scalar) + return std::make_tuple(scalarOrTensorField); + else + return scalarOrTensorField.components(); + }(); + + for_N([&](auto i) { + constexpr QtyCentering centering = centerings[i]; + field_type& field = std::get(fields); + auto fieldBox = gridLayout.toFieldBox(localGhostBox, field.physicalQuantity()); + Index physicalLimitIndex = (side == Side::Lower) + ? gridLayout.physicalStartIndex(centering) + : gridLayout.physicalEndIndex(centering); + for (Index const& index : fieldBox) + { + Index mirrorIndex + = gridLayout.template boundaryMirrored( + index); + field(index) = field(mirrorIndex); + } + }); + } +}; // class FieldNeumannBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NEUMANN_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp new file mode 100644 index 000000000..bd79423ff --- /dev/null +++ b/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp @@ -0,0 +1,109 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_SYMMETRIC_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_SYMMETRIC_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" +#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" + +namespace PHARE::core +{ +/** + * @brief Symmetric boundary condition for 
scalar and vector fields. + * + * For scalars, this class imposes a null derivative along the normal (equivalent to a Neumann + * boundary condition). For vectors it imposes a Neumann condition on the tangential components, and + * a null value for the normal component. + * + * @tparam ScalarOrTensorFieldT Type of the field or tensor field. + * @tparam GridLayoutT Grid layout configuration. + * + */ +template +class FieldSymmetricBoundaryCondition + : public FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldSymmetricBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldSymmetricBoundaryCondition>; + using physical_quantity_type = Super::physical_quantity_type; + using field_type = Super::field_type; + + static constexpr size_t dimension = Super::dimension; + static constexpr size_t N = Super::N; + static constexpr bool is_scalar = Super::is_scalar; + + FieldSymmetricBoundaryCondition() = default; + + FieldSymmetricBoundaryCondition(FieldSymmetricBoundaryCondition const&) = default; + FieldSymmetricBoundaryCondition& operator=(FieldSymmetricBoundaryCondition const&) = default; + FieldSymmetricBoundaryCondition(FieldSymmetricBoundaryCondition&&) = default; + FieldSymmetricBoundaryCondition& operator=(FieldSymmetricBoundaryCondition&&) = default; + + virtual ~FieldSymmetricBoundaryCondition() = default; + + + /** @brief Implement getType to return Symmetric. */ + FieldBoundaryConditionType getType() const override + { + return FieldBoundaryConditionType::Symmetric; + } + + + /** + * @brief Apply the symmetric condition using compile-time specialized parameters. + * + * @tparam direction Normal direction of the boundary. + * @tparam side Boundary side (Lower or Upper). + * @tparam Centerings Component-wise centerings.
+ * + */ + template + void apply_specialized(ScalarOrTensorFieldT& scalarOrTensorField, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + constexpr std::array centerings = {Centerings...}; + + // no other way than using a lambda builder + auto fields = [&]() { + if constexpr (is_scalar) + return std::make_tuple(scalarOrTensorField); + else + return scalarOrTensorField.components(); + }(); + + for_N([&](auto i) { + constexpr QtyCentering centering = centerings[i]; + field_type& field = std::get(fields); + if constexpr ((i != static_cast(direction)) || is_scalar) + // if the component is tangent to the boundary, or if we are handling a scalar + { + scalar_neumann_condition_.template apply_specialized( + field, localGhostBox, gridLayout, time); + } + else + // if the component is normal to the boundary + { + scalar_dirichlet_condition_.template apply_specialized( + field, localGhostBox, gridLayout, time); + } + }); + } + +private: + using _scalar_neumann_condition_type = FieldNeumannBoundaryCondition; + using _scalar_dirichlet_condition_type + = FieldDirichletBoundaryCondition; + + _scalar_neumann_condition_type scalar_neumann_condition_{}; + _scalar_dirichlet_condition_type scalar_dirichlet_condition_{}; + +}; // class FieldSymmetricBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_SYMMETRIC_BOUNDARY_CONDITION_HPP diff --git a/src/core/numerics/boundary_condition/boundary_condition.cpp b/src/core/numerics/boundary_condition/no_boundary.hpp; similarity index 100% rename from src/core/numerics/boundary_condition/boundary_condition.cpp rename to src/core/numerics/boundary_condition/no_boundary.hpp; diff --git a/src/core/utilities/box/box.hpp b/src/core/utilities/box/box.hpp index 57bdd9ecf..ab8f212e8 100644 --- a/src/core/utilities/box/box.hpp +++ b/src/core/utilities/box/box.hpp @@ -18,6 +18,8 @@ namespace PHARE::core template class box_iterator; +template +class 
box_reverse_iterator; /** Represents a 1D, 2D or 3D box of integer or floating point @@ -28,6 +30,7 @@ struct Box { using value_type = Type; using iterator = box_iterator; + using reverse_iterator = box_reverse_iterator; static constexpr auto dimension = dim; @@ -140,6 +143,46 @@ struct Box } } + NO_DISCARD auto rbegin() { return reverse_iterator{this, upper}; } + NO_DISCARD auto rbegin() const { return reverse_iterator{this, upper}; } + + NO_DISCARD auto rend() + { + static_assert(dim <= 3 and dim > 0); + // The "end" of a reverse iterator is one step before the lower bound + if constexpr (dim == 1) + { + return reverse_iterator{this, {lower[0] - 1}}; + } + else if constexpr (dim == 2) + { + return reverse_iterator{this, {lower[0] - 1, lower[1] - 1}}; + } + else + { + return reverse_iterator{this, {lower[0] - 1, lower[1] - 1, lower[2] - 1}}; + } + } + + + NO_DISCARD auto rend() const + { + static_assert(dim <= 3 and dim > 0); + // The "end" of a reverse iterator is one step before the lower bound + if constexpr (dim == 1) + { + return reverse_iterator{this, {lower[0] - 1}}; + } + else if constexpr (dim == 2) + { + return reverse_iterator{this, {lower[0] - 1, lower[1] - 1}}; + } + else + { + return reverse_iterator{this, {lower[0] - 1, lower[1] - 1, lower[2] - 1}}; + } + } + NO_DISCARD constexpr static std::size_t nbrRemainBoxes() { @@ -202,6 +245,53 @@ class box_iterator }; +template +class box_reverse_iterator +{ +public: + box_reverse_iterator(Box const* box, Point index = Point{}) + : box_{box} + , index_{index} + { + } + + auto& operator*() const { return index_; } + auto operator->() const { return &index_; } + + // Reverse increment logic: starts at upper, moves toward lower + void decrement(std::size_t idim) + { + index_[idim]--; + if (idim == 0) + return; + + // If we go below the lower bound of the current dimension + if (index_[idim] < box_->lower[idim]) + { + decrement(idim - 1); + // If the parent dimension is still valid, reset current to upper + if 
(index_[idim - 1] >= box_->lower[idim - 1]) + index_[idim] = box_->upper[idim]; + } + } + + box_reverse_iterator& operator++() + { + decrement(dim - 1); + return *this; + } + + bool operator!=(box_reverse_iterator const& other) const + { + return box_ != other.box_ or index_ != other.index_; + } + +private: + Box const* box_; + Point index_; +}; + + template Box(Point lower, Point upper) -> Box; diff --git a/src/core/utilities/meta/meta_utilities.hpp b/src/core/utilities/meta/meta_utilities.hpp index 293bf64fc..19451c3e8 100644 --- a/src/core/utilities/meta/meta_utilities.hpp +++ b/src/core/utilities/meta/meta_utilities.hpp @@ -4,6 +4,8 @@ #include "core/utilities/types.hpp" +#include +#include #include @@ -175,6 +177,47 @@ namespace core return p; } + /** + * @brief A type-wrapper for compile-time constant values. + * This alias allows runtime enum values to be "lifted" into the type system, + * enabling their use as template arguments for kernel specialization. + * @tparam x The enum or integral value to be wrapped. + */ + template + using Tag = std::integral_constant; + + + /** + * @brief Maps a runtime enum value to a type-safe variant of Tags. + * This function effectively "lifts" a value into the type system. + * It uses a fold expression to iterate over the provided @p Values. + * @tparam Values The full range of enum values (e.g. {X, Y, Z}). + * @param val The runtime enum variable to be promoted. + */ + template + auto promote(auto val) + { + // Ensure the pack is not empty to avoid compilation errors + static_assert(sizeof...(Values) > 0, "promote requires at least one enum value"); + + // Ensure all values belong to same enum + using T = std::common_type_t; + static_assert((std::is_same_v && ...), + "All promoted values must belong to the same Enum type"); + + // The variant type is composed of the Tag types for each value + std::variant...> result; + + // Fold expression to find the match at runtime + bool found = ((val == Values ? 
(result = Tag{}, true) : false) || ...); + + if (!found) + throw std::runtime_error("Unknown enum value during promotion"); + + return result; + } + + } // namespace core } // namespace PHARE diff --git a/src/core/utilities/point/point.hpp b/src/core/utilities/point/point.hpp index d24fcaf89..5701f7b97 100644 --- a/src/core/utilities/point/point.hpp +++ b/src/core/utilities/point/point.hpp @@ -6,6 +6,7 @@ #include #include #include +#include #include "core/utilities/meta/meta_utilities.hpp" #include "core/def.hpp" @@ -259,6 +260,19 @@ namespace core // else no return cause not yet handled } + + template + NO_DISCARD constexpr auto neighbor() const + { + constexpr size_t d = static_cast(direction); + static_assert(std::is_integral_v, + "'offset' template parameter must have an integral type."); + + Point result = *this; + result[d] += static_cast(offset); + return result; + } + private: std::array r{}; }; diff --git a/tests/amr/data/field/field_data/CMakeLists.txt b/tests/amr/data/field/field_data/CMakeLists.txt new file mode 100644 index 000000000..d06bda973 --- /dev/null +++ b/tests/amr/data/field/field_data/CMakeLists.txt @@ -0,0 +1,31 @@ +cmake_minimum_required (VERSION 3.20.1) +project(test-field-data) + +set(SOURCES_INC + ) + +set(SOURCES_CPP + test_field_data.cpp + ) + +add_executable(${PROJECT_NAME} ${SOURCES_INC} ${SOURCES_CPP}) + +target_include_directories(${PROJECT_NAME} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + $ + ) + +target_link_libraries(${PROJECT_NAME} PRIVATE + phare_amr + ${GTEST_LIBS}) + + +target_include_directories(${PROJECT_NAME} PRIVATE + $) + +target_link_libraries(${PROJECT_NAME} PRIVATE ${SAMRAI_LIBRARIES}) + +add_phare_test(${PROJECT_NAME} ${CMAKE_CURRENT_BINARY_DIR}) + + diff --git a/tests/amr/data/field/field_data/test_field_data.cpp b/tests/amr/data/field/field_data/test_field_data.cpp new file mode 100644 index 000000000..4c32d7f84 --- /dev/null +++ b/tests/amr/data/field/field_data/test_field_data.cpp @@ -0,0 +1,14 @@ +#include 
"amr/data/field/field_data.hpp" +#include "amr/data/field/field_data_traits.hpp" + +#include "simulator/phare_types.hpp" + +namespace PHARE::amr +{ +constexpr SimOpts opts; +using Types = PHARE_Types::core_types; +using Grid = Types::Grid_t; +using GridLayout = Types::GridLayout_t; + +static_assert(IsFieldData>); +} // namespace PHARE::amr diff --git a/tests/amr/data/tensorfield/tensor_field_data/CMakeLists.txt b/tests/amr/data/tensorfield/tensor_field_data/CMakeLists.txt new file mode 100644 index 000000000..aca11b522 --- /dev/null +++ b/tests/amr/data/tensorfield/tensor_field_data/CMakeLists.txt @@ -0,0 +1,31 @@ +cmake_minimum_required (VERSION 3.20.1) +project(test-tensor-field-data) + +set(SOURCES_INC + ) + +set(SOURCES_CPP + test_tensor_field_data.cpp + ) + +add_executable(${PROJECT_NAME} ${SOURCES_INC} ${SOURCES_CPP}) + +target_include_directories(${PROJECT_NAME} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + $ + ) + +target_link_libraries(${PROJECT_NAME} PRIVATE + phare_amr + ${GTEST_LIBS}) + + +target_include_directories(${PROJECT_NAME} PRIVATE + $) + +target_link_libraries(${PROJECT_NAME} PRIVATE ${SAMRAI_LIBRARIES}) + +add_phare_test(${PROJECT_NAME} ${CMAKE_CURRENT_BINARY_DIR}) + + diff --git a/tests/amr/data/tensorfield/tensor_field_data/test_tensor_field_data.cpp b/tests/amr/data/tensorfield/tensor_field_data/test_tensor_field_data.cpp new file mode 100644 index 000000000..18d28bbef --- /dev/null +++ b/tests/amr/data/tensorfield/tensor_field_data/test_tensor_field_data.cpp @@ -0,0 +1,26 @@ +#include "amr/data/tensorfield/tensor_field_data.hpp" +#include "amr/data/tensorfield/tensor_field_data_traits.hpp" +#include "core/mhd/mhd_quantities.hpp" + +#include "simulator/phare_types.hpp" + +#include "gtest/gtest.h" + +namespace PHARE::amr +{ +constexpr SimOpts opts; +constexpr std::size_t rank = 1; +using Types = PHARE_Types::core_types; +using Grid = Types::Grid_t; +using GridLayout = Types::GridLayout_t; +using PhysicalQuantity = MHDQuantity; + 
+static_assert(IsTensorFieldData>); +} // namespace PHARE::amr + +int main(int argc, char** argv) +{ + ::testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/tests/core/data/field/CMakeLists.txt b/tests/core/data/field/CMakeLists.txt new file mode 100644 index 000000000..3b2b44f99 --- /dev/null +++ b/tests/core/data/field/CMakeLists.txt @@ -0,0 +1,31 @@ +cmake_minimum_required (VERSION 3.20.1) +project(test-field) + +set(SOURCES_INC + ) + +set(SOURCES_CPP + test_field.cpp + ) + +add_executable(${PROJECT_NAME} ${SOURCES_INC} ${SOURCES_CPP}) + +target_include_directories(${PROJECT_NAME} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + $ + ) + +target_link_libraries(${PROJECT_NAME} PRIVATE + phare_amr + ${GTEST_LIBS}) + + +target_include_directories(${PROJECT_NAME} PRIVATE + $) + +target_link_libraries(${PROJECT_NAME} PRIVATE ${SAMRAI_LIBRARIES}) + +add_phare_test(${PROJECT_NAME} ${CMAKE_CURRENT_BINARY_DIR}) + + diff --git a/tests/core/data/field/test_field.cpp b/tests/core/data/field/test_field.cpp new file mode 100644 index 000000000..5e1cc89e8 --- /dev/null +++ b/tests/core/data/field/test_field.cpp @@ -0,0 +1,8 @@ +#include "core/data/field/field.hpp" +#include "core/data/field/field_traits.hpp" +#include "core/mhd/mhd_quantities.hpp" + +namespace PHARE::core +{ +static_assert(IsField>); +} diff --git a/tests/core/data/gridlayout/CMakeLists.txt b/tests/core/data/gridlayout/CMakeLists.txt index 92bf68d80..ee7ea1eab 100644 --- a/tests/core/data/gridlayout/CMakeLists.txt +++ b/tests/core/data/gridlayout/CMakeLists.txt @@ -24,6 +24,7 @@ set(SOURCES_CPP gridlayout_laplacian.cpp gridlayout_field_centered_coord.cpp gridlayout_indexing.cpp + gridlayout_traits.cpp test_linear_combinaisons_yee.cpp test_nextprev.cpp test_main.cpp diff --git a/tests/core/data/gridlayout/gridlayout_traits.cpp b/tests/core/data/gridlayout/gridlayout_traits.cpp new file mode 100644 index 000000000..e69de29bb diff --git 
a/tests/core/data/ion_population/test_ion_population_fixtures.hpp b/tests/core/data/ion_population/test_ion_population_fixtures.hpp index 8330ae735..7314c9b5d 100644 --- a/tests/core/data/ion_population/test_ion_population_fixtures.hpp +++ b/tests/core/data/ion_population/test_ion_population_fixtures.hpp @@ -23,7 +23,7 @@ struct UsableIonsDefaultTypes public: auto static constexpr dim = ParticleArray_::dimension; auto static constexpr interp = interp_; - SimOpts<> static constexpr opts{dim, interp_}; + SimOpts static constexpr opts{dim, interp_}; using PHARE_Types = PHARE::core::PHARE_Types; using ParticleArray_t = ParticleArray_; diff --git a/tests/core/data/tensorfield/CMakeLists.txt b/tests/core/data/tensorfield/CMakeLists.txt new file mode 100644 index 000000000..7e30ae8f8 --- /dev/null +++ b/tests/core/data/tensorfield/CMakeLists.txt @@ -0,0 +1,31 @@ +cmake_minimum_required (VERSION 3.20.1) +project(test-tensor-field) + +set(SOURCES_INC + ) + +set(SOURCES_CPP + test_tensorfield.cpp + ) + +add_executable(${PROJECT_NAME} ${SOURCES_INC} ${SOURCES_CPP}) + +target_include_directories(${PROJECT_NAME} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + $ + ) + +target_link_libraries(${PROJECT_NAME} PRIVATE + phare_amr + ${GTEST_LIBS}) + + +target_include_directories(${PROJECT_NAME} PRIVATE + $) + +target_link_libraries(${PROJECT_NAME} PRIVATE ${SAMRAI_LIBRARIES}) + +add_phare_test(${PROJECT_NAME} ${CMAKE_CURRENT_BINARY_DIR}) + + diff --git a/tests/core/data/tensorfield/test_tensorfield.cpp b/tests/core/data/tensorfield/test_tensorfield.cpp new file mode 100644 index 000000000..3c047907f --- /dev/null +++ b/tests/core/data/tensorfield/test_tensorfield.cpp @@ -0,0 +1,27 @@ +#include "core/data/tensorfield/tensorfield.hpp" +#include "core/data/tensorfield/tensorfield_traits.hpp" +#include "core/mhd/mhd_quantities.hpp" + +#include "simulator/phare_types.hpp" + +#include "gtest/gtest.h" + +namespace PHARE::amr +{ +constexpr SimOpts opts; +constexpr std::size_t rank = 1; +using Types = 
PHARE_Types::core_types; +using Grid = Types::Grid_t; +using Field = Grid::field_type; +using GridLayout = Types::GridLayout_t; +using PhysicalQuantity = MHDQuantity; + +static_assert(IsTensorField>); +} // namespace PHARE::amr + +int main(int argc, char** argv) +{ + ::testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/tests/core/utilities/box/test_box.cpp b/tests/core/utilities/box/test_box.cpp index 1e2fac759..fafd8e44e 100644 --- a/tests/core/utilities/box/test_box.cpp +++ b/tests/core/utilities/box/test_box.cpp @@ -5,7 +5,6 @@ #include "core/utilities/box/box.hpp" #include "core/utilities/point/point.hpp" -#include "gmock/gmock.h" #include "gtest/gtest.h" using namespace PHARE::core; @@ -189,7 +188,6 @@ TEST(BoxIterator, hasEnd) EXPECT_EQ(expected3, *actual3); } - TEST(BoxIterator, iterates) { Box b1{{1}, {10}}; @@ -257,7 +255,120 @@ TEST(BoxIterator, iterates) } +TEST(BoxReverseIterator, hasBegin) +{ + Box b1{{1}, {10}}; + Box const cb1{{1}, {10}}; + Box b2{{1, 4}, {10, 12}}; + Box b3{{1, 4, 9}, {10, 12, 24}}; + + auto expected = Point{10}; + auto actual = std::rbegin(b1); + EXPECT_EQ(expected, *actual); + + auto const cexpected = Point{10}; + auto cactual = std::rbegin(cb1); + EXPECT_EQ(cexpected, *cactual); + + auto expected2 = Point{10, 12}; + auto actual2 = std::rbegin(b2); + EXPECT_EQ(expected2, *actual2); + + auto expected3 = Point{10, 12, 24}; + auto actual3 = std::rbegin(b3); + EXPECT_EQ(expected3, *actual3); +} + + +TEST(BoxReverseIterator, hasEnd) +{ + Box b1{{1}, {10}}; + Box const cb1{{1}, {10}}; + Box b2{{1, 4}, {10, 12}}; + Box b3{{1, 4, 9}, {10, 12, 24}}; + + auto expected = Point{0}; + auto actual = std::rend(b1); + EXPECT_EQ(expected, *actual); + + auto const cexpected = Point{0}; + auto cactual = std::rend(cb1); + EXPECT_EQ(cexpected, *cactual); + auto expected2 = Point{0, 3}; + auto actual2 = std::rend(b2); + EXPECT_EQ(expected2, *actual2); + + auto expected3 = Point{0, 3, 8}; + auto actual3 = std::rend(b3); + 
EXPECT_EQ(expected3, *actual3); +} + +TEST(BoxReverseIterator, iterates) +{ + Box b1{{1}, {10}}; + Box const cb1{{1}, {10}}; + Box b2{{1, 4}, {10, 12}}; + Box b3{{1, 4, 9}, {10, 12, 24}}; + + { + auto expected = Point{9}; + auto actual = std::rbegin(b1); + EXPECT_EQ(expected, *(++actual)); + } + { + auto const cexpected = Point{9}; + auto cactual = std::rbegin(cb1); + EXPECT_EQ(cexpected, *(++cactual)); + } + { + auto expected2 = Point{10, 11}; + auto actual2 = std::rbegin(b2); + EXPECT_EQ(expected2, *(++actual2)); + } + { + auto expected3 = Point{10, 12, 23}; + auto actual3 = std::rbegin(b3); + EXPECT_EQ(expected3, *(++actual3)); + } + { + Box small{{2, 1}, {3, 2}}; + auto it = std::rbegin(small); + ++it; + auto expected = Point{3, 1}; + EXPECT_EQ(expected, *it); + ++it; + expected = Point{2, 2}; + EXPECT_EQ(expected, *it); + } + { + auto dummy1 = Point{}; + for (auto it = b1.rbegin(); it != b1.rend(); ++it) + { + dummy1 = *it; + } + auto expected1 = Point{1}; + EXPECT_EQ(expected1, dummy1); + } + { + auto dummy = Point{}; + for (auto it = b2.rbegin(); it != b2.rend(); ++it) + { + dummy = *it; + } + auto expected = Point{1, 4}; + EXPECT_EQ(expected, dummy); + } + { + auto dummy3 = Point{}; + for (auto it = b3.rbegin(); it != b3.rend(); ++it) + { + dummy3 = *it; + } + auto expected = Point{1, 4, 9}; + EXPECT_EQ(expected, dummy3); + } +} int main(int argc, char** argv) { diff --git a/tests/core/utilities/point/test_point.cpp b/tests/core/utilities/point/test_point.cpp index 34ebcd525..fd0422edd 100644 --- a/tests/core/utilities/point/test_point.cpp +++ b/tests/core/utilities/point/test_point.cpp @@ -5,7 +5,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" -using namespace PHARE; +using namespace PHARE::core; TEST(Point, canBeBuiltByTemplateDeduction) @@ -41,7 +41,16 @@ TEST(Point, canBeSummedWithARvaluePoint) EXPECT_EQ(3, p1[0]); } - +TEST(Point, canComputeItsNeighbors) +{ + Point p2 = {1, 2}; + Point actual2 = p2.neighbor<0, 1>(); + Point expected2 = Point{2, 
2}; + EXPECT_EQ(actual2, expected2); + actual2 = p2.neighbor<1, 1>(); + expected2 = Point{1, 3}; + EXPECT_EQ(actual2, expected2); +} int main(int argc, char** argv) From fab001f558684052c017dfbcd3b501984237ae26 Mon Sep 17 00:00:00 2001 From: Ivan Girault Date: Fri, 20 Feb 2026 15:43:37 +0100 Subject: [PATCH 4/7] fix parsing of boundary conditions; correct box_reverse_iterator --- pyphare/pyphare/pharein/initialize/general.py | 5 +- pyphare/pyphare/pharein/simulation.py | 58 ++++++++++---- src/core/boundary/boundary_defs.hpp | 7 +- src/core/boundary/boundary_factory.hpp | 2 + src/core/utilities/box/box.hpp | 79 +++++++++---------- tests/core/utilities/box/test_box.cpp | 36 ++++++++- 6 files changed, 120 insertions(+), 67 deletions(-) diff --git a/pyphare/pyphare/pharein/initialize/general.py b/pyphare/pyphare/pharein/initialize/general.py index 98417356a..68b733ef8 100644 --- a/pyphare/pyphare/pharein/initialize/general.py +++ b/pyphare/pyphare/pharein/initialize/general.py @@ -109,8 +109,9 @@ def populateDict(sim): sides = "lower", "upper" for direction in directions[:sim.ndim]: for side in sides: - add_string(f"simulation/grid/boundary_conditions/{direction}{ - side}/type", sim.boundary_conditions[f"{direction}{side}"]["type"]) + location = f"{direction}{side}" + add_string(f"simulation/grid/boundary_conditions/{location}/type", + sim.boundary_conditions[f"{location}"]["type"]) add_int("simulation/interp_order", sim.interp_order) add_int("simulation/refined_particle_nbr", sim.refined_particle_nbr) diff --git a/pyphare/pyphare/pharein/simulation.py b/pyphare/pyphare/pharein/simulation.py index 546b5350d..39fae2052 100644 --- a/pyphare/pyphare/pharein/simulation.py +++ b/pyphare/pyphare/pharein/simulation.py @@ -270,21 +270,51 @@ def check_boundaries(ndim, **kwargs): # ------------------------------------------------------------------------------ def check_boundary_conditions(ndim, **kwargs): - valid_bc_types = ("open", "reflective") - directions = "x", "y", "z" + 
valid_bc_types = ("open", "reflective", "none") + all_directions = ["x", "y", "z"][:ndim] sides = "lower", "upper" - valid_boundary_names = [f"{directions[i]}{side}" for side in sides for i in range(ndim)] - default_boundary_conditions = {name: {"type": "open"} - for name in valid_boundary_names} - boundary_conditions = kwargs.get( - "boundary_conditions", default_boundary_conditions) - for name, condition in boundary_conditions.items(): - if not name in valid_boundary_names: - raise ValueError(f"Wrong boundary name {name}: should belong to {valid_boundary_names}") - condition_type = condition["type"] - if not condition_type in valid_bc_types: - raise ValueError( - f"Non-existing boundary condition type {condition_type}.") + boundary_types = kwargs["boundary_types"] + physical_directions = [] + periodic_directions = [] + for dir, type in zip(all_directions, boundary_types): + if type == "physical": + physical_directions.append(dir) + elif type == "periodic": + periodic_directions.append(dir) + physical_boundary_locations = [f"{dir}{side}" for dir in physical_directions for side in sides] + all_boundary_locations = [f"{dir}{side}" for side in sides for dir in all_directions] + default_boundary_conditions = {location: {"type": "none"} for location in all_boundary_locations} + boundary_conditions = kwargs.get("boundary_conditions", {}) + + if not isinstance(boundary_conditions, dict): + raise TypeError(f"A dict should be passed to argument 'boundary_conditions'") + + # check first that all provided locations are valid + for location in boundary_conditions: + if not location in all_boundary_locations: + raise ValueError(f"Wrong boundary name {location}: should belong to {all_boundary_locations}") + + # attribute a default 'none' type to all unspecified boundaries + for location in all_boundary_locations: + if location not in boundary_conditions: + boundary_conditions[location] = {'type': 'none'} + + # check that all boundaries have a dict, which contain a 'type' key in 
their dict associated to a valid value + for location in all_boundary_locations: + boundary_condition = boundary_conditions[location] + if not isinstance(boundary_condition, dict): + raise TypeError(f"A dict should be passed to the boundary {location} for specifying a boundary condition") + if 'type' not in boundary_condition: + raise KeyError(f"No key 'type' found in the boundary_condition dict passed to {location}") + boundary_type = boundary_condition['type'] + if boundary_type not in valid_bc_types: + raise ValueError(f"Boundary type {boundary_type} is not valid: it should belong to {valid_bc_types}") + + # now check that all physical boundary have a boundary type other than 'none' + for location in physical_boundary_locations: + if boundary_conditions[location]['type'] == 'none': + raise KeyError(f"{location} is a physical boundary and should be provided with a valid type other than 'none'.") + return boundary_conditions diff --git a/src/core/boundary/boundary_defs.hpp b/src/core/boundary/boundary_defs.hpp index ed45e4277..389ef9a2a 100644 --- a/src/core/boundary/boundary_defs.hpp +++ b/src/core/boundary/boundary_defs.hpp @@ -10,7 +10,7 @@ namespace PHARE::core /** * @brief Physical behavior of a boundary. */ -enum class BoundaryType { Reflective, Inflow, Outflow, Open }; +enum class BoundaryType { None, Reflective, Inflow, Outflow, Open }; /* * @brief Possible codimension of a boundary. 
@@ -112,9 +112,8 @@ enum class Codim3BoundaryLocation { inline BoundaryType getBoundaryTypeFromString(std::string const& name) { static std::unordered_map const typeMap_ = { - {"open", BoundaryType::Open}, - {"inflow", BoundaryType::Inflow}, - {"reflective", BoundaryType::Reflective}, + {"none", BoundaryType::None}, {"open", BoundaryType::Open}, + {"inflow", BoundaryType::Inflow}, {"reflective", BoundaryType::Reflective}, {"outflow", BoundaryType::Outflow}, }; diff --git a/src/core/boundary/boundary_factory.hpp b/src/core/boundary/boundary_factory.hpp index 675d2fd1c..055b9f3cf 100644 --- a/src/core/boundary/boundary_factory.hpp +++ b/src/core/boundary/boundary_factory.hpp @@ -66,6 +66,8 @@ class BoundaryFactory // register the right boundary condition per physical quantity following the boundary type switch (type) { + case BoundaryType::None: + // do nothing case BoundaryType::Reflective: register_reflective_conditions_(boundary, data, quantities); break; diff --git a/src/core/utilities/box/box.hpp b/src/core/utilities/box/box.hpp index ab8f212e8..c12bd5a87 100644 --- a/src/core/utilities/box/box.hpp +++ b/src/core/utilities/box/box.hpp @@ -148,42 +148,20 @@ struct Box NO_DISCARD auto rend() { - static_assert(dim <= 3 and dim > 0); - // The "end" of a reverse iterator is one step before the lower bound - if constexpr (dim == 1) - { - return reverse_iterator{this, {lower[0] - 1}}; - } - else if constexpr (dim == 2) - { - return reverse_iterator{this, {lower[0] - 1, lower[1] - 1}}; - } - else - { - return reverse_iterator{this, {lower[0] - 1, lower[1] - 1, lower[2] - 1}}; - } + // Point precisely to the index that index_[0]-- creates after passing 'lower' + auto end_idx = lower; + end_idx[0]--; + return reverse_iterator{this, end_idx}; } - NO_DISCARD auto rend() const { - static_assert(dim <= 3 and dim > 0); - // The "end" of a reverse iterator is one step before the lower bound - if constexpr (dim == 1) - { - return reverse_iterator{this, {lower[0] - 1}}; - } - 
else if constexpr (dim == 2) - { - return reverse_iterator{this, {lower[0] - 1, lower[1] - 1}}; - } - else - { - return reverse_iterator{this, {lower[0] - 1, lower[1] - 1, lower[2] - 1}}; - } + // Point precisely to the index that index_[0]-- creates after passing 'lower' + auto end_idx = lower; + end_idx[0]--; + return reverse_iterator{this, end_idx}; } - NO_DISCARD constexpr static std::size_t nbrRemainBoxes() { if constexpr (dim == 1) @@ -255,26 +233,42 @@ class box_reverse_iterator { } - auto& operator*() const { return index_; } - auto operator->() const { return &index_; } + // Return by reference is okay here because we are NOT using std::reverse_iterator's temporary + // shift + auto const& operator*() const { return index_; } + auto const* operator->() const { return &index_; } - // Reverse increment logic: starts at upper, moves toward lower void decrement(std::size_t idim) { - index_[idim]--; - if (idim == 0) - return; - - // If we go below the lower bound of the current dimension - if (index_[idim] < box_->lower[idim]) + // If we are at the lower bound for this specific dimension + if (index_[idim] == box_->lower[idim]) { + if (idim == 0) + { + // We have finished the entire box. + // Underflow X so it matches the rend() sentinel. + index_[0]--; + return; + } + + // Try to decrement the parent dimension decrement(idim - 1); - // If the parent dimension is still valid, reset current to upper - if (index_[idim - 1] >= box_->lower[idim - 1]) + + // ONLY reset this dimension to 'upper' if the parent + // didn't just underflow past its own lower bound. + using signed_t = std::make_signed_t; + if (static_cast(index_[0]) >= static_cast(box_->lower[0])) + { index_[idim] = box_->upper[idim]; + } + // If parent IS invalid, we leave index_[idim] at lower[idim] + // to keep the point consistent for the rend() comparison. 
+ } + else + { + index_[idim]--; } } - box_reverse_iterator& operator++() { decrement(dim - 1); @@ -291,7 +285,6 @@ class box_reverse_iterator Point index_; }; - template Box(Point lower, Point upper) -> Box; diff --git a/tests/core/utilities/box/test_box.cpp b/tests/core/utilities/box/test_box.cpp index fafd8e44e..3e1a11313 100644 --- a/tests/core/utilities/box/test_box.cpp +++ b/tests/core/utilities/box/test_box.cpp @@ -1,6 +1,7 @@ #include #include +#include #include "core/utilities/box/box.hpp" #include "core/utilities/point/point.hpp" @@ -295,11 +296,11 @@ TEST(BoxReverseIterator, hasEnd) auto cactual = std::rend(cb1); EXPECT_EQ(cexpected, *cactual); - auto expected2 = Point{0, 3}; + auto expected2 = Point{0, 4}; auto actual2 = std::rend(b2); EXPECT_EQ(expected2, *actual2); - auto expected3 = Point{0, 3, 8}; + auto expected3 = Point{0, 4, 9}; auto actual3 = std::rend(b3); EXPECT_EQ(expected3, *actual3); } @@ -308,7 +309,7 @@ TEST(BoxReverseIterator, iterates) { Box b1{{1}, {10}}; Box const cb1{{1}, {10}}; - Box b2{{1, 4}, {10, 12}}; + Box b2{{0, 0}, {10, 12}}; Box b3{{1, 4, 9}, {10, 12, 24}}; { @@ -356,7 +357,7 @@ TEST(BoxReverseIterator, iterates) { dummy = *it; } - auto expected = Point{1, 4}; + auto expected = Point{0, 0}; EXPECT_EQ(expected, dummy); } { @@ -370,6 +371,33 @@ TEST(BoxReverseIterator, iterates) } } +TEST(BoxReverseIterator, handlesUnsignedInteger) +{ + Box b2{{0, 0}, {10, 12}}; + Box b3{{0, 0, 0}, {12, 11, 34}}; + + { + auto dummy = Point{}; + for (auto it = b2.rbegin(); it != b2.rend(); ++it) + { + dummy = *it; + } + auto expected = Point{0, 0}; + EXPECT_EQ(expected, dummy); + } + + { + auto dummy = Point{}; + for (auto it = b3.rbegin(); it != b3.rend(); ++it) + { + dummy = *it; + } + auto expected = Point{0, 0, 0}; + EXPECT_EQ(expected, dummy); + } +} + + int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); From db16aa1842a2078b3881c0747f50ec305b1bb193 Mon Sep 17 00:00:00 2001 From: Ivan Girault Date: Tue, 24 Feb 2026 
11:19:07 +0100 Subject: [PATCH 5/7] fix rebase errors --- pyphare/pyphare/pharein/diagnostics.py | 2 +- pyphare/pyphare/pharein/restarts.py | 4 ++-- pyphare/pyphare/pharesee/run/utils.py | 9 --------- tests/amr/models/test_models.cpp | 4 ++++ tests/functional/mhd_convergence/convergence.py | 1 + tests/simulator/test_diagnostics.py | 4 ++-- tests/simulator/test_initialization.py | 8 ++++---- 7 files changed, 14 insertions(+), 18 deletions(-) diff --git a/pyphare/pyphare/pharein/diagnostics.py b/pyphare/pyphare/pharein/diagnostics.py index 4d2a08bd1..90664b7e3 100644 --- a/pyphare/pyphare/pharein/diagnostics.py +++ b/pyphare/pyphare/pharein/diagnostics.py @@ -277,7 +277,7 @@ def to_dict(self): def population_in_model(population): - return population in [p for p in global_vars.sim.maxwellian_fluid_model.populations] + return population in [p for p in global_vars.sim.model.populations] class FluidDiagnostics_(Diagnostics): diff --git a/pyphare/pyphare/pharein/restarts.py b/pyphare/pyphare/pharein/restarts.py index 1c5a1006c..aa6076031 100644 --- a/pyphare/pyphare/pharein/restarts.py +++ b/pyphare/pyphare/pharein/restarts.py @@ -59,7 +59,7 @@ def restart_time(restart_options): if restart_options["restart_time"] == "auto": return find_latest_time_from_restarts(restart_options) return restart_options["restart_time"] - return 0 + return None def find_latest_time_from_restarts(restart_options): @@ -73,7 +73,7 @@ def find_latest_time_from_restarts(restart_options): except ValueError: ... 
# skipped - return 0 if len(dirs) == 0 else sorted(dirs)[-1] + return None if len(dirs) == 0 else sorted(dirs)[-1] # ------------------------------------------------------------------------------ diff --git a/pyphare/pyphare/pharesee/run/utils.py b/pyphare/pyphare/pharesee/run/utils.py index 6e0a79086..618669f4e 100644 --- a/pyphare/pyphare/pharesee/run/utils.py +++ b/pyphare/pyphare/pharesee/run/utils.py @@ -6,15 +6,6 @@ from pyphare.core.gridlayout import yee_centering -from pyphare.core.gridlayout import yee_centering - - -from pyphare.core.gridlayout import yee_centering - - -from pyphare.core.gridlayout import yee_centering - - def _current1d(by, bz, xby, xbz): # jx = 0 # jy = -dxBz diff --git a/tests/amr/models/test_models.cpp b/tests/amr/models/test_models.cpp index 0468d6044..e87cf9692 100644 --- a/tests/amr/models/test_models.cpp +++ b/tests/amr/models/test_models.cpp @@ -130,6 +130,10 @@ PHARE::initializer::PHAREDict createDict() dict["electrons"]["pressure_closure"]["name"] = std::string{"isothermal"}; dict["electrons"]["pressure_closure"]["Te"] = 0.12; + dict["grid"]["boundary_conditions"]["xlower"]["type"] = std::string{"none"}; + dict["grid"]["boundary_conditions"]["xupper"]["type"] = std::string{"none"}; + + return dict; } diff --git a/tests/functional/mhd_convergence/convergence.py b/tests/functional/mhd_convergence/convergence.py index 5873519a8..d56d8175f 100644 --- a/tests/functional/mhd_convergence/convergence.py +++ b/tests/functional/mhd_convergence/convergence.py @@ -152,3 +152,4 @@ def main(): if __name__ == "__main__": main() + diff --git a/tests/simulator/test_diagnostics.py b/tests/simulator/test_diagnostics.py index ede9376e6..46a4b3e05 100644 --- a/tests/simulator/test_diagnostics.py +++ b/tests/simulator/test_diagnostics.py @@ -257,9 +257,9 @@ def test_dump_elapsed_time_diags(self, dim=1, interp=1): simInput = copy.deepcopy(simArgs) # configure simulation dim sized values for key in ["cells", "dl", "boundary_types"]: - simInput[key] = 
[simInput[key] for d in range(ndim)] + simInput[key] = [simInput[key] for d in range(dim)] - b0 = [[10 for i in range(ndim)], [19 for i in range(ndim)]] + b0 = [[10 for i in range(dim)], [19 for i in range(dim)]] simInput["refinement_boxes"] = {"L0": {"B0": b0}} diag_path = self.unique_diag_dir_for_test_case(f"{out}/test", dim, interp) diff --git a/tests/simulator/test_initialization.py b/tests/simulator/test_initialization.py index d7c772965..f5e5c82e2 100644 --- a/tests/simulator/test_initialization.py +++ b/tests/simulator/test_initialization.py @@ -267,7 +267,7 @@ def _test_B_is_as_provided_by_user(self, dim, interp_order, ppc=100, **kwargs): from pyphare.pharein import global_vars - model = global_vars.sim.maxwellian_fluid_model + model = global_vars.sim.model bx_fn = model.model_dict["bx"] by_fn = model.model_dict["by"] @@ -361,7 +361,7 @@ def _test_bulkvel_is_as_provided_by_user( from pyphare.pharein import global_vars - model = global_vars.sim.maxwellian_fluid_model + model = global_vars.sim.model # protons and beam have same bulk vel here so take only proton func. 
vx_fn = model.model_dict["protons"]["vx"] vy_fn = model.model_dict["protons"]["vy"] @@ -430,7 +430,7 @@ def _test_density_is_as_provided_by_user(self, ndim, interp_order, **kwargs): from pyphare.pharein import global_vars - model = global_vars.sim.maxwellian_fluid_model + model = global_vars.sim.model proton_density_fn = model.model_dict["protons"]["density"] beam_density_fn = model.model_dict["beam"]["density"] @@ -503,7 +503,7 @@ def _test_density_decreases_as_1overSqrtN( from pyphare.pharein import global_vars - model = global_vars.sim.maxwellian_fluid_model + model = global_vars.sim.model density_fn = model.model_dict["protons"]["density"] patch = hier.level(0).patches[0] From 951ddcc6de82fc677e5a0a4fecb769a2e86b4831 Mon Sep 17 00:00:00 2001 From: Ivan Girault Date: Wed, 25 Feb 2026 12:11:29 +0100 Subject: [PATCH 6/7] phil's remarks --- src/amr/data/field/field_data_traits.hpp | 64 +- .../refine/field_refine_patch_strategy.hpp | 16 +- .../refine/magnetic_refine_patch_strategy.hpp | 5 - .../data/tensorfield/tensor_field_data.hpp | 4 - .../tensorfield/tensor_field_data_traits.hpp | 2 +- .../hybrid_hybrid_messenger_strategy.hpp | 1812 ++++++++--------- src/amr/messengers/messenger_factory.hpp | 7 +- src/amr/messengers/mhd_messenger.hpp | 1293 ++++++------ src/amr/physical_models/mhd_model.hpp | 5 +- src/core/boundary/boundary.hpp | 6 +- src/core/boundary/boundary_defs.hpp | 9 +- src/core/boundary/boundary_factory.hpp | 2 +- src/core/boundary/boundary_manager.hpp | 10 +- src/core/data/field/field_traits.hpp | 1 - src/core/data/grid/gridlayout_traits.hpp | 4 +- ...field_antisymmetric_boundary_condition.hpp | 10 +- .../field_boundary_condition.hpp | 4 +- .../field_boundary_condition_dispatcher.hpp | 5 +- .../field_boundary_condition_factory.hpp | 2 +- .../field_dirichlet_boundary_condition.hpp | 2 +- ..._transverse_neumann_boundary_condition.hpp | 4 +- .../field_neumann_boundary_condition.hpp | 2 +- .../field_symmetric_boundary_condition.hpp | 6 +- 23 files changed, 
1595 insertions(+), 1680 deletions(-) diff --git a/src/amr/data/field/field_data_traits.hpp b/src/amr/data/field/field_data_traits.hpp index 304b28b8c..fb49ba35c 100644 --- a/src/amr/data/field/field_data_traits.hpp +++ b/src/amr/data/field/field_data_traits.hpp @@ -1,41 +1,39 @@ #ifndef PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP #define PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP -#include -#include #include +#include -namespace PHARE -{ -namespace amr +#include + +namespace PHARE::amr { - /** - * @brief Concept ensuring a type satisfies the PHARE FieldData interface. - */ - template - concept IsFieldData - = std::derived_from - && requires(T a, T const ca, SAMRAI::hier::Patch const& patch) { - // Type aliases - typename T::gridlayout_type; - typename T::grid_type; - typename T::physical_quantity_type; - - // Static constexpr variables - requires std::same_as; - requires std::same_as; - - // Public member variables - requires std::same_as; - requires std::same_as; - - // API requirements - { a.getPointer() } -> std::same_as; - { T::getLayout(patch, 0) } -> std::same_as; - { T::getField(patch, 0) } -> std::same_as; - }; - -} // namespace amr -} // namespace PHARE +/** + * @brief Concept ensuring a type satisfies the PHARE FieldData interface. 
+ */ +template +concept IsFieldData + = std::derived_from + && requires(T a, T const ca, SAMRAI::hier::Patch const& patch) { + // Type aliases + typename T::gridlayout_type; + typename T::grid_type; + typename T::physical_quantity_type; + + // Static constexpr variables + requires std::same_as; + requires std::same_as; + + // Public member variables + requires std::same_as; + requires std::same_as; + + // API requirements + { a.getPointer() } -> std::same_as; + { T::getLayout(patch, 0) } -> std::same_as; + { T::getField(patch, 0) } -> std::same_as; + }; + +} // namespace PHARE::amr #endif // PHARE_SRC_AMR_FIELD_FIELD_DATA_TRAITS_HPP diff --git a/src/amr/data/field/refine/field_refine_patch_strategy.hpp b/src/amr/data/field/refine/field_refine_patch_strategy.hpp index 6c4431f24..74fc113a4 100644 --- a/src/amr/data/field/refine/field_refine_patch_strategy.hpp +++ b/src/amr/data/field/refine/field_refine_patch_strategy.hpp @@ -1,21 +1,18 @@ #ifndef PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP #define PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP -#include "SAMRAI/geom/CartesianPatchGeometry.h" -#include "SAMRAI/hier/PatchGeometry.h" + #include "core/boundary/boundary_defs.hpp" -#include "core/boundary/boundary.hpp" -#include "core/utilities/constants.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" #include "amr/data/field/field_data_traits.hpp" #include "amr/data/tensorfield/tensor_field_data_traits.hpp" -#include "SAMRAI/xfer/RefinePatchStrategy.h" -#include "SAMRAI/hier/BoundaryBox.h" #include "SAMRAI/hier/Box.h" #include "SAMRAI/hier/IntVector.h" +#include "SAMRAI/hier/BoundaryBox.h" #include "SAMRAI/hier/PatchGeometry.h" +#include "SAMRAI/xfer/RefinePatchStrategy.h" #include "SAMRAI/geom/CartesianPatchGeometry.h" #include @@ -24,10 +21,6 @@ namespace PHARE::amr { -using core::dirX; -using core::dirY; -using core::dirZ; - /** * @brief Strategy for filling physical boundary conditions and customizing patch refinment. 
* @@ -177,11 +170,8 @@ class FieldRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy protected: - /// Reference to the resources manager. ResMan& rm_; - /// Reference to the boundary manager. BoundaryManagerT& boundaryManager_; - /// SAMRAI patch data identifier. int data_id_; }; diff --git a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp index c9705edb8..5c04e2a46 100644 --- a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp +++ b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp @@ -2,19 +2,14 @@ #define PHARE_AMR_MAGNETIC_REFINE_PATCH_STRATEGY_HPP #include "core/utilities/types.hpp" -#include "core/utilities/constants.hpp" #include "amr/utilities/box/amr_box.hpp" -#include "amr/data/field/field_geometry.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/data/tensorfield/tensor_field_data_traits.hpp" #include "amr/data/field/refine/field_refine_patch_strategy.hpp" namespace PHARE::amr { -using core::dirX; -using core::dirY; -using core::dirZ; /** * @brief Strategy for magnetic field refinement in AMR patches. 
diff --git a/src/amr/data/tensorfield/tensor_field_data.hpp b/src/amr/data/tensorfield/tensor_field_data.hpp index 0e41f799b..454d273fb 100644 --- a/src/amr/data/tensorfield/tensor_field_data.hpp +++ b/src/amr/data/tensorfield/tensor_field_data.hpp @@ -5,20 +5,16 @@ #include "core/logger.hpp" #include "core/data/field/field_box.hpp" -#include "core/data/grid/gridlayoutdefs.hpp" #include "core/data/tensorfield/tensorfield.hpp" #include "amr/data/field/field_geometry.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/data/tensorfield/tensor_field_overlap.hpp" -#include "amr/resources_manager/amr_utils.hpp" -#include "amr/data/field/field_overlap.hpp" #include "amr/data/tensorfield/tensor_field_geometry.hpp" #include #include -#include #include diff --git a/src/amr/data/tensorfield/tensor_field_data_traits.hpp b/src/amr/data/tensorfield/tensor_field_data_traits.hpp index 5e372f3d7..4dec8feb4 100644 --- a/src/amr/data/tensorfield/tensor_field_data_traits.hpp +++ b/src/amr/data/tensorfield/tensor_field_data_traits.hpp @@ -4,8 +4,8 @@ #include #include -#include "SAMRAI/hier/PatchData.h" #include "SAMRAI/hier/Patch.h" +#include "SAMRAI/hier/PatchData.h" namespace PHARE { diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp index b846002fa..8782911f9 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp @@ -1,1201 +1,1165 @@ #ifndef PHARE_HYBRID_HYBRID_MESSENGER_STRATEGY_HPP #define PHARE_HYBRID_HYBRID_MESSENGER_STRATEGY_HPP -#include "core/def.hpp" #include "core/logger.hpp" #include "core/def/phare_mpi.hpp" // IWYU pragma: keep -#include "core/utilities/types.hpp" #include "core/hybrid/hybrid_quantities.hpp" #include "core/numerics/interpolator/interpolator.hpp" -#include "core/utilities/types.hpp" -#include "core/utilities/types.hpp" #include "refiner_pool.hpp" #include "synchronizer_pool.hpp" -#include 
"amr/data/field/coarsening/moments_coarsener.hpp" #include "amr/types/amr_types.hpp" #include "amr/messengers/messenger_info.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/data/field/refine/field_refiner.hpp" -#include "amr/data/field/refine/field_moments_refiner.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" #include "amr/messengers/hybrid_messenger_strategy.hpp" -#include "amr/data/field/coarsening/electric_field_coarsener.hpp" #include "amr/data/field/field_variable_fill_pattern.hpp" #include "amr/data/field/coarsening/moments_coarsener.hpp" -#include "amr/data/field/refine/field_moments_refiner.hpp" #include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/coarsening/moments_coarsener.hpp" #include "amr/data/field/refine/electric_field_refiner.hpp" -#include "amr/data/field/refine/magnetic_field_init_refiner.hpp" #include "amr/data/field/refine/magnetic_field_refiner.hpp" -#include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" -#include "amr/data/field/refine/magnetic_field_regrider.hpp" #include "amr/data/field/coarsening/field_coarsen_operator.hpp" -#include "amr/data/field/refine/magnetic_field_init_refiner.hpp" #include "amr/data/field/coarsening/default_field_coarsener.hpp" -#include "amr/data/field/coarsening/electric_field_coarsener.hpp" +#include "amr/data/field/refine/magnetic_field_init_refiner.hpp" #include "amr/data/particles/particles_variable_fill_pattern.hpp" +#include "amr/data/field/coarsening/electric_field_coarsener.hpp" #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" #include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include "core/utilities/index/index.hpp" -#include "core/numerics/interpolator/interpolator.hpp" -#include "core/hybrid/hybrid_quantities.hpp" -#include "core/data/particles/particle_array.hpp" -#include "core/data/vecfield/vecfield.hpp" -#include "core/utilities/point/point.hpp" - -#include 
"SAMRAI/xfer/RefineAlgorithm.h" #include "SAMRAI/xfer/RefineSchedule.h" -#include "SAMRAI/xfer/CoarsenAlgorithm.h" #include "SAMRAI/xfer/CoarsenSchedule.h" -#include "SAMRAI/xfer/BoxGeometryVariableFillPattern.h" -#include "SAMRAI/hier/CoarseFineBoundary.h" -#include "SAMRAI/hier/IntVector.h" +#include "SAMRAI/xfer/RefineAlgorithm.h" +#include "SAMRAI/xfer/CoarsenAlgorithm.h" #include #include -#include #include #include #include - - -namespace PHARE +namespace PHARE::amr { -namespace amr +/** \brief An HybridMessenger is the specialization of a HybridMessengerStrategy for hybrid + * to hybrid data communications. + */ +template +class HybridHybridMessengerStrategy : public HybridMessengerStrategy { - /** \brief An HybridMessenger is the specialization of a HybridMessengerStrategy for hybrid - * to hybrid data communications. - */ - template - class HybridHybridMessengerStrategy : public HybridMessengerStrategy + using amr_types = PHARE::amr::SAMRAI_Types; + using level_t = amr_types::level_t; + using patch_t = amr_types::patch_t; + using hierarchy_t = amr_types::hierarchy_t; + + using GridT = HybridModel::grid_type; + using IonsT = HybridModel::ions_type; + using ElectromagT = HybridModel::electromag_type; + using VecFieldT = HybridModel::vecfield_type; + using TensorFieldT = IonsT::tensorfield_type; + using GridLayoutT = HybridModel::gridlayout_type; + using FieldT = VecFieldT::field_type; + using VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::HybridQuantity>; + using ResourcesManagerT = HybridModel::resources_manager_type; + using BoundaryManagerT = HybridModel::boundary_manager_type; + using IPhysicalModel = HybridModel::Interface; + + static constexpr std::size_t dimension = GridLayoutT::dimension; + static constexpr std::size_t interpOrder = GridLayoutT::interp_order; + + using InteriorParticleRefineOp = RefinementParams::InteriorParticleRefineOp; + using CoarseToFineRefineOpOld = RefinementParams::CoarseToFineRefineOpOld; + using 
CoarseToFineRefineOpNew = RefinementParams::CoarseToFineRefineOpNew; + + template + using FieldRefineOp = FieldRefineOperator; + + template + using VecFieldRefineOp = VecFieldRefineOperator; + + using DefaultFieldRefineOp = FieldRefineOp>; + using DefaultVecFieldRefineOp = VecFieldRefineOp>; + // using FieldMomentsRefineOp = FieldRefineOp>; + // using VecFieldMomentsRefineOp = VecFieldRefineOp>; + using MagneticFieldInitRefineOp = VecFieldRefineOp>; + using MagneticFieldRefineOp = VecFieldRefineOp>; + using ElectricFieldRefineOp = VecFieldRefineOp>; + using FieldTimeInterp = FieldLinearTimeInterpolate; + + using VecFieldTimeInterp + = VecFieldLinearTimeInterpolate; + + template + using FieldCoarsenOp = FieldCoarsenOperator; + + template + using VecFieldCoarsenOp + = VecFieldCoarsenOperator; + + using DefaultFieldCoarsenOp = FieldCoarsenOp>; + using DefaultVecFieldCoarsenOp = VecFieldCoarsenOp>; + using MomentsFieldCoarsenOp = FieldCoarsenOp>; + using MomentsVecFieldCoarsenOp = VecFieldCoarsenOp>; + using ElectricFieldCoarsenOp = VecFieldCoarsenOp>; + +public: + static inline std::string const stratName = "HybridModel-HybridModel"; + static constexpr std::size_t rootLevelNumber = 0; + + + HybridHybridMessengerStrategy(std::shared_ptr const& resourcesManager, + std::shared_ptr const& boundaryManager, + int const firstLevel) + : HybridMessengerStrategy{stratName} + , resourcesManager_{resourcesManager} + , boundaryManager_{boundaryManager} + , firstLevel_{firstLevel} { - using amr_types = PHARE::amr::SAMRAI_Types; - using level_t = amr_types::level_t; - using patch_t = amr_types::patch_t; - using hierarchy_t = amr_types::hierarchy_t; - - using GridT = HybridModel::grid_type; - using IonsT = HybridModel::ions_type; - using ElectromagT = HybridModel::electromag_type; - using VecFieldT = HybridModel::vecfield_type; - using TensorFieldT = IonsT::tensorfield_type; - using GridLayoutT = HybridModel::gridlayout_type; - using FieldT = VecFieldT::field_type; - using 
VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::HybridQuantity>; - using ResourcesManagerT = HybridModel::resources_manager_type; - using BoundaryManagerT = HybridModel::boundary_manager_type; - using IPhysicalModel = HybridModel::Interface; - - static constexpr std::size_t dimension = GridLayoutT::dimension; - static constexpr std::size_t interpOrder = GridLayoutT::interp_order; - - using InteriorParticleRefineOp = RefinementParams::InteriorParticleRefineOp; - using CoarseToFineRefineOpOld = RefinementParams::CoarseToFineRefineOpOld; - using CoarseToFineRefineOpNew = RefinementParams::CoarseToFineRefineOpNew; - - template - using FieldRefineOp = FieldRefineOperator; - - template - using VecFieldRefineOp = VecFieldRefineOperator; - - using DefaultFieldRefineOp = FieldRefineOp>; - using DefaultVecFieldRefineOp = VecFieldRefineOp>; - // using FieldMomentsRefineOp = FieldRefineOp>; - // using VecFieldMomentsRefineOp = VecFieldRefineOp>; - using MagneticFieldInitRefineOp = VecFieldRefineOp>; - using MagneticFieldRefineOp = VecFieldRefineOp>; - using ElectricFieldRefineOp = VecFieldRefineOp>; - using FieldTimeInterp = FieldLinearTimeInterpolate; - - using VecFieldTimeInterp - = VecFieldLinearTimeInterpolate; - - template - using FieldCoarsenOp = FieldCoarsenOperator; - - template - using VecFieldCoarsenOp - = VecFieldCoarsenOperator; - - using DefaultFieldCoarsenOp = FieldCoarsenOp>; - using DefaultVecFieldCoarsenOp = VecFieldCoarsenOp>; - using MomentsFieldCoarsenOp = FieldCoarsenOp>; - using MomentsVecFieldCoarsenOp = VecFieldCoarsenOp>; - using ElectricFieldCoarsenOp = VecFieldCoarsenOp>; - - public: - static inline std::string const stratName = "HybridModel-HybridModel"; - static constexpr std::size_t rootLevelNumber = 0; - - - HybridHybridMessengerStrategy(std::shared_ptr const& resourcesManager, - std::shared_ptr const& boundaryManager, - int const firstLevel) - : HybridMessengerStrategy{stratName} - , resourcesManager_{resourcesManager} - , 
boundaryManager_{boundaryManager} - , firstLevel_{firstLevel} - { - resourcesManager_->registerResources(Jold_); - resourcesManager_->registerResources(NiOld_); - resourcesManager_->registerResources(ViOld_); - resourcesManager_->registerResources(sumVec_); - resourcesManager_->registerResources(sumField_); - resourcesManager_->registerResources(sumTensor_); - } - - virtual ~HybridHybridMessengerStrategy() = default; - + resourcesManager_->registerResources(Jold_); + resourcesManager_->registerResources(NiOld_); + resourcesManager_->registerResources(ViOld_); + resourcesManager_->registerResources(sumVec_); + resourcesManager_->registerResources(sumField_); + resourcesManager_->registerResources(sumTensor_); + } + virtual ~HybridHybridMessengerStrategy() = default; - /* ------------------------------------------------------------------------ - methods used for the IMessenger interface - ------------------------------------------------------------------------ */ - /** - * @brief allocate the messenger strategy internal variables to the model - * resourceManager - */ - void allocate(patch_t& patch, double const allocateTime) const override - { - resourcesManager_->allocate(Jold_, patch, allocateTime); - resourcesManager_->allocate(NiOld_, patch, allocateTime); - resourcesManager_->allocate(ViOld_, patch, allocateTime); - resourcesManager_->allocate(sumVec_, patch, allocateTime); - resourcesManager_->allocate(sumField_, patch, allocateTime); - resourcesManager_->allocate(sumTensor_, patch, allocateTime); - } - + /* ------------------------------------------------------------------------ + methods used for the IMessenger interface + ------------------------------------------------------------------------ */ - /** - * @brief setup creates all SAMRAI algorithms to communicate data involved in a - * messenger between the coarse and fine levels. - * - * This method creates the SAMRAI algorithms for communications associated between pairs - * of variables. 
The function does not create the SAMRAI schedules since they depend on - * the levels - */ - void registerQuantities([[maybe_unused]] std::unique_ptr fromCoarserInfo, - std::unique_ptr fromFinerInfo) override - { - std::unique_ptr hybridInfo{ - dynamic_cast(fromFinerInfo.release())}; + /** + * @brief allocate the messenger strategy internal variables to the model + * resourceManager + */ + void allocate(patch_t& patch, double const allocateTime) const override + { + resourcesManager_->allocate(Jold_, patch, allocateTime); + resourcesManager_->allocate(NiOld_, patch, allocateTime); + resourcesManager_->allocate(ViOld_, patch, allocateTime); + resourcesManager_->allocate(sumVec_, patch, allocateTime); + resourcesManager_->allocate(sumField_, patch, allocateTime); + resourcesManager_->allocate(sumTensor_, patch, allocateTime); + } + + + + /** + * @brief setup creates all SAMRAI algorithms to communicate data involved in a + * messenger between the coarse and fine levels. + * + * This method creates the SAMRAI algorithms for communications associated between pairs + * of variables. The function does not create the SAMRAI schedules since they depend on + * the levels + */ + void registerQuantities([[maybe_unused]] std::unique_ptr fromCoarserInfo, + std::unique_ptr fromFinerInfo) override + { + std::unique_ptr hybridInfo{ + dynamic_cast(fromFinerInfo.release())}; - auto&& [b_id] = resourcesManager_->getIDsList(hybridInfo->modelMagnetic); + auto&& [b_id] = resourcesManager_->getIDsList(hybridInfo->modelMagnetic); - magneticRefinePatchStrategy_.registerIDs(b_id); + magneticRefinePatchStrategy_.registerIDs(b_id); - // we do not overwrite interior on patch ghost filling. In theory this doesn't matter - // much since the only interior values are the outermost layer of faces of the domain, - // and should be near equal from one patch to the other. 
- BalgoPatchGhost.registerRefine(b_id, b_id, b_id, nullptr, - nonOverwriteInteriorTFfillPattern); + // we do not overwrite interior on patch ghost filling. In theory this doesn't matter + // much since the only interior values are the outermost layer of faces of the domain, + // and should be near equal from one patch to the other. + BalgoPatchGhost.registerRefine(b_id, b_id, b_id, nullptr, + nonOverwriteInteriorTFfillPattern); - // for regrid, we need to overwrite the interior or else only the new ghosts would be - // filled. We also need to use the regrid operator, which checks for nans before filling - // the new values, as we do not want to overwrite the copy that was already done for the - // faces that were already there before regrid. - BregridAlgo.registerRefine(b_id, b_id, b_id, BRefineOp_, - overwriteInteriorTFfillPattern); + // for regrid, we need to overwrite the interior or else only the new ghosts would be + // filled. We also need to use the regrid operator, which checks for nans before filling + // the new values, as we do not want to overwrite the copy that was already done for the + // faces that were already there before regrid. 
+ BregridAlgo.registerRefine(b_id, b_id, b_id, BRefineOp_, overwriteInteriorTFfillPattern); - auto&& [e_id] = resourcesManager_->getIDsList(hybridInfo->modelElectric); + auto&& [e_id] = resourcesManager_->getIDsList(hybridInfo->modelElectric); - EalgoPatchGhost.registerRefine(e_id, e_id, e_id, EfieldRefineOp_, - nonOverwriteInteriorTFfillPattern); + EalgoPatchGhost.registerRefine(e_id, e_id, e_id, EfieldRefineOp_, + nonOverwriteInteriorTFfillPattern); - auto&& [e_reflux_id] = resourcesManager_->getIDsList(hybridInfo->refluxElectric); - auto&& [e_fluxsum_id] = resourcesManager_->getIDsList(hybridInfo->fluxSumElectric); + auto&& [e_reflux_id] = resourcesManager_->getIDsList(hybridInfo->refluxElectric); + auto&& [e_fluxsum_id] = resourcesManager_->getIDsList(hybridInfo->fluxSumElectric); - RefluxAlgo.registerCoarsen(e_reflux_id, e_fluxsum_id, electricFieldCoarseningOp_); + RefluxAlgo.registerCoarsen(e_reflux_id, e_fluxsum_id, electricFieldCoarseningOp_); - // we then need to refill the ghosts so that they agree with the newly refluxed cells - PatchGhostRefluxedAlgo.registerRefine(e_reflux_id, e_reflux_id, e_reflux_id, - EfieldRefineOp_, - nonOverwriteInteriorTFfillPattern); + // we then need to refill the ghosts so that they agree with the newly refluxed cells + PatchGhostRefluxedAlgo.registerRefine(e_reflux_id, e_reflux_id, e_reflux_id, + EfieldRefineOp_, nonOverwriteInteriorTFfillPattern); - registerGhostComms_(hybridInfo); - registerInitComms_(hybridInfo); - registerSyncComms_(hybridInfo); - } + registerGhostComms_(hybridInfo); + registerInitComms_(hybridInfo); + registerSyncComms_(hybridInfo); + } - /** - * @brief all RefinerPool must be notified the level levelNumber now exist. 
- * not doing so will result in communication to/from that level being impossible - */ - void registerLevel(std::shared_ptr const& hierarchy, - int const levelNumber) override - { - auto const level = hierarchy->getPatchLevel(levelNumber); + /** + * @brief all RefinerPool must be notified the level levelNumber now exist. + * not doing so will result in communication to/from that level being impossible + */ + void registerLevel(std::shared_ptr const& hierarchy, + int const levelNumber) override + { + auto const level = hierarchy->getPatchLevel(levelNumber); - magPatchGhostsRefineSchedules[levelNumber] - = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); + magPatchGhostsRefineSchedules[levelNumber] + = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); - elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); + elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); - // technically not needed for finest as refluxing is not done onto it. - patchGhostRefluxedSchedules[levelNumber] = PatchGhostRefluxedAlgo.createSchedule(level); + // technically not needed for finest as refluxing is not done onto it. 
+ patchGhostRefluxedSchedules[levelNumber] = PatchGhostRefluxedAlgo.createSchedule(level); - elecGhostsRefiners_.registerLevel(hierarchy, level); - magGhostsRefiners_.registerLevel(hierarchy, level); - currentGhostsRefiners_.registerLevel(hierarchy, level); - // chargeDensityLevelGhostsRefiners_.registerLevel(hierarchy, level); - // velLevelGhostsRefiners_.registerLevel(hierarchy, level); - domainGhostPartRefiners_.registerLevel(hierarchy, level); + elecGhostsRefiners_.registerLevel(hierarchy, level); + magGhostsRefiners_.registerLevel(hierarchy, level); + currentGhostsRefiners_.registerLevel(hierarchy, level); + // chargeDensityLevelGhostsRefiners_.registerLevel(hierarchy, level); + // velLevelGhostsRefiners_.registerLevel(hierarchy, level); + domainGhostPartRefiners_.registerLevel(hierarchy, level); - chargeDensityPatchGhostsRefiners_.registerLevel(hierarchy, level); - velPatchGhostsRefiners_.registerLevel(hierarchy, level); + chargeDensityPatchGhostsRefiners_.registerLevel(hierarchy, level); + velPatchGhostsRefiners_.registerLevel(hierarchy, level); - for (auto& refiner : popFluxBorderSumRefiners_) - refiner.registerLevel(hierarchy, level); + for (auto& refiner : popFluxBorderSumRefiners_) + refiner.registerLevel(hierarchy, level); - for (auto& refiner : popDensityBorderSumRefiners_) - refiner.registerLevel(hierarchy, level); + for (auto& refiner : popDensityBorderSumRefiners_) + refiner.registerLevel(hierarchy, level); - for (auto& refiner : ionFluxBorderMaxRefiners_) - refiner.registerLevel(hierarchy, level); - for (auto& refiner : ionDensityBorderMaxRefiners_) - refiner.registerLevel(hierarchy, level); + for (auto& refiner : ionFluxBorderMaxRefiners_) + refiner.registerLevel(hierarchy, level); + for (auto& refiner : ionDensityBorderMaxRefiners_) + refiner.registerLevel(hierarchy, level); - // root level is not initialized with a schedule using coarser level data - // so we don't create these schedules if root level - // TODO this 'if' may not be OK if L0 is 
regrided - if (levelNumber != rootLevelNumber) - { - // refluxing - auto const& coarseLevel = hierarchy->getPatchLevel(levelNumber - 1); - refluxSchedules[levelNumber] = RefluxAlgo.createSchedule(coarseLevel, level); - - // those are for refinement - magInitRefineSchedules[levelNumber] = BalgoInit.createSchedule( - level, nullptr, levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); - - electricInitRefiners_.registerLevel(hierarchy, level); - domainParticlesRefiners_.registerLevel(hierarchy, level); - lvlGhostPartOldRefiners_.registerLevel(hierarchy, level); - lvlGhostPartNewRefiners_.registerLevel(hierarchy, level); - - // and these for coarsening - electroSynchronizers_.registerLevel(hierarchy, level); - chargeDensitySynchronizers_.registerLevel(hierarchy, level); - ionBulkVelSynchronizers_.registerLevel(hierarchy, level); - } + // root level is not initialized with a schedule using coarser level data + // so we don't create these schedules if root level + // TODO this 'if' may not be OK if L0 is regrided + if (levelNumber != rootLevelNumber) + { + // refluxing + auto const& coarseLevel = hierarchy->getPatchLevel(levelNumber - 1); + refluxSchedules[levelNumber] = RefluxAlgo.createSchedule(coarseLevel, level); + + // those are for refinement + magInitRefineSchedules[levelNumber] = BalgoInit.createSchedule( + level, nullptr, levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); + + electricInitRefiners_.registerLevel(hierarchy, level); + domainParticlesRefiners_.registerLevel(hierarchy, level); + lvlGhostPartOldRefiners_.registerLevel(hierarchy, level); + lvlGhostPartNewRefiners_.registerLevel(hierarchy, level); + + // and these for coarsening + electroSynchronizers_.registerLevel(hierarchy, level); + chargeDensitySynchronizers_.registerLevel(hierarchy, level); + ionBulkVelSynchronizers_.registerLevel(hierarchy, level); } + } - /** - * @brief regrid performs the regriding communications for Hybrid to Hybrid messengers - , all quantities that are in 
initialization refiners need to be regridded - */ - void regrid(std::shared_ptr const& hierarchy, int const levelNumber, - std::shared_ptr const& oldLevel, IPhysicalModel& model, - double const initDataTime) override - { - auto& hybridModel = dynamic_cast(model); - auto level = hierarchy->getPatchLevel(levelNumber); + /** + * @brief regrid performs the regriding communications for Hybrid to Hybrid messengers + , all quantities that are in initialization refiners need to be regridded + */ + void regrid(std::shared_ptr const& hierarchy, int const levelNumber, + std::shared_ptr const& oldLevel, IPhysicalModel& model, + double const initDataTime) override + { + auto& hybridModel = dynamic_cast(model); + auto level = hierarchy->getPatchLevel(levelNumber); - bool const isRegriddingL0 = levelNumber == 0 and oldLevel; + bool const isRegriddingL0 = levelNumber == 0 and oldLevel; - magneticRegriding_(hierarchy, level, oldLevel, initDataTime); - electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); - domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); + magneticRegriding_(hierarchy, level, oldLevel, initDataTime); + electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); + domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); - // we now call only levelGhostParticlesOld.fill() and not .regrid() - // regrid() would refine from next coarser in regions of level not overlaping - // oldLevel, but copy from domain particles of oldLevel where there is an - // overlap while we do not a priori see why this could be wrong,but this led to - // occasional failures of the SAMRAI MPI module. See - // https://github.com/PHAREHUB/PHARE/issues/604 calling .fill() ensures that - // levelGhostParticlesOld particles are filled exclusively from spliting next - // coarser domain ones like when a new finest level is created. 
+ // we now call only levelGhostParticlesOld.fill() and not .regrid() + // regrid() would refine from next coarser in regions of level not overlaping + // oldLevel, but copy from domain particles of oldLevel where there is an + // overlap while we do not a priori see why this could be wrong,but this led to + // occasional failures of the SAMRAI MPI module. See + // https://github.com/PHAREHUB/PHARE/issues/604 calling .fill() ensures that + // levelGhostParticlesOld particles are filled exclusively from spliting next + // coarser domain ones like when a new finest level is created. - if (levelNumber != rootLevelNumber) - { - lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); - copyLevelGhostOldToPushable_(*level, model); - } + if (levelNumber != rootLevelNumber) + { + lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); + copyLevelGhostOldToPushable_(*level, model); + } - // levelGhostNew will be refined in next firstStep + // levelGhostNew will be refined in next firstStep - // after filling the new level with the regrid schedule, some - // nodes may not have been copied correctly, due to a bug in SAMRAI - // it seems these nodes are only on ghost box border if that border - // overlaps an old level patch border. See https://github.com/LLNL/SAMRAI/pull/293 - } + // after filling the new level with the regrid schedule, some + // nodes may not have been copied correctly, due to a bug in SAMRAI + // it seems these nodes are only on ghost box border if that border + // overlaps an old level patch border. 
See https://github.com/LLNL/SAMRAI/pull/293 + } - std::string fineModelName() const override { return HybridModel::model_name; } + std::string fineModelName() const override { return HybridModel::model_name; } - std::string coarseModelName() const override { return HybridModel::model_name; } + std::string coarseModelName() const override { return HybridModel::model_name; } - std::unique_ptr emptyInfoFromCoarser() override - { - return std::make_unique(); - } + std::unique_ptr emptyInfoFromCoarser() override + { + return std::make_unique(); + } - std::unique_ptr emptyInfoFromFiner() override - { - return std::make_unique(); - } + std::unique_ptr emptyInfoFromFiner() override + { + return std::make_unique(); + } - /** - * @brief initLevel is used to initialize hybrid data on the level levelNumer at - * time initDataTime from hybrid coarser data. - */ - void initLevel(IPhysicalModel& model, level_t& level, double const initDataTime) override - { - auto levelNumber = level.getLevelNumber(); + /** + * @brief initLevel is used to initialize hybrid data on the level levelNumer at + * time initDataTime from hybrid coarser data. + */ + void initLevel(IPhysicalModel& model, level_t& level, double const initDataTime) override + { + auto levelNumber = level.getLevelNumber(); - auto& hybridModel = static_cast(model); + auto& hybridModel = static_cast(model); - magInitRefineSchedules[levelNumber]->fillData(initDataTime); - electricInitRefiners_.fill(levelNumber, initDataTime); + magInitRefineSchedules[levelNumber]->fillData(initDataTime); + electricInitRefiners_.fill(levelNumber, initDataTime); - // no need to call these : - // magGhostsRefiners_.fill(levelNumber, initDataTime); - // elecGhostsRefiners_.fill(levelNumber, initDataTime); - // because the SAMRAI schedules in the 'init' communicators - // already fill the patch ghost box from the neighbor interior box. - // so ghost nodes are already filled . 
+ // no need to call these : + // magGhostsRefiners_.fill(levelNumber, initDataTime); + // elecGhostsRefiners_.fill(levelNumber, initDataTime); + // because the SAMRAI schedules in the 'init' communicators + // already fill the patch ghost box from the neighbor interior box. + // so ghost nodes are already filled . - PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); - domainParticlesRefiners_.fill(levelNumber, initDataTime); - PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); + PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); + domainParticlesRefiners_.fill(levelNumber, initDataTime); + PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); - lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); + lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); - // levelGhostParticles will be pushed during the advance phase - // they need to be identical to levelGhostParticlesOld before advance - copyLevelGhostOldToPushable_(level, model); - } + // levelGhostParticles will be pushed during the advance phase + // they need to be identical to levelGhostParticlesOld before advance + copyLevelGhostOldToPushable_(level, model); + } - /* ------------------------------------------------------------------------ - methods used for the HybridMessenger interface - ------------------------------------------------------------------------ */ + /* ------------------------------------------------------------------------ + methods used for the HybridMessenger interface + ------------------------------------------------------------------------ */ - void fillMagneticGhosts(VecFieldT& B, level_t const& level, double const fillTime) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillMagneticGhosts"); + void fillMagneticGhosts(VecFieldT& B, level_t const& level, double const fillTime) override + { + PHARE_LOG_SCOPE(3, 
"HybridHybridMessengerStrategy::fillMagneticGhosts"); - setNaNsOnVecfieldGhosts(B, level); - magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); - } + setNaNsOnVecfieldGhosts(B, level); + magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); + } - void fillElectricGhosts(VecFieldT& E, level_t const& level, double const fillTime) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillElectricGhosts"); + void fillElectricGhosts(VecFieldT& E, level_t const& level, double const fillTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillElectricGhosts"); - setNaNsOnVecfieldGhosts(E, level); - elecGhostsRefiners_.fill(E, level.getLevelNumber(), fillTime); - } + setNaNsOnVecfieldGhosts(E, level); + elecGhostsRefiners_.fill(E, level.getLevelNumber(), fillTime); + } - void fillCurrentGhosts(VecFieldT& J, level_t const& level, double const fillTime) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillCurrentGhosts"); - setNaNsOnVecfieldGhosts(J, level); - currentGhostsRefiners_.fill(J, level.getLevelNumber(), fillTime); - } + void fillCurrentGhosts(VecFieldT& J, level_t const& level, double const fillTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillCurrentGhosts"); + setNaNsOnVecfieldGhosts(J, level); + currentGhostsRefiners_.fill(J, level.getLevelNumber(), fillTime); + } - /** - * @brief fillIonGhostParticles will fill the interior ghost particle array from - * neighbor patches of the same level. Before doing that, it empties the array for - * all populations - */ - void fillIonGhostParticles(IonsT& ions, level_t& level, double const fillTime) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonGhostParticles"); + /** + * @brief fillIonGhostParticles will fill the interior ghost particle array from + * neighbor patches of the same level. 
Before doing that, it empties the array for + * all populations + */ + void fillIonGhostParticles(IonsT& ions, level_t& level, double const fillTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonGhostParticles"); - domainGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); + domainGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); - for (auto patch : resourcesManager_->enumerate(level, ions)) - for (auto& pop : ions) - pop.patchGhostParticles().clear(); - } + for (auto patch : resourcesManager_->enumerate(level, ions)) + for (auto& pop : ions) + pop.patchGhostParticles().clear(); + } - void fillFluxBorders(IonsT& ions, level_t& level, double const fillTime) override - { - auto constexpr N = core::detail::tensor_field_dim_from_rank<1>(); - using value_type = FieldT::value_type; + void fillFluxBorders(IonsT& ions, level_t& level, double const fillTime) override + { + auto constexpr N = core::detail::tensor_field_dim_from_rank<1>(); + using value_type = FieldT::value_type; - // we cannot have the schedule doign the += in place in the flux array - // because some overlaps could be counted several times. - // we therefore first copy flux into a sumVec buffer and then - // execute the schedule onto that before copying it back onto the flux array - for (std::size_t i = 0; i < ions.size(); ++i) - { - for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) - for (std::uint8_t c = 0; c < N; ++c) - std::memcpy(sumVec_[c].data(), ions[i].flux()[c].data(), - ions[i].flux()[c].size() * sizeof(value_type)); + // we cannot have the schedule doign the += in place in the flux array + // because some overlaps could be counted several times. 
+ // we therefore first copy flux into a sumVec buffer and then + // execute the schedule onto that before copying it back onto the flux array + for (std::size_t i = 0; i < ions.size(); ++i) + { + for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(sumVec_[c].data(), ions[i].flux()[c].data(), + ions[i].flux()[c].size() * sizeof(value_type)); - popFluxBorderSumRefiners_[i].fill(level.getLevelNumber(), fillTime); + popFluxBorderSumRefiners_[i].fill(level.getLevelNumber(), fillTime); - for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) - for (std::uint8_t c = 0; c < N; ++c) - std::memcpy(ions[i].flux()[c].data(), sumVec_[c].data(), - ions[i].flux()[c].size() * sizeof(value_type)); - } + for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(ions[i].flux()[c].data(), sumVec_[c].data(), + ions[i].flux()[c].size() * sizeof(value_type)); } + } - void fillDensityBorders(IonsT& ions, level_t& level, double const fillTime) override - { - using value_type = FieldT::value_type; + void fillDensityBorders(IonsT& ions, level_t& level, double const fillTime) override + { + using value_type = FieldT::value_type; - assert(popDensityBorderSumRefiners_.size() % ions.size() == 0); + assert(popDensityBorderSumRefiners_.size() % ions.size() == 0); - std::size_t const fieldsPerPop = popDensityBorderSumRefiners_.size() / ions.size(); + std::size_t const fieldsPerPop = popDensityBorderSumRefiners_.size() / ions.size(); - for (std::size_t i = 0; i < ions.size(); ++i) - { - for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) - std::memcpy(sumField_.data(), ions[i].particleDensity().data(), - ions[i].particleDensity().size() * sizeof(value_type)); + for (std::size_t i = 0; i < ions.size(); ++i) + { + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(sumField_.data(), 
ions[i].particleDensity().data(), + ions[i].particleDensity().size() * sizeof(value_type)); - popDensityBorderSumRefiners_[i * fieldsPerPop].fill(level.getLevelNumber(), - fillTime); + popDensityBorderSumRefiners_[i * fieldsPerPop].fill(level.getLevelNumber(), fillTime); - for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) - std::memcpy(ions[i].particleDensity().data(), sumField_.data(), - ions[i].particleDensity().size() * sizeof(value_type)); + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(ions[i].particleDensity().data(), sumField_.data(), + ions[i].particleDensity().size() * sizeof(value_type)); - // + // - for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) - std::memcpy(sumField_.data(), ions[i].chargeDensity().data(), - ions[i].chargeDensity().size() * sizeof(value_type)); + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(sumField_.data(), ions[i].chargeDensity().data(), + ions[i].chargeDensity().size() * sizeof(value_type)); - popDensityBorderSumRefiners_[i * fieldsPerPop + 1].fill(level.getLevelNumber(), - fillTime); + popDensityBorderSumRefiners_[i * fieldsPerPop + 1].fill(level.getLevelNumber(), + fillTime); - for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) - std::memcpy(ions[i].chargeDensity().data(), sumField_.data(), - ions[i].chargeDensity().size() * sizeof(value_type)); - } + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(ions[i].chargeDensity().data(), sumField_.data(), + ions[i].chargeDensity().size() * sizeof(value_type)); } + } - void fillIonBorders(IonsT& /*ions*/, level_t& level, double const fillTime) override - { - assert(ionFluxBorderMaxRefiners_.size() == 1); - assert(ionDensityBorderMaxRefiners_.size() == 2); + void fillIonBorders(IonsT& /*ions*/, level_t& level, double const fillTime) override + { + assert(ionFluxBorderMaxRefiners_.size() == 1); + 
assert(ionDensityBorderMaxRefiners_.size() == 2); - for (auto& refiner : ionFluxBorderMaxRefiners_) - refiner.fill(level.getLevelNumber(), fillTime); - for (auto& refiner : ionDensityBorderMaxRefiners_) - refiner.fill(level.getLevelNumber(), fillTime); - } + for (auto& refiner : ionFluxBorderMaxRefiners_) + refiner.fill(level.getLevelNumber(), fillTime); + for (auto& refiner : ionDensityBorderMaxRefiners_) + refiner.fill(level.getLevelNumber(), fillTime); + } - /** - * @brief fillIonPopMomentGhosts works on moment ghost nodes - * - * level border nodes are completed by the deposition - * of level ghost [old,new] particles for all populations, linear time interpolation - * is used to get the contribution of old/new particles - */ - void fillIonPopMomentGhosts(IonsT& ions, level_t& level, - double const afterPushTime) override + /** + * @brief fillIonPopMomentGhosts works on moment ghost nodes + * + * level border nodes are completed by the deposition + * of level ghost [old,new] particles for all populations, linear time interpolation + * is used to get the contribution of old/new particles + */ + void fillIonPopMomentGhosts(IonsT& ions, level_t& level, double const afterPushTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonPopMomentGhosts"); + + auto alpha = timeInterpCoef_(afterPushTime, level.getLevelNumber()); + if (level.getLevelNumber() > 0 and (alpha < 0 or alpha > 1)) + { + std::cout << std::setprecision(12) << alpha << "\n"; + throw std::runtime_error("ion moment ghost time interp coef invalid : alpha: " + + std::to_string(alpha) + " beforePushTime " + + std::to_string(afterPushTime) + " on level " + + std::to_string(level.getLevelNumber())); + } + for (auto const& patch : level) { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonPopMomentGhosts"); + auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); + auto layout = layoutFromPatch(*patch); - auto alpha = timeInterpCoef_(afterPushTime, 
level.getLevelNumber()); - if (level.getLevelNumber() > 0 and (alpha < 0 or alpha > 1)) - { - std::cout << std::setprecision(12) << alpha << "\n"; - throw std::runtime_error("ion moment ghost time interp coef invalid : alpha: " - + std::to_string(alpha) + " beforePushTime " - + std::to_string(afterPushTime) + " on level " - + std::to_string(level.getLevelNumber())); - } - for (auto const& patch : level) + for (auto& pop : ions) { - auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); - auto layout = layoutFromPatch(*patch); + auto& particleDensity = pop.particleDensity(); + auto& chargeDensity = pop.chargeDensity(); + auto& flux = pop.flux(); + // first thing to do is to project patchGhostParitcles moments - for (auto& pop : ions) + + if (level.getLevelNumber() > 0) // no levelGhost on root level { - auto& particleDensity = pop.particleDensity(); - auto& chargeDensity = pop.chargeDensity(); - auto& flux = pop.flux(); - // first thing to do is to project patchGhostParitcles moments - - - if (level.getLevelNumber() > 0) // no levelGhost on root level - { - // grab levelGhostParticlesOld and levelGhostParticlesNew - // and project them with alpha and (1-alpha) coefs, respectively - auto& levelGhostOld = pop.levelGhostParticlesOld(); - interpolate_(makeRange(levelGhostOld), particleDensity, chargeDensity, flux, - layout, 1. - alpha); - - auto& levelGhostNew = pop.levelGhostParticlesNew(); - interpolate_(makeRange(levelGhostNew), particleDensity, chargeDensity, flux, - layout, alpha); - } + // grab levelGhostParticlesOld and levelGhostParticlesNew + // and project them with alpha and (1-alpha) coefs, respectively + auto& levelGhostOld = pop.levelGhostParticlesOld(); + interpolate_(makeRange(levelGhostOld), particleDensity, chargeDensity, flux, + layout, 1. 
- alpha); + + auto& levelGhostNew = pop.levelGhostParticlesNew(); + interpolate_(makeRange(levelGhostNew), particleDensity, chargeDensity, flux, + layout, alpha); } } } + } + + + /* pure (patch and level) ghost nodes are filled by applying a regular ghost + * schedule i.e. that does not overwrite the border patch node previously well + * calculated from particles Note : the ghost schedule only fills the total density + * and bulk velocity and NOT population densities and fluxes. These partial moments + * are already completed by the "sum" schedules (+= on incomplete nodes)*/ + // virtual void fillIonMomentGhosts(IonsT& ions, level_t& level, + // double const afterPushTime) override + // { + // PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); + // auto& chargeDensity = ions.chargeDensity(); + // auto& velocity = ions.velocity(); + // + // + // if (level.getLevelNumber() != 0) + // { + // setNaNsOnFieldGhosts(chargeDensity, level); + // setNaNsOnVecfieldGhosts(velocity, level); + // } + // chargeDensityLevelGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); + // velLevelGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); + // } + + /** + * @brief firstStep : in the HybridHybridMessengerStrategy, the firstStep method is + * used to get level border ghost particles from the next coarser level. These + * particles are defined in the future at the time the method is called because the + * coarser level is ahead in time. These particles are communicated only at first + * step of a substepping cycle. They will be used with the levelGhostParticlesOld + * particles to get the moments on level border nodes. The method is does nothing if + * the level is the root level because the root level cannot get levelGhost from + * next coarser (it has none). 
+ */ + void firstStep(IPhysicalModel& /*model*/, level_t& level, + std::shared_ptr const& /*hierarchy*/, double const currentTime, + double const prevCoarserTime, double const newCoarserTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::firstStep"); + auto levelNumber = level.getLevelNumber(); + if (newCoarserTime < prevCoarserTime) + throw std::runtime_error("Error : prevCoarserTime (" + std::to_string(prevCoarserTime) + + ") should be < newCoarserTime (" + + std::to_string(prevCoarserTime) + ")"); - /* pure (patch and level) ghost nodes are filled by applying a regular ghost - * schedule i.e. that does not overwrite the border patch node previously well - * calculated from particles Note : the ghost schedule only fills the total density - * and bulk velocity and NOT population densities and fluxes. These partial moments - * are already completed by the "sum" schedules (+= on incomplete nodes)*/ - // virtual void fillIonMomentGhosts(IonsT& ions, level_t& level, - // double const afterPushTime) override - // { - // PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); - // auto& chargeDensity = ions.chargeDensity(); - // auto& velocity = ions.velocity(); - // - // - // if (level.getLevelNumber() != 0) - // { - // setNaNsOnFieldGhosts(chargeDensity, level); - // setNaNsOnVecfieldGhosts(velocity, level); - // } - // chargeDensityLevelGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); - // velLevelGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); - // } - - /** - * @brief firstStep : in the HybridHybridMessengerStrategy, the firstStep method is - * used to get level border ghost particles from the next coarser level. These - * particles are defined in the future at the time the method is called because the - * coarser level is ahead in time. These particles are communicated only at first - * step of a substepping cycle. 
They will be used with the levelGhostParticlesOld - * particles to get the moments on level border nodes. The method is does nothing if - * the level is the root level because the root level cannot get levelGhost from - * next coarser (it has none). - */ - void firstStep(IPhysicalModel& /*model*/, level_t& level, - std::shared_ptr const& /*hierarchy*/, double const currentTime, - double const prevCoarserTime, double const newCoarserTime) override + // root level has no levelghost particles + if (levelNumber != 0) { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::firstStep"); - - auto levelNumber = level.getLevelNumber(); - if (newCoarserTime < prevCoarserTime) - throw std::runtime_error( - "Error : prevCoarserTime (" + std::to_string(prevCoarserTime) - + ") should be < newCoarserTime (" + std::to_string(prevCoarserTime) + ")"); - - // root level has no levelghost particles - if (levelNumber != 0) - { - PHARE_LOG_START(3, "HybridHybridMessengerStrategy::firstStep.fill"); - lvlGhostPartNewRefiners_.fill(levelNumber, currentTime); - PHARE_LOG_STOP(3, "HybridHybridMessengerStrategy::firstStep.fill"); - - // during firstStep() coarser level and current level are at the same time - // so 'time' is also the beforePushCoarseTime_ - beforePushCoarseTime_[levelNumber] = prevCoarserTime; - afterPushCoarseTime_[levelNumber] = newCoarserTime; - } + PHARE_LOG_START(3, "HybridHybridMessengerStrategy::firstStep.fill"); + lvlGhostPartNewRefiners_.fill(levelNumber, currentTime); + PHARE_LOG_STOP(3, "HybridHybridMessengerStrategy::firstStep.fill"); + + // during firstStep() coarser level and current level are at the same time + // so 'time' is also the beforePushCoarseTime_ + beforePushCoarseTime_[levelNumber] = prevCoarserTime; + afterPushCoarseTime_[levelNumber] = newCoarserTime; } + } - /** - * @brief lastStep is used to perform operations at the last step of a substepping - * cycle. It is called after the level is advanced. 
Here for hybrid-hybrid messages, - * the method moves levelGhostParticlesNew particles into levelGhostParticlesOld - * ones. Then levelGhostParticlesNew are emptied since it will be filled again at - * firstStep of the next substepping cycle. the new CoarseToFineOld content is then - * copied to levelGhostParticles so that they can be pushed during the next subcycle - */ - void lastStep(IPhysicalModel& model, level_t& level) override - { - if (level.getLevelNumber() == 0) - return; + /** + * @brief lastStep is used to perform operations at the last step of a substepping + * cycle. It is called after the level is advanced. Here for hybrid-hybrid messages, + * the method moves levelGhostParticlesNew particles into levelGhostParticlesOld + * ones. Then levelGhostParticlesNew are emptied since it will be filled again at + * firstStep of the next substepping cycle. the new CoarseToFineOld content is then + * copied to levelGhostParticles so that they can be pushed during the next subcycle + */ + void lastStep(IPhysicalModel& model, level_t& level) override + { + if (level.getLevelNumber() == 0) + return; - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::lastStep"); + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::lastStep"); - auto& hybridModel = static_cast(model); - for (auto& patch : level) + auto& hybridModel = static_cast(model); + for (auto& patch : level) + { + auto& ions = hybridModel.state.ions; + auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); + for (auto& pop : ions) { - auto& ions = hybridModel.state.ions; - auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); - for (auto& pop : ions) - { - auto& levelGhostParticlesOld = pop.levelGhostParticlesOld(); - auto& levelGhostParticlesNew = pop.levelGhostParticlesNew(); - auto& levelGhostParticles = pop.levelGhostParticles(); + auto& levelGhostParticlesOld = pop.levelGhostParticlesOld(); + auto& levelGhostParticlesNew = pop.levelGhostParticlesNew(); + auto& levelGhostParticles = 
pop.levelGhostParticles(); - std::swap(levelGhostParticlesNew, levelGhostParticlesOld); - levelGhostParticlesNew.clear(); - levelGhostParticles = levelGhostParticlesOld; - } + std::swap(levelGhostParticlesNew, levelGhostParticlesOld); + levelGhostParticlesNew.clear(); + levelGhostParticles = levelGhostParticlesOld; } } + } - /** - * @brief prepareStep is the concrete implementation of the - * HybridMessengerStrategy::prepareStep method For hybrid-Hybrid communications. - * This method copies the density J, and the density and bulk velocity, defined at t=n. - * Since prepareStep() is called just before advancing the level, this operation - * actually saves the t=n versions of J, Ni, Vi into the messenger. When the time comes - * that the next finer level needs to time interpolate the electromagnetic field and - * current at its ghost nodes, this level will be able to interpolate at required time - * because the t=n Vi,Ni,J fields of previous next coarser step will be in the - * messenger. - */ - void prepareStep(IPhysicalModel& model, level_t& level, double currentTime) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::prepareStep"); + /** + * @brief prepareStep is the concrete implementation of the + * HybridMessengerStrategy::prepareStep method For hybrid-Hybrid communications. + * This method copies the density J, and the density and bulk velocity, defined at t=n. + * Since prepareStep() is called just before advancing the level, this operation + * actually saves the t=n versions of J, Ni, Vi into the messenger. When the time comes + * that the next finer level needs to time interpolate the electromagnetic field and + * current at its ghost nodes, this level will be able to interpolate at required time + * because the t=n Vi,Ni,J fields of previous next coarser step will be in the + * messenger. 
+ */ + void prepareStep(IPhysicalModel& model, level_t& level, double currentTime) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::prepareStep"); - auto& hybridModel = static_cast(model); - for (auto& patch : level) - { - auto dataOnPatch = resourcesManager_->setOnPatch( - *patch, hybridModel.state.electromag, hybridModel.state.J, - hybridModel.state.ions, Jold_, NiOld_, ViOld_); + auto& hybridModel = static_cast(model); + for (auto& patch : level) + { + auto dataOnPatch = resourcesManager_->setOnPatch( + *patch, hybridModel.state.electromag, hybridModel.state.J, hybridModel.state.ions, + Jold_, NiOld_, ViOld_); - resourcesManager_->setTime(Jold_, *patch, currentTime); - resourcesManager_->setTime(NiOld_, *patch, currentTime); - resourcesManager_->setTime(ViOld_, *patch, currentTime); + resourcesManager_->setTime(Jold_, *patch, currentTime); + resourcesManager_->setTime(NiOld_, *patch, currentTime); + resourcesManager_->setTime(ViOld_, *patch, currentTime); - auto& J = hybridModel.state.J; - auto& Vi = hybridModel.state.ions.velocity(); - auto& Ni = hybridModel.state.ions.chargeDensity(); + auto& J = hybridModel.state.J; + auto& Vi = hybridModel.state.ions.velocity(); + auto& Ni = hybridModel.state.ions.chargeDensity(); - Jold_.copyData(J); - ViOld_.copyData(Vi); - NiOld_.copyData(Ni); - } + Jold_.copyData(J); + ViOld_.copyData(Vi); + NiOld_.copyData(Ni); } + } - void fillRootGhosts(IPhysicalModel& model, level_t& level, - double const initDataTime) override - { - auto levelNumber = level.getLevelNumber(); - assert(levelNumber == 0); + void fillRootGhosts(IPhysicalModel& model, level_t& level, double const initDataTime) override + { + auto levelNumber = level.getLevelNumber(); + assert(levelNumber == 0); - auto& hybridModel = static_cast(model); + auto& hybridModel = static_cast(model); - // TODO : still useful? - elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, initDataTime); + // TODO : still useful? 
+ elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, initDataTime); - // at some point in the future levelGhostParticles could be filled with injected - // particles depending on the domain boundary condition. - // - // Do we need J ghosts filled here? - // This method is only called when root level is initialized - // but J ghosts are needed a priori for the laplacian when the first Ohm is - // calculated so I think we do, not having them here is just having the - // laplacian wrong on L0 borders for the initial E, which is not the end of the - // world... - // - // do we need moment ghosts filled here? - // a priori no because those are at this time only needed for coarsening, which - // will not happen before the first advance - } + // at some point in the future levelGhostParticles could be filled with injected + // particles depending on the domain boundary condition. + // + // Do we need J ghosts filled here? + // This method is only called when root level is initialized + // but J ghosts are needed a priori for the laplacian when the first Ohm is + // calculated so I think we do, not having them here is just having the + // laplacian wrong on L0 borders for the initial E, which is not the end of the + // world... + // + // do we need moment ghosts filled here? + // a priori no because those are at this time only needed for coarsening, which + // will not happen before the first advance + } - void synchronize(level_t& level) override - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::synchronize"); + void synchronize(level_t& level) override + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::synchronize"); - auto levelNumber = level.getLevelNumber(); - PHARE_LOG_LINE_STR("synchronizing level " + std::to_string(levelNumber)); + auto levelNumber = level.getLevelNumber(); + PHARE_LOG_LINE_STR("synchronizing level " + std::to_string(levelNumber)); - // call coarsning schedules... 
- electroSynchronizers_.sync(levelNumber); - chargeDensitySynchronizers_.sync(levelNumber); - ionBulkVelSynchronizers_.sync(levelNumber); - } + // call coarsning schedules... + electroSynchronizers_.sync(levelNumber); + chargeDensitySynchronizers_.sync(levelNumber); + ionBulkVelSynchronizers_.sync(levelNumber); + } - // this function coarsens the fluxSum onto the corresponding coarser fluxes (E in hybrid), - // and fills the patch ghosts, making it ready for the faraday in the solver.reflux() - void reflux(int const coarserLevelNumber, int const fineLevelNumber, - double const syncTime) override - { - refluxSchedules[fineLevelNumber]->coarsenData(); - patchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); - } - - // after coarsening, domain nodes have been updated and therefore patch ghost nodes - // will probably stop having the exact same value as their overlapped neighbor - // note that NONE of the refiners used here overwrite interior nodes - // because the point is to re-sync patch ghost nodes with overlaped domain nodes - void postSynchronize(IPhysicalModel& model, level_t& level, double const time) override - { - auto levelNumber = level.getLevelNumber(); - auto& hybridModel = static_cast(model); + // this function coarsens the fluxSum onto the corresponding coarser fluxes (E in hybrid), + // and fills the patch ghosts, making it ready for the faraday in the solver.reflux() + void reflux(int const coarserLevelNumber, int const fineLevelNumber, + double const syncTime) override + { + refluxSchedules[fineLevelNumber]->coarsenData(); + patchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); + } + + // after coarsening, domain nodes have been updated and therefore patch ghost nodes + // will probably stop having the exact same value as their overlapped neighbor + // note that NONE of the refiners used here overwrite interior nodes + // because the point is to re-sync patch ghost nodes with overlaped domain nodes + void 
postSynchronize(IPhysicalModel& model, level_t& level, double const time) override + { + auto levelNumber = level.getLevelNumber(); + auto& hybridModel = static_cast(model); - PHARE_LOG_LINE_STR("postSynchronize level " + std::to_string(levelNumber)); + PHARE_LOG_LINE_STR("postSynchronize level " + std::to_string(levelNumber)); - // this electric schedule should probably only be a patch ghost one - // since levelghost nodes are not affected by the coarsening - elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, time); + // this electric schedule should probably only be a patch ghost one + // since levelghost nodes are not affected by the coarsening + elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, time); - chargeDensityPatchGhostsRefiners_.fill(levelNumber, time); - velPatchGhostsRefiners_.fill(hybridModel.state.ions.velocity(), levelNumber, time); - } + chargeDensityPatchGhostsRefiners_.fill(levelNumber, time); + velPatchGhostsRefiners_.fill(hybridModel.state.ions.velocity(), levelNumber, time); + } - private: - void registerGhostComms_(std::unique_ptr const& info) - { - // ********************************************************************* - // all of the ghost refiners take the nonOverwriteInteriorTFfillPattern as - // domain nodes are already computed - // ********************************************************************* +private: + void registerGhostComms_(std::unique_ptr const& info) + { + // ********************************************************************* + // all of the ghost refiners take the nonOverwriteInteriorTFfillPattern as + // domain nodes are already computed + // ********************************************************************* - elecGhostsRefiners_.addStaticRefiners(info->ghostElectric, EfieldRefineOp_, - info->ghostElectric, - nonOverwriteInteriorTFfillPattern); + elecGhostsRefiners_.addStaticRefiners(info->ghostElectric, EfieldRefineOp_, + info->ghostElectric, + 
nonOverwriteInteriorTFfillPattern); - // we need a separate patch strategy for each refiner so that each one can register - // their required ids - magneticPatchStratPerGhostRefiner_ = [&]() { - std::vector>> - result; + // we need a separate patch strategy for each refiner so that each one can register + // their required ids + magneticPatchStratPerGhostRefiner_ = [&]() { + std::vector>> + result; - result.reserve(info->ghostMagnetic.size()); + result.reserve(info->ghostMagnetic.size()); - for (auto const& key : info->ghostMagnetic) - { - auto&& [id] = resourcesManager_->getIDsList(key); + for (auto const& key : info->ghostMagnetic) + { + auto&& [id] = resourcesManager_->getIDsList(key); - auto patch_strat = std::make_shared>(*resourcesManager_, - *boundaryManager_); + auto patch_strat = std::make_shared>(*resourcesManager_, + *boundaryManager_); - patch_strat->registerIDs(id); + patch_strat->registerIDs(id); - result.push_back(patch_strat); - } - return result; - }(); - - for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) - { - // TODO : we could test making this time refined there is probably no - // reason to keep it static. - magGhostsRefiners_.addStaticRefiner( - info->ghostMagnetic[i], BRefineOp_, info->ghostMagnetic[i], - nonOverwriteInteriorTFfillPattern, magneticPatchStratPerGhostRefiner_[i]); + result.push_back(patch_strat); } + return result; + }(); + for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) + { + // TODO : we could test making this time refined there is probably no + // reason to keep it static. 
+ magGhostsRefiners_.addStaticRefiner( + info->ghostMagnetic[i], BRefineOp_, info->ghostMagnetic[i], + nonOverwriteInteriorTFfillPattern, magneticPatchStratPerGhostRefiner_[i]); + } - // If using a time refinement for J then we need to ensure that Jold is also refined - // otherwise time interpolation will not be possible - // We choose to simply use static refinement for J so not to care about Jold - currentGhostsRefiners_.addStaticRefiners(info->ghostCurrent, EfieldRefineOp_, - info->ghostCurrent, - nonOverwriteInteriorTFfillPattern); - // chargeDensityLevelGhostsRefiners_.addTimeRefiner( - // info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, - // fieldTimeOp_, info->modelIonDensity, nonOverwriteInteriorFieldFillPattern); - // - // - // velLevelGhostsRefiners_.addTimeRefiners( - // info->ghostBulkVelocity, info->modelIonBulkVelocity, ViOld_.name(), - // vecFieldRefineOp_, vecFieldTimeOp_, nonOverwriteInteriorTFfillPattern); + // If using a time refinement for J then we need to ensure that Jold is also refined + // otherwise time interpolation will not be possible + // We choose to simply use static refinement for J so not to care about Jold + currentGhostsRefiners_.addStaticRefiners(info->ghostCurrent, EfieldRefineOp_, + info->ghostCurrent, + nonOverwriteInteriorTFfillPattern); - chargeDensityPatchGhostsRefiners_.addTimeRefiner( - info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, - fieldTimeOp_, info->modelIonDensity, nonOverwriteInteriorFieldFillPattern); + // chargeDensityLevelGhostsRefiners_.addTimeRefiner( + // info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, + // fieldTimeOp_, info->modelIonDensity, nonOverwriteInteriorFieldFillPattern); + // + // + // velLevelGhostsRefiners_.addTimeRefiners( + // info->ghostBulkVelocity, info->modelIonBulkVelocity, ViOld_.name(), + // vecFieldRefineOp_, vecFieldTimeOp_, nonOverwriteInteriorTFfillPattern); - 
velPatchGhostsRefiners_.addTimeRefiners( - info->ghostBulkVelocity, info->modelIonBulkVelocity, ViOld_.name(), - vecFieldRefineOp_, vecFieldTimeOp_, nonOverwriteInteriorTFfillPattern); - } + chargeDensityPatchGhostsRefiners_.addTimeRefiner( + info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, + fieldTimeOp_, info->modelIonDensity, nonOverwriteInteriorFieldFillPattern); + velPatchGhostsRefiners_.addTimeRefiners(info->ghostBulkVelocity, info->modelIonBulkVelocity, + ViOld_.name(), vecFieldRefineOp_, vecFieldTimeOp_, + nonOverwriteInteriorTFfillPattern); + } - void registerInitComms_(std::unique_ptr const& info) - { - auto b_id = resourcesManager_->getID(info->modelMagnetic); - BalgoInit.registerRefine(*b_id, *b_id, *b_id, BInitRefineOp_, - overwriteInteriorTFfillPattern); - // no fill pattern given for this init - // will use boxgeometryvariable fillpattern, itself using the - // field geometry with overwrite_interior true from SAMRAI - // we could set the overwriteInteriorTFfillPattern it would be the same - electricInitRefiners_.addStaticRefiners(info->initElectric, EfieldRefineOp_, - info->initElectric); + void registerInitComms_(std::unique_ptr const& info) + { + auto b_id = resourcesManager_->getID(info->modelMagnetic); + BalgoInit.registerRefine(*b_id, *b_id, *b_id, BInitRefineOp_, + overwriteInteriorTFfillPattern); + // no fill pattern given for this init + // will use boxgeometryvariable fillpattern, itself using the + // field geometry with overwrite_interior true from SAMRAI + // we could set the overwriteInteriorTFfillPattern it would be the same + electricInitRefiners_.addStaticRefiners(info->initElectric, EfieldRefineOp_, + info->initElectric); - domainParticlesRefiners_.addStaticRefiners( - info->interiorParticles, interiorParticleRefineOp_, info->interiorParticles); + domainParticlesRefiners_.addStaticRefiners( + info->interiorParticles, interiorParticleRefineOp_, info->interiorParticles); - 
lvlGhostPartOldRefiners_.addStaticRefiners(info->levelGhostParticlesOld, - levelGhostParticlesOldOp_, - info->levelGhostParticlesOld); + lvlGhostPartOldRefiners_.addStaticRefiners( + info->levelGhostParticlesOld, levelGhostParticlesOldOp_, info->levelGhostParticlesOld); - lvlGhostPartNewRefiners_.addStaticRefiners(info->levelGhostParticlesNew, - levelGhostParticlesNewOp_, - info->levelGhostParticlesNew); + lvlGhostPartNewRefiners_.addStaticRefiners( + info->levelGhostParticlesNew, levelGhostParticlesNewOp_, info->levelGhostParticlesNew); - domainGhostPartRefiners_.addStaticRefiners( - info->patchGhostParticles, nullptr, info->patchGhostParticles, - std::make_shared>()); + domainGhostPartRefiners_.addStaticRefiners( + info->patchGhostParticles, nullptr, info->patchGhostParticles, + std::make_shared>()); - for (auto const& vecfield : info->ghostFlux) - { - popFluxBorderSumRefiners_.emplace_back(resourcesManager_) - .addStaticRefiner( - sumVec_.name(), vecfield, nullptr, sumVec_.name(), - std::make_shared< - TensorFieldGhostInterpOverlapFillPattern>()); - } - for (auto const& field : info->sumBorderFields) - popDensityBorderSumRefiners_.emplace_back(resourcesManager_) - .addStaticRefiner( - sumField_.name(), field, nullptr, sumField_.name(), - std::make_shared>()); - - - assert(info->maxBorderFields.size() == 2); // mass & charge densities - for (auto const& field : info->maxBorderFields) - ionDensityBorderMaxRefiners_.emplace_back(resourcesManager_) - .addStaticRefiner( - field, field, nullptr, field, - std::make_shared>()); - - assert(info->maxBorderVecFields.size() == 1); - for (auto const& vecfield : info->maxBorderVecFields) - ionFluxBorderMaxRefiners_.emplace_back(resourcesManager_) - .addStaticRefiner( - vecfield, vecfield, nullptr, vecfield, - std::make_shared< - TensorFieldGhostInterpOverlapFillPattern>()); + for (auto const& vecfield : info->ghostFlux) + { + popFluxBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + sumVec_.name(), 
vecfield, nullptr, sumVec_.name(), + std::make_shared< + TensorFieldGhostInterpOverlapFillPattern>()); } + for (auto const& field : info->sumBorderFields) + popDensityBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + sumField_.name(), field, nullptr, sumField_.name(), + std::make_shared>()); - void registerSyncComms_(std::unique_ptr const& info) - { - electroSynchronizers_.add(info->modelElectric, electricFieldCoarseningOp_, - info->modelElectric); + assert(info->maxBorderFields.size() == 2); // mass & charge densities + for (auto const& field : info->maxBorderFields) + ionDensityBorderMaxRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + field, field, nullptr, field, + std::make_shared>()); - ionBulkVelSynchronizers_.add(info->modelIonBulkVelocity, vecFieldMomentsCoarseningOp_, - info->modelIonBulkVelocity); + assert(info->maxBorderVecFields.size() == 1); + for (auto const& vecfield : info->maxBorderVecFields) + ionFluxBorderMaxRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + vecfield, vecfield, nullptr, vecfield, + std::make_shared< + TensorFieldGhostInterpOverlapFillPattern>()); + } - chargeDensitySynchronizers_.add(info->modelIonDensity, fieldMomentsCoarseningOp_, - info->modelIonDensity); - } + void registerSyncComms_(std::unique_ptr const& info) + { + electroSynchronizers_.add(info->modelElectric, electricFieldCoarseningOp_, + info->modelElectric); + ionBulkVelSynchronizers_.add(info->modelIonBulkVelocity, vecFieldMomentsCoarseningOp_, + info->modelIonBulkVelocity); - void copyLevelGhostOldToPushable_(level_t& level, IPhysicalModel& model) - { - auto& hybridModel = static_cast(model); - for (auto& patch : level) - { - auto& ions = hybridModel.state.ions; - auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); - for (auto& pop : ions) - { - auto& levelGhostParticlesOld = pop.levelGhostParticlesOld(); - auto& levelGhostParticles = pop.levelGhostParticles(); + 
chargeDensitySynchronizers_.add(info->modelIonDensity, fieldMomentsCoarseningOp_, + info->modelIonDensity); + } - levelGhostParticles = levelGhostParticlesOld; - } - } - } - - double timeInterpCoef_(double const afterPushTime, std::size_t levelNumber) + void copyLevelGhostOldToPushable_(level_t& level, IPhysicalModel& model) + { + auto& hybridModel = static_cast(model); + for (auto& patch : level) { - return (afterPushTime - beforePushCoarseTime_[levelNumber]) - / (afterPushCoarseTime_[levelNumber] - beforePushCoarseTime_[levelNumber]); - } - - + auto& ions = hybridModel.state.ions; + auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); + for (auto& pop : ions) + { + auto& levelGhostParticlesOld = pop.levelGhostParticlesOld(); + auto& levelGhostParticles = pop.levelGhostParticles(); - void magneticRegriding_(std::shared_ptr const& hierarchy, - std::shared_ptr const& level, - std::shared_ptr const& oldLevel, double const initDataTime) - { - auto magSchedule = BregridAlgo.createSchedule( - level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy, - &magneticRefinePatchStrategy_); - magSchedule->fillData(initDataTime); + levelGhostParticles = levelGhostParticlesOld; + } } + } - /** * @brief setNaNsFieldOnGhosts sets NaNs on the level ghost nodes of the field - * so that the refinement operators can know nodes at NaN have not been - * touched by schedule copy. 
- * - * This is needed when the schedule copy is done before refinement - * as a result of FieldVariable::fineBoundaryRepresentsVariable=false - * - * boxes : are level patch boxes - */ - void setNaNsOnFieldGhosts(FieldT& field, patch_t const& patch, - SAMRAI::hier::BoxContainer const& boxes) - { - auto const qty = field.physicalQuantity(); - using qty_t = std::decay_t; - using field_geometry_t = FieldGeometry; - - auto const layout = layoutFromPatch(patch); - - // we need to remove the box from the ghost box - // to use SAMRAI::removeIntersections we do some conversions to - // samrai box. - // not gbox is a fieldBox (thanks to the layout) - - auto const gbox = layout.AMRGhostBoxFor(field.physicalQuantity()); - auto const sgbox = samrai_box_from(gbox); - auto const fbox = field_geometry_t::toFieldBoxes(boxes, qty, layout); - - // we create a box container with the ghost box, and then remove the level boxes - // from it - SAMRAI::hier::BoxContainer ghostLayerBoxes{sgbox}; - ghostLayerBoxes.removeIntersections(fbox); - - // and now finally set the NaNs on the ghost boxes - for (auto const& gb : ghostLayerBoxes) - for (auto const& index : layout.AMRToLocal(phare_box_from(gb))) - field(index) = std::numeric_limits::quiet_NaN(); - } - void setNaNsOnFieldGhosts(FieldT& field, level_t const& level) - { - auto const& boxes = level.getBoxes(); - for (auto& patch : resourcesManager_->enumerate(level, field)) - setNaNsOnFieldGhosts(field, *patch, boxes); - } + double timeInterpCoef_(double const afterPushTime, std::size_t levelNumber) + { + return (afterPushTime - beforePushCoarseTime_[levelNumber]) + / (afterPushCoarseTime_[levelNumber] - beforePushCoarseTime_[levelNumber]); + } - void setNaNsOnVecfieldGhosts(VecFieldT& vf, level_t const& level) - { - auto const& boxes = level.getBoxes(); - for (auto& patch : resourcesManager_->enumerate(level, vf)) - for (auto& field : vf) - setNaNsOnFieldGhosts(field, *patch, boxes); - } + void magneticRegriding_(std::shared_ptr const& 
hierarchy, + std::shared_ptr const& level, + std::shared_ptr const& oldLevel, double const initDataTime) + { + auto magSchedule = BregridAlgo.createSchedule(level, oldLevel, + level->getNextCoarserHierarchyLevelNumber(), + hierarchy, &magneticRefinePatchStrategy_); + magSchedule->fillData(initDataTime); + } + + + /** * @brief setNaNsFieldOnGhosts sets NaNs on the level ghost nodes of the field + * so that the refinement operators can know nodes at NaN have not been + * touched by schedule copy. + * + * This is needed when the schedule copy is done before refinement + * as a result of FieldVariable::fineBoundaryRepresentsVariable=false + * + * boxes : are level patch boxes + */ + void setNaNsOnFieldGhosts(FieldT& field, patch_t const& patch, + SAMRAI::hier::BoxContainer const& boxes) + { + auto const qty = field.physicalQuantity(); + using qty_t = std::decay_t; + using field_geometry_t = FieldGeometry; - VecFieldT Jold_{stratName + "_Jold", core::HybridQuantity::Vector::J}; - VecFieldT ViOld_{stratName + "_VBulkOld", core::HybridQuantity::Vector::V}; - FieldT NiOld_{stratName + "_NiOld", core::HybridQuantity::Scalar::rho}; - - TensorFieldT sumTensor_{"PHARE_sumTensor", core::HybridQuantity::Tensor::M}; - VecFieldT sumVec_{"PHARE_sumVec", core::HybridQuantity::Vector::V}; - FieldT sumField_{"PHARE_sumField", core::HybridQuantity::Scalar::rho}; + auto const layout = layoutFromPatch(patch); + // we need to remove the box from the ghost box + // to use SAMRAI::removeIntersections we do some conversions to + // samrai box. + // not gbox is a fieldBox (thanks to the layout) + auto const gbox = layout.AMRGhostBoxFor(field.physicalQuantity()); + auto const sgbox = samrai_box_from(gbox); + auto const fbox = field_geometry_t::toFieldBoxes(boxes, qty, layout); - //! 
ResourceManager shared with other objects (like the HybridModel) - std::shared_ptr resourcesManager_; - - std::shared_ptr boundaryManager_; - - - int const firstLevel_; - std::unordered_map beforePushCoarseTime_; - std::unordered_map afterPushCoarseTime_; - - core::Interpolator interpolate_; - - using rm_t = ResourcesManagerT; - using RefineOperator = SAMRAI::hier::RefineOperator; - using TimeInterpolateOperator = SAMRAI::hier::TimeInterpolateOperator; - - // these refiners are used to initialize electromagnetic fields when creating - // a new level (initLevel) or regridding (regrid) - using InitRefinerPool = RefinerPool; - using GhostRefinerPool = RefinerPool; - using InitDomPartRefinerPool = RefinerPool; - using LevelBorderFieldRefinerPool = RefinerPool; - using DomainGhostPartRefinerPool = RefinerPool; - using PatchGhostRefinerPool = RefinerPool; - using FieldGhostSumRefinerPool = RefinerPool; - using VecFieldGhostSumRefinerPool = RefinerPool; - using FieldGhostMaxRefinerPool = RefinerPool; - using VecFieldGhostMaxRefinerPool = RefinerPool; - using FieldFillPattern_t = FieldFillPattern; - using TensorFieldFillPattern_t = TensorFieldFillPattern; + // we create a box container with the ghost box, and then remove the level boxes + // from it + SAMRAI::hier::BoxContainer ghostLayerBoxes{sgbox}; + ghostLayerBoxes.removeIntersections(fbox); - //! += flux on ghost box overlap incomplete population moment nodes - std::vector popFluxBorderSumRefiners_; - //! 
+= density on ghost box overlap incomplete population moment nodes - std::vector popDensityBorderSumRefiners_; + // and now finally set the NaNs on the ghost boxes + for (auto const& gb : ghostLayerBoxes) + for (auto const& index : layout.AMRToLocal(phare_box_from(gb))) + field(index) = std::numeric_limits::quiet_NaN(); + } - std::vector ionDensityBorderMaxRefiners_; - std::vector ionFluxBorderMaxRefiners_; + void setNaNsOnFieldGhosts(FieldT& field, level_t const& level) + { + auto const& boxes = level.getBoxes(); - InitRefinerPool electricInitRefiners_{resourcesManager_}; + for (auto& patch : resourcesManager_->enumerate(level, field)) + setNaNsOnFieldGhosts(field, *patch, boxes); + } + void setNaNsOnVecfieldGhosts(VecFieldT& vf, level_t const& level) + { + auto const& boxes = level.getBoxes(); - SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; - SAMRAI::xfer::RefineAlgorithm BalgoInit; - SAMRAI::xfer::RefineAlgorithm BregridAlgo; - SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; - std::map> magInitRefineSchedules; - std::map> magPatchGhostsRefineSchedules; - std::map> magGhostsRefineSchedules; - std::map> BpredGhostsRefineSchedules; - std::map> elecPatchGhostsRefineSchedules; + for (auto& patch : resourcesManager_->enumerate(level, vf)) + for (auto& field : vf) + setNaNsOnFieldGhosts(field, *patch, boxes); + } + + + VecFieldT Jold_{stratName + "_Jold", core::HybridQuantity::Vector::J}; + VecFieldT ViOld_{stratName + "_VBulkOld", core::HybridQuantity::Vector::V}; + FieldT NiOld_{stratName + "_NiOld", core::HybridQuantity::Scalar::rho}; + + TensorFieldT sumTensor_{"PHARE_sumTensor", core::HybridQuantity::Tensor::M}; + VecFieldT sumVec_{"PHARE_sumVec", core::HybridQuantity::Vector::V}; + FieldT sumField_{"PHARE_sumField", core::HybridQuantity::Scalar::rho}; - SAMRAI::xfer::CoarsenAlgorithm RefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; - SAMRAI::xfer::RefineAlgorithm PatchGhostRefluxedAlgo; - std::map> refluxSchedules; - std::map> patchGhostRefluxedSchedules; - //! 
store refiners for electric fields that need ghosts to be filled - GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; - GhostRefinerPool magGhostsRefiners_{resourcesManager_}; + //! ResourceManager shared with other objects (like the HybridModel) + std::shared_ptr resourcesManager_; + + std::shared_ptr boundaryManager_; + + + int const firstLevel_; + std::unordered_map beforePushCoarseTime_; + std::unordered_map afterPushCoarseTime_; + + core::Interpolator interpolate_; + + using rm_t = ResourcesManagerT; + using RefineOperator = SAMRAI::hier::RefineOperator; + using TimeInterpolateOperator = SAMRAI::hier::TimeInterpolateOperator; + + // these refiners are used to initialize electromagnetic fields when creating + // a new level (initLevel) or regridding (regrid) + using InitRefinerPool = RefinerPool; + using GhostRefinerPool = RefinerPool; + using InitDomPartRefinerPool = RefinerPool; + using LevelBorderFieldRefinerPool = RefinerPool; + using DomainGhostPartRefinerPool = RefinerPool; + using PatchGhostRefinerPool = RefinerPool; + using FieldGhostSumRefinerPool = RefinerPool; + using VecFieldGhostSumRefinerPool = RefinerPool; + using FieldGhostMaxRefinerPool = RefinerPool; + using VecFieldGhostMaxRefinerPool = RefinerPool; + using FieldFillPattern_t = FieldFillPattern; + using TensorFieldFillPattern_t = TensorFieldFillPattern; + + //! += flux on ghost box overlap incomplete population moment nodes + std::vector popFluxBorderSumRefiners_; + //! += density on ghost box overlap incomplete population moment nodes + std::vector popDensityBorderSumRefiners_; - GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; + std::vector ionDensityBorderMaxRefiners_; + std::vector ionFluxBorderMaxRefiners_; - // moment ghosts - // The border node is already complete by the deposit of ghost particles - // these refiners are used to fill ghost nodes, and therefore, owing to - // the GhostField tag, will only assign pure ghost nodes. 
Border nodes will - // be overwritten only on level borders, which does not seem to be an issue. - // ****** - // NOTE : - // ***** - // these and all the code that use them is commented - // the reason for not deleting is that in its current state the code - // only deposits levelghost particles which therefore leaves some of the level - // ghost nodes incomplete (missing the outside contribution). - // We thought about replacing levelghost particle deposit by filling a level ghost schedule - // but at interp order >=2, levelghost particles will contribute to inner domain nodes - // which a schedule will not do so we need them. - // Keeping this code here is a way to ease the filling of pure level ghost nodes - // if we decide to do so one day. This would overwrite what level ghost particles - // have deposited on level ghost nodes, but since it is incomplete it does not matter - // on the other hand this would be necessary if we wanted to have a multiple point - // coarsening operator for moments. 
- // LevelBorderFieldRefinerPool chargeDensityLevelGhostsRefiners_{resourcesManager_}; - // LevelBorderFieldRefinerPool velLevelGhostsRefiners_{resourcesManager_}; + InitRefinerPool electricInitRefiners_{resourcesManager_}; - PatchGhostRefinerPool chargeDensityPatchGhostsRefiners_{resourcesManager_}; - PatchGhostRefinerPool velPatchGhostsRefiners_{resourcesManager_}; - // pool of refiners for interior particles of each population - // and the associated refinement operator - InitDomPartRefinerPool domainParticlesRefiners_{resourcesManager_}; + SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; + SAMRAI::xfer::RefineAlgorithm BalgoInit; + SAMRAI::xfer::RefineAlgorithm BregridAlgo; + SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; + std::map> magInitRefineSchedules; + std::map> magPatchGhostsRefineSchedules; + std::map> magGhostsRefineSchedules; + std::map> BpredGhostsRefineSchedules; + std::map> elecPatchGhostsRefineSchedules; - using RefOp_ptr = std::shared_ptr; + SAMRAI::xfer::CoarsenAlgorithm RefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; + SAMRAI::xfer::RefineAlgorithm PatchGhostRefluxedAlgo; + std::map> refluxSchedules; + std::map> patchGhostRefluxedSchedules; - RefOp_ptr interiorParticleRefineOp_{std::make_shared()}; + //! store refiners for electric fields that need ghosts to be filled + GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; - //! store communicators for coarse to fine particles old - // pools of refiners to fill level ghost particles, old and new ones - // and their associated refinement operator - static auto constexpr LGRefT = RefinerType::LevelBorderParticles; - RefinerPool lvlGhostPartOldRefiners_{resourcesManager_}; - RefinerPool lvlGhostPartNewRefiners_{resourcesManager_}; - RefOp_ptr levelGhostParticlesOldOp_{std::make_shared()}; - RefOp_ptr levelGhostParticlesNewOp_{std::make_shared()}; - - - //! 
to grab particle leaving neighboring patches and inject into domain - DomainGhostPartRefinerPool domainGhostPartRefiners_{resourcesManager_}; - - SynchronizerPool chargeDensitySynchronizers_{resourcesManager_}; - SynchronizerPool ionBulkVelSynchronizers_{resourcesManager_}; - SynchronizerPool electroSynchronizers_{resourcesManager_}; - - - RefOp_ptr fieldRefineOp_{std::make_shared()}; - RefOp_ptr vecFieldRefineOp_{std::make_shared()}; - - - RefOp_ptr BInitRefineOp_{std::make_shared()}; - RefOp_ptr BRefineOp_{std::make_shared()}; - RefOp_ptr EfieldRefineOp_{std::make_shared()}; - std::shared_ptr nonOverwriteInteriorFieldFillPattern - = std::make_shared>(); // stateless (mostly) + GhostRefinerPool magGhostsRefiners_{resourcesManager_}; - std::shared_ptr overwriteInteriorFieldFillPattern - = std::make_shared>( - /*overwrite_interior=*/true); // stateless (mostly) + GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; - std::shared_ptr nonOverwriteInteriorTFfillPattern - = std::make_shared>(); + // moment ghosts + // The border node is already complete by the deposit of ghost particles + // these refiners are used to fill ghost nodes, and therefore, owing to + // the GhostField tag, will only assign pure ghost nodes. Border nodes will + // be overwritten only on level borders, which does not seem to be an issue. + // ****** + // NOTE : + // ***** + // these and all the code that use them is commented + // the reason for not deleting is that in its current state the code + // only deposits levelghost particles which therefore leaves some of the level + // ghost nodes incomplete (missing the outside contribution). + // We thought about replacing levelghost particle deposit by filling a level ghost schedule + // but at interp order >=2, levelghost particles will contribute to inner domain nodes + // which a schedule will not do so we need them. + // Keeping this code here is a way to ease the filling of pure level ghost nodes + // if we decide to do so one day. 
This would overwrite what level ghost particles + // have deposited on level ghost nodes, but since it is incomplete it does not matter + // on the other hand this would be necessary if we wanted to have a multiple point + // coarsening operator for moments. + // LevelBorderFieldRefinerPool chargeDensityLevelGhostsRefiners_{resourcesManager_}; + // LevelBorderFieldRefinerPool velLevelGhostsRefiners_{resourcesManager_}; - std::shared_ptr overwriteInteriorTFfillPattern - = std::make_shared>( - /*overwrite_interior=*/true); + PatchGhostRefinerPool chargeDensityPatchGhostsRefiners_{resourcesManager_}; + PatchGhostRefinerPool velPatchGhostsRefiners_{resourcesManager_}; - std::shared_ptr fieldTimeOp_{std::make_shared()}; - std::shared_ptr vecFieldTimeOp_{ - std::make_shared()}; + // pool of refiners for interior particles of each population + // and the associated refinement operator + InitDomPartRefinerPool domainParticlesRefiners_{resourcesManager_}; - using CoarsenOperator_ptr = std::shared_ptr; - - CoarsenOperator_ptr fieldMomentsCoarseningOp_{std::make_shared()}; - CoarsenOperator_ptr vecFieldMomentsCoarseningOp_{ - std::make_shared()}; - CoarsenOperator_ptr electricFieldCoarseningOp_{std::make_shared()}; + using RefOp_ptr = std::shared_ptr; - MagneticRefinePatchStrategy - magneticRefinePatchStrategy_{*resourcesManager_, *boundaryManager_}; + RefOp_ptr interiorParticleRefineOp_{std::make_shared()}; - std::vector>> - magneticPatchStratPerGhostRefiner_; - }; + //! store communicators for coarse to fine particles old + // pools of refiners to fill level ghost particles, old and new ones + // and their associated refinement operator + static auto constexpr LGRefT = RefinerType::LevelBorderParticles; + RefinerPool lvlGhostPartOldRefiners_{resourcesManager_}; + RefinerPool lvlGhostPartNewRefiners_{resourcesManager_}; + RefOp_ptr levelGhostParticlesOldOp_{std::make_shared()}; + RefOp_ptr levelGhostParticlesNewOp_{std::make_shared()}; + + + //! 
to grab particle leaving neighboring patches and inject into domain + DomainGhostPartRefinerPool domainGhostPartRefiners_{resourcesManager_}; + + SynchronizerPool chargeDensitySynchronizers_{resourcesManager_}; + SynchronizerPool ionBulkVelSynchronizers_{resourcesManager_}; + SynchronizerPool electroSynchronizers_{resourcesManager_}; + + + RefOp_ptr fieldRefineOp_{std::make_shared()}; + RefOp_ptr vecFieldRefineOp_{std::make_shared()}; + + + RefOp_ptr BInitRefineOp_{std::make_shared()}; + RefOp_ptr BRefineOp_{std::make_shared()}; + RefOp_ptr EfieldRefineOp_{std::make_shared()}; + std::shared_ptr nonOverwriteInteriorFieldFillPattern + = std::make_shared>(); // stateless (mostly) + + std::shared_ptr overwriteInteriorFieldFillPattern + = std::make_shared>( + /*overwrite_interior=*/true); // stateless (mostly) + std::shared_ptr nonOverwriteInteriorTFfillPattern + = std::make_shared>(); + + std::shared_ptr overwriteInteriorTFfillPattern + = std::make_shared>( + /*overwrite_interior=*/true); + + std::shared_ptr fieldTimeOp_{std::make_shared()}; + std::shared_ptr vecFieldTimeOp_{ + std::make_shared()}; + + using CoarsenOperator_ptr = std::shared_ptr; + + CoarsenOperator_ptr fieldMomentsCoarseningOp_{std::make_shared()}; + CoarsenOperator_ptr vecFieldMomentsCoarseningOp_{std::make_shared()}; + CoarsenOperator_ptr electricFieldCoarseningOp_{std::make_shared()}; + + MagneticRefinePatchStrategy + magneticRefinePatchStrategy_{*resourcesManager_, *boundaryManager_}; -} // namespace amr + std::vector>> + magneticPatchStratPerGhostRefiner_; +}; -} // namespace PHARE +} // namespace PHARE::amr #endif diff --git a/src/amr/messengers/messenger_factory.hpp b/src/amr/messengers/messenger_factory.hpp index 83cbd5a94..0f20b6ebd 100644 --- a/src/amr/messengers/messenger_factory.hpp +++ b/src/amr/messengers/messenger_factory.hpp @@ -3,11 +3,12 @@ -#include "amr/messengers/hybrid_hybrid_messenger_strategy.hpp" -#include "amr/messengers/hybrid_messenger.hpp" #include 
"amr/messengers/messenger.hpp" -#include "amr/messengers/mhd_hybrid_messenger_strategy.hpp" #include "amr/messengers/mhd_messenger.hpp" +#include "amr/messengers/hybrid_messenger.hpp" +#include "amr/messengers/mhd_hybrid_messenger_strategy.hpp" +#include "amr/messengers/hybrid_hybrid_messenger_strategy.hpp" + #include "core/def.hpp" #include diff --git a/src/amr/messengers/mhd_messenger.hpp b/src/amr/messengers/mhd_messenger.hpp index 2320b1960..913e786ef 100644 --- a/src/amr/messengers/mhd_messenger.hpp +++ b/src/amr/messengers/mhd_messenger.hpp @@ -1,25 +1,25 @@ #ifndef PHARE_MHD_MESSENGER_HPP #define PHARE_MHD_MESSENGER_HPP -#include "amr/data/field/coarsening/electric_field_coarsener.hpp" -#include "amr/data/field/coarsening/field_coarsen_operator.hpp" -#include "amr/data/field/coarsening/mhd_flux_coarsener.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" -#include "amr/data/field/refine/electric_field_refiner.hpp" -#include "amr/data/field/refine/magnetic_field_refiner.hpp" -#include "amr/data/field/refine/magnetic_field_regrider.hpp" -#include "amr/data/field/refine/mhd_field_refiner.hpp" -#include "amr/data/field/refine/mhd_flux_refiner.hpp" -#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" #include "amr/messengers/refiner.hpp" -#include "amr/messengers/refiner_pool.hpp" -#include "amr/messengers/synchronizer_pool.hpp" #include "amr/messengers/messenger.hpp" +#include "amr/messengers/refiner_pool.hpp" #include "amr/messengers/messenger_info.hpp" +#include "amr/messengers/synchronizer_pool.hpp" #include "amr/messengers/mhd_messenger_info.hpp" +#include "amr/data/field/refine/mhd_flux_refiner.hpp" +#include "amr/data/field/refine/mhd_field_refiner.hpp" +#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/coarsening/mhd_flux_coarsener.hpp" +#include "amr/data/field/refine/electric_field_refiner.hpp" +#include 
"amr/data/field/refine/magnetic_field_refiner.hpp" +#include "amr/data/field/refine/magnetic_field_regrider.hpp" +#include "amr/data/field/coarsening/field_coarsen_operator.hpp" #include "amr/data/field/refine/field_refine_patch_strategy.hpp" +#include "amr/data/field/coarsening/electric_field_coarsener.hpp" #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" -#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" #include "core/mhd/mhd_quantities.hpp" @@ -30,798 +30,777 @@ #include #include +namespace PHARE::amr +{ + +template +class MHDMessenger : public IMessenger +{ + using amr_types = PHARE::amr::SAMRAI_Types; + using level_t = amr_types::level_t; + using patch_t = amr_types::patch_t; + using hierarchy_t = amr_types::hierarchy_t; + + using IPhysicalModel = MHDModel::Interface; + using FieldT = MHDModel::field_type; + using VecFieldT = MHDModel::vecfield_type; + using MHDStateT = MHDModel::state_type; + using GridLayoutT = MHDModel::gridlayout_type; + using GridT = MHDModel::grid_type; + using ResourcesManagerT = MHDModel::resources_manager_type; + using BoundaryManagerT = MHDModel::boundary_manager_type; + using FieldDataT = FieldData; + using VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::MHDQuantity>; + + static constexpr auto dimension = MHDModel::dimension; + +public: + static constexpr std::size_t rootLevelNumber = 0; + static inline std::string const stratName = "MHDModel-MHDModel"; + + MHDMessenger(std::shared_ptr resourcesManager, + std::shared_ptr boundaryManager, int const firstLevel) + : resourcesManager_{std::move(resourcesManager)} + , boundaryManager_{std::move(boundaryManager)} + , firstLevel_{firstLevel} + { + // moment ghosts are primitive quantities + resourcesManager_->registerResources(rhoOld_); + resourcesManager_->registerResources(Vold_); + resourcesManager_->registerResources(Pold_); -#include -#include -#include + 
resourcesManager_->registerResources(rhoVold_); + resourcesManager_->registerResources(EtotOld_); + resourcesManager_->registerResources(Jold_); // conditionally register -namespace PHARE -{ -namespace amr -{ - template - class MHDMessenger : public IMessenger + // also magnetic fluxes ? or should we use static refiners instead ? + } + + virtual ~MHDMessenger() = default; + + void allocate(SAMRAI::hier::Patch& patch, double const allocateTime) const override { - using amr_types = PHARE::amr::SAMRAI_Types; - using level_t = amr_types::level_t; - using patch_t = amr_types::patch_t; - using hierarchy_t = amr_types::hierarchy_t; - - using IPhysicalModel = MHDModel::Interface; - using FieldT = MHDModel::field_type; - using VecFieldT = MHDModel::vecfield_type; - using MHDStateT = MHDModel::state_type; - using GridLayoutT = MHDModel::gridlayout_type; - using GridT = MHDModel::grid_type; - using ResourcesManagerT = MHDModel::resources_manager_type; - using BoundaryManagerT = MHDModel::boundary_manager_type; - using FieldDataT = FieldData; - using VectorFieldDataT = TensorFieldData<1, GridLayoutT, GridT, core::MHDQuantity>; - - static constexpr auto dimension = MHDModel::dimension; - - public: - static constexpr std::size_t rootLevelNumber = 0; - static inline std::string const stratName = "MHDModel-MHDModel"; - - MHDMessenger(std::shared_ptr resourcesManager, - std::shared_ptr boundaryManager, int const firstLevel) - : resourcesManager_{std::move(resourcesManager)} - , boundaryManager_{std::move(boundaryManager)} - , firstLevel_{firstLevel} - { - // moment ghosts are primitive quantities - resourcesManager_->registerResources(rhoOld_); - resourcesManager_->registerResources(Vold_); - resourcesManager_->registerResources(Pold_); + resourcesManager_->allocate(rhoOld_, patch, allocateTime); + resourcesManager_->allocate(Vold_, patch, allocateTime); + resourcesManager_->allocate(Pold_, patch, allocateTime); - resourcesManager_->registerResources(rhoVold_); - 
resourcesManager_->registerResources(EtotOld_); + resourcesManager_->allocate(rhoVold_, patch, allocateTime); + resourcesManager_->allocate(EtotOld_, patch, allocateTime); - resourcesManager_->registerResources(Jold_); // conditionally register + resourcesManager_->allocate(Jold_, patch, allocateTime); + } - // also magnetic fluxes ? or should we use static refiners instead ? - } - virtual ~MHDMessenger() = default; + void registerQuantities(std::unique_ptr fromCoarserInfo, + [[maybe_unused]] std::unique_ptr fromFinerInfo) override + { + std::unique_ptr mhdInfo{ + dynamic_cast(fromFinerInfo.release())}; + + auto b_id = resourcesManager_->getID(mhdInfo->modelMagnetic); - void allocate(SAMRAI::hier::Patch& patch, double const allocateTime) const override + if (!b_id) { - resourcesManager_->allocate(rhoOld_, patch, allocateTime); - resourcesManager_->allocate(Vold_, patch, allocateTime); - resourcesManager_->allocate(Pold_, patch, allocateTime); + throw std::runtime_error("MHDMessengerStrategy: missing magnetic field variable IDs"); + } - resourcesManager_->allocate(rhoVold_, patch, allocateTime); - resourcesManager_->allocate(EtotOld_, patch, allocateTime); + magneticRefinePatchStrategy_.registerIDs(*b_id); - resourcesManager_->allocate(Jold_, patch, allocateTime); - } + BalgoPatchGhost.registerRefine(*b_id, *b_id, *b_id, BfieldRefineOp_, + nonOverwriteInteriorTFfillPattern); + BalgoInit.registerRefine(*b_id, *b_id, *b_id, BfieldRegridOp_, + overwriteInteriorTFfillPattern); - void - registerQuantities(std::unique_ptr fromCoarserInfo, - [[maybe_unused]] std::unique_ptr fromFinerInfo) override - { - std::unique_ptr mhdInfo{ - dynamic_cast(fromFinerInfo.release())}; + BregridAlgo.registerRefine(*b_id, *b_id, *b_id, BfieldRegridOp_, + overwriteInteriorTFfillPattern); - auto b_id = resourcesManager_->getID(mhdInfo->modelMagnetic); + auto e_id = resourcesManager_->getID(mhdInfo->modelElectric); - if (!b_id) - { - throw std::runtime_error( - "MHDMessengerStrategy: missing 
magnetic field variable IDs"); - } + if (!e_id) + { + throw std::runtime_error("MHDMessengerStrategy: missing electric field variable IDs"); + } - magneticRefinePatchStrategy_.registerIDs(*b_id); + EalgoPatchGhost.registerRefine(*e_id, *e_id, *e_id, EfieldRefineOp_, + nonOverwriteInteriorTFfillPattern); - BalgoPatchGhost.registerRefine(*b_id, *b_id, *b_id, BfieldRefineOp_, - nonOverwriteInteriorTFfillPattern); + // refluxing + // we first want to coarsen the flux sum onto the coarser level + auto rho_fx_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fx); + auto rhoV_fx_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rhoV_fx); + auto Etot_fx_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fx); - BalgoInit.registerRefine(*b_id, *b_id, *b_id, BfieldRegridOp_, - overwriteInteriorTFfillPattern); + if (!rho_fx_reflux_id or !rhoV_fx_reflux_id or !Etot_fx_reflux_id) + { + throw std::runtime_error( + "MHDMessenger: missing reflux variable IDs for fluxes in x direction"); + } - BregridAlgo.registerRefine(*b_id, *b_id, *b_id, BfieldRegridOp_, - overwriteInteriorTFfillPattern); + auto rho_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fx); + auto rhoV_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fx); + auto Etot_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.Etot_fx); - auto e_id = resourcesManager_->getID(mhdInfo->modelElectric); - if (!e_id) - { - throw std::runtime_error( - "MHDMessengerStrategy: missing electric field variable IDs"); - } + if (!rho_fx_fluxsum_id or !rhoV_fx_fluxsum_id or !Etot_fx_fluxsum_id) + { + throw std::runtime_error( + "MHDMessenger: missing flux sum variable IDs for fluxes in x direction"); + } - EalgoPatchGhost.registerRefine(*e_id, *e_id, *e_id, EfieldRefineOp_, - nonOverwriteInteriorTFfillPattern); - // refluxing - // we first want to coarsen the flux sum onto the coarser level - auto rho_fx_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fx); - auto rhoV_fx_reflux_id = 
resourcesManager_->getID(mhdInfo->reflux.rhoV_fx); - auto Etot_fx_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fx); + // all of the fluxes fx are defined on the same faces no matter the component, so we + // just need a different fill pattern per direction + HydroXrefluxAlgo.registerCoarsen(*rho_fx_reflux_id, *rho_fx_fluxsum_id, + mhdFluxCoarseningOp_); + HydroXrefluxAlgo.registerCoarsen(*rhoV_fx_reflux_id, *rhoV_fx_fluxsum_id, + mhdVecFluxCoarseningOp_); + HydroXrefluxAlgo.registerCoarsen(*Etot_fx_reflux_id, *Etot_fx_fluxsum_id, + mhdFluxCoarseningOp_); + + // we then need to refill the ghosts so that they agree with the newly refluxed + // cells + HydroXpatchGhostRefluxedAlgo.registerRefine(*rho_fx_reflux_id, *rho_fx_reflux_id, + *rho_fx_reflux_id, mhdFluxRefineOp_, + nonOverwriteInteriorTFfillPattern); + HydroXpatchGhostRefluxedAlgo.registerRefine(*rhoV_fx_reflux_id, *rhoV_fx_reflux_id, + *rhoV_fx_reflux_id, mhdVecFluxRefineOp_, + nonOverwriteInteriorTFfillPattern); + HydroXpatchGhostRefluxedAlgo.registerRefine(*Etot_fx_reflux_id, *Etot_fx_reflux_id, + *Etot_fx_reflux_id, mhdFluxRefineOp_, + nonOverwriteInteriorTFfillPattern); + + if constexpr (dimension >= 2) + { + auto rho_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fy); + auto rhoV_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rhoV_fy); + auto Etot_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fy); - if (!rho_fx_reflux_id or !rhoV_fx_reflux_id or !Etot_fx_reflux_id) + if (!rho_fy_reflux_id or !rhoV_fy_reflux_id or !Etot_fy_reflux_id) { throw std::runtime_error( - "MHDMessenger: missing reflux variable IDs for fluxes in x direction"); + "MHDMessenger: missing reflux variable IDs for fluxes in y direction"); } - auto rho_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fx); - auto rhoV_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fx); - auto Etot_fx_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.Etot_fx); - + auto 
rho_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fy); + auto rhoV_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fy); + auto Etot_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.Etot_fy); - if (!rho_fx_fluxsum_id or !rhoV_fx_fluxsum_id or !Etot_fx_fluxsum_id) + if (!rho_fy_fluxsum_id or !rhoV_fy_fluxsum_id or !Etot_fy_fluxsum_id) { throw std::runtime_error( - "MHDMessenger: missing flux sum variable IDs for fluxes in x direction"); + "MHDMessenger: missing flux sum variable IDs for fluxes in y direction"); } - - // all of the fluxes fx are defined on the same faces no matter the component, so we - // just need a different fill pattern per direction - HydroXrefluxAlgo.registerCoarsen(*rho_fx_reflux_id, *rho_fx_fluxsum_id, + HydroYrefluxAlgo.registerCoarsen(*rho_fy_reflux_id, *rho_fy_fluxsum_id, mhdFluxCoarseningOp_); - HydroXrefluxAlgo.registerCoarsen(*rhoV_fx_reflux_id, *rhoV_fx_fluxsum_id, + HydroYrefluxAlgo.registerCoarsen(*rhoV_fy_reflux_id, *rhoV_fy_fluxsum_id, mhdVecFluxCoarseningOp_); - HydroXrefluxAlgo.registerCoarsen(*Etot_fx_reflux_id, *Etot_fx_fluxsum_id, + HydroYrefluxAlgo.registerCoarsen(*Etot_fy_reflux_id, *Etot_fy_fluxsum_id, mhdFluxCoarseningOp_); - // we then need to refill the ghosts so that they agree with the newly refluxed - // cells - HydroXpatchGhostRefluxedAlgo.registerRefine(*rho_fx_reflux_id, *rho_fx_reflux_id, - *rho_fx_reflux_id, mhdFluxRefineOp_, + HydroYpatchGhostRefluxedAlgo.registerRefine(*rho_fy_reflux_id, *rho_fy_reflux_id, + *rho_fy_reflux_id, mhdFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - HydroXpatchGhostRefluxedAlgo.registerRefine(*rhoV_fx_reflux_id, *rhoV_fx_reflux_id, - *rhoV_fx_reflux_id, mhdVecFluxRefineOp_, + HydroYpatchGhostRefluxedAlgo.registerRefine(*rhoV_fy_reflux_id, *rhoV_fy_reflux_id, + *rhoV_fy_reflux_id, mhdVecFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - HydroXpatchGhostRefluxedAlgo.registerRefine(*Etot_fx_reflux_id, *Etot_fx_reflux_id, - 
*Etot_fx_reflux_id, mhdFluxRefineOp_, + HydroYpatchGhostRefluxedAlgo.registerRefine(*Etot_fy_reflux_id, *Etot_fy_reflux_id, + *Etot_fy_reflux_id, mhdFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - if constexpr (dimension >= 2) + if constexpr (dimension == 3) { - auto rho_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fy); - auto rhoV_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rhoV_fy); - auto Etot_fy_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fy); + auto rho_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fz); + auto rhoV_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rhoV_fz); + auto Etot_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fz); + - if (!rho_fy_reflux_id or !rhoV_fy_reflux_id or !Etot_fy_reflux_id) + if (!rho_fz_reflux_id or !rhoV_fz_reflux_id or !Etot_fz_reflux_id) { throw std::runtime_error( - "MHDMessenger: missing reflux variable IDs for fluxes in y direction"); + "MHDMessenger: missing reflux variable IDs for fluxes in z direction"); } - auto rho_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fy); - auto rhoV_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fy); - auto Etot_fy_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.Etot_fy); + auto rho_fz_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fz); + auto rhoV_fz_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fz); + auto Etot_fz_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.Etot_fz); - if (!rho_fy_fluxsum_id or !rhoV_fy_fluxsum_id or !Etot_fy_fluxsum_id) + if (!rho_fz_fluxsum_id or !rhoV_fz_fluxsum_id or !Etot_fz_fluxsum_id) { - throw std::runtime_error( - "MHDMessenger: missing flux sum variable IDs for fluxes in y direction"); + throw std::runtime_error("MHDMessenger: missing flux sum variable IDs for " + "fluxes in z direction"); } - HydroYrefluxAlgo.registerCoarsen(*rho_fy_reflux_id, *rho_fy_fluxsum_id, + 
HydroZrefluxAlgo.registerCoarsen(*rho_fz_reflux_id, *rho_fz_fluxsum_id, mhdFluxCoarseningOp_); - HydroYrefluxAlgo.registerCoarsen(*rhoV_fy_reflux_id, *rhoV_fy_fluxsum_id, + HydroZrefluxAlgo.registerCoarsen(*rhoV_fz_reflux_id, *rhoV_fz_fluxsum_id, mhdVecFluxCoarseningOp_); - HydroYrefluxAlgo.registerCoarsen(*Etot_fy_reflux_id, *Etot_fy_fluxsum_id, + HydroZrefluxAlgo.registerCoarsen(*Etot_fz_reflux_id, *Etot_fz_fluxsum_id, mhdFluxCoarseningOp_); - HydroYpatchGhostRefluxedAlgo.registerRefine(*rho_fy_reflux_id, *rho_fy_reflux_id, - *rho_fy_reflux_id, mhdFluxRefineOp_, + + HydroZpatchGhostRefluxedAlgo.registerRefine(*rho_fz_reflux_id, *rho_fz_reflux_id, + *rho_fz_reflux_id, mhdFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - HydroYpatchGhostRefluxedAlgo.registerRefine(*rhoV_fy_reflux_id, *rhoV_fy_reflux_id, - *rhoV_fy_reflux_id, mhdVecFluxRefineOp_, + HydroZpatchGhostRefluxedAlgo.registerRefine(*rhoV_fz_reflux_id, *rhoV_fz_reflux_id, + *rhoV_fz_reflux_id, mhdVecFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - HydroYpatchGhostRefluxedAlgo.registerRefine(*Etot_fy_reflux_id, *Etot_fy_reflux_id, - *Etot_fy_reflux_id, mhdFluxRefineOp_, + HydroZpatchGhostRefluxedAlgo.registerRefine(*Etot_fz_reflux_id, *Etot_fz_reflux_id, + *Etot_fz_reflux_id, mhdFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - - if constexpr (dimension == 3) - { - auto rho_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rho_fz); - auto rhoV_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.rhoV_fz); - auto Etot_fz_reflux_id = resourcesManager_->getID(mhdInfo->reflux.Etot_fz); - - - if (!rho_fz_reflux_id or !rhoV_fz_reflux_id or !Etot_fz_reflux_id) - { - throw std::runtime_error( - "MHDMessenger: missing reflux variable IDs for fluxes in z direction"); - } - - auto rho_fz_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rho_fz); - auto rhoV_fz_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSum.rhoV_fz); - auto Etot_fz_fluxsum_id = 
resourcesManager_->getID(mhdInfo->fluxSum.Etot_fz); - - if (!rho_fz_fluxsum_id or !rhoV_fz_fluxsum_id or !Etot_fz_fluxsum_id) - { - throw std::runtime_error("MHDMessenger: missing flux sum variable IDs for " - "fluxes in z direction"); - } - - HydroZrefluxAlgo.registerCoarsen(*rho_fz_reflux_id, *rho_fz_fluxsum_id, - mhdFluxCoarseningOp_); - HydroZrefluxAlgo.registerCoarsen(*rhoV_fz_reflux_id, *rhoV_fz_fluxsum_id, - mhdVecFluxCoarseningOp_); - HydroZrefluxAlgo.registerCoarsen(*Etot_fz_reflux_id, *Etot_fz_fluxsum_id, - mhdFluxCoarseningOp_); - - - HydroZpatchGhostRefluxedAlgo.registerRefine( - *rho_fz_reflux_id, *rho_fz_reflux_id, *rho_fz_reflux_id, mhdFluxRefineOp_, - nonOverwriteInteriorTFfillPattern); - HydroZpatchGhostRefluxedAlgo.registerRefine( - *rhoV_fz_reflux_id, *rhoV_fz_reflux_id, *rhoV_fz_reflux_id, - mhdVecFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - HydroZpatchGhostRefluxedAlgo.registerRefine( - *Etot_fz_reflux_id, *Etot_fz_reflux_id, *Etot_fz_reflux_id, - mhdFluxRefineOp_, nonOverwriteInteriorTFfillPattern); - } } + } - auto e_reflux_id = resourcesManager_->getID(mhdInfo->refluxElectric); + auto e_reflux_id = resourcesManager_->getID(mhdInfo->refluxElectric); - auto e_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSumElectric); + auto e_fluxsum_id = resourcesManager_->getID(mhdInfo->fluxSumElectric); - if (!e_reflux_id or !e_fluxsum_id) - { - throw std::runtime_error( - "MHDMessenger: missing electric refluxing field variable IDs"); - } + if (!e_reflux_id or !e_fluxsum_id) + { + throw std::runtime_error("MHDMessenger: missing electric refluxing field variable IDs"); + } - ErefluxAlgo.registerCoarsen(*e_reflux_id, *e_fluxsum_id, electricFieldCoarseningOp_); + ErefluxAlgo.registerCoarsen(*e_reflux_id, *e_fluxsum_id, electricFieldCoarseningOp_); - EpatchGhostRefluxedAlgo.registerRefine(*e_reflux_id, *e_reflux_id, *e_reflux_id, - EfieldRefineOp_, - nonOverwriteInteriorTFfillPattern); + EpatchGhostRefluxedAlgo.registerRefine(*e_reflux_id, 
*e_reflux_id, *e_reflux_id, + EfieldRefineOp_, nonOverwriteInteriorTFfillPattern); - registerGhostComms_(mhdInfo); - registerInitComms_(mhdInfo); - } + registerGhostComms_(mhdInfo); + registerInitComms_(mhdInfo); + } - void registerLevel(std::shared_ptr const& hierarchy, - int const levelNumber) override - { - auto const level = hierarchy->getPatchLevel(levelNumber); + void registerLevel(std::shared_ptr const& hierarchy, + int const levelNumber) override + { + auto const level = hierarchy->getPatchLevel(levelNumber); - magPatchGhostsRefineSchedules[levelNumber] - = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); + magPatchGhostsRefineSchedules[levelNumber] + = BalgoPatchGhost.createSchedule(level, &magneticRefinePatchStrategy_); - elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); + elecPatchGhostsRefineSchedules[levelNumber] = EalgoPatchGhost.createSchedule(level); - EpatchGhostRefluxedSchedules[levelNumber] - = EpatchGhostRefluxedAlgo.createSchedule(level); - HydroXpatchGhostRefluxedSchedules[levelNumber] - = HydroXpatchGhostRefluxedAlgo.createSchedule(level); - HydroYpatchGhostRefluxedSchedules[levelNumber] - = HydroYpatchGhostRefluxedAlgo.createSchedule(level); - HydroZpatchGhostRefluxedSchedules[levelNumber] - = HydroZpatchGhostRefluxedAlgo.createSchedule(level); + EpatchGhostRefluxedSchedules[levelNumber] = EpatchGhostRefluxedAlgo.createSchedule(level); + HydroXpatchGhostRefluxedSchedules[levelNumber] + = HydroXpatchGhostRefluxedAlgo.createSchedule(level); + HydroYpatchGhostRefluxedSchedules[levelNumber] + = HydroYpatchGhostRefluxedAlgo.createSchedule(level); + HydroZpatchGhostRefluxedSchedules[levelNumber] + = HydroZpatchGhostRefluxedAlgo.createSchedule(level); - elecGhostsRefiners_.registerLevel(hierarchy, level); - currentGhostsRefiners_.registerLevel(hierarchy, level); + elecGhostsRefiners_.registerLevel(hierarchy, level); + currentGhostsRefiners_.registerLevel(hierarchy, level); - 
rhoGhostsRefiners_.registerLevel(hierarchy, level); - // velGhostsRefiners_.registerLevel(hierarchy, level); - // pressureGhostsRefiners_.registerLevel(hierarchy, level); + rhoGhostsRefiners_.registerLevel(hierarchy, level); + // velGhostsRefiners_.registerLevel(hierarchy, level); + // pressureGhostsRefiners_.registerLevel(hierarchy, level); - momentumGhostsRefiners_.registerLevel(hierarchy, level); - totalEnergyGhostsRefiners_.registerLevel(hierarchy, level); + momentumGhostsRefiners_.registerLevel(hierarchy, level); + totalEnergyGhostsRefiners_.registerLevel(hierarchy, level); - magFluxesXGhostRefiners_.registerLevel(hierarchy, level); - magFluxesYGhostRefiners_.registerLevel(hierarchy, level); - magFluxesZGhostRefiners_.registerLevel(hierarchy, level); + magFluxesXGhostRefiners_.registerLevel(hierarchy, level); + magFluxesYGhostRefiners_.registerLevel(hierarchy, level); + magFluxesZGhostRefiners_.registerLevel(hierarchy, level); - magGhostsRefiners_.registerLevel(hierarchy, level); + magGhostsRefiners_.registerLevel(hierarchy, level); - if (levelNumber != rootLevelNumber) - { - // refluxing - auto const& coarseLevel = hierarchy->getPatchLevel(levelNumber - 1); - ErefluxSchedules[levelNumber] = ErefluxAlgo.createSchedule(coarseLevel, level); - HydroXrefluxSchedules[levelNumber] - = HydroXrefluxAlgo.createSchedule(coarseLevel, level); - HydroYrefluxSchedules[levelNumber] - = HydroYrefluxAlgo.createSchedule(coarseLevel, level); - HydroZrefluxSchedules[levelNumber] - = HydroZrefluxAlgo.createSchedule(coarseLevel, level); - - // refinement - magInitRefineSchedules[levelNumber] = BalgoInit.createSchedule( - level, nullptr, levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); - - densityInitRefiners_.registerLevel(hierarchy, level); - momentumInitRefiners_.registerLevel(hierarchy, level); - totalEnergyInitRefiners_.registerLevel(hierarchy, level); - } + if (levelNumber != rootLevelNumber) + { + // refluxing + auto const& coarseLevel = 
hierarchy->getPatchLevel(levelNumber - 1); + ErefluxSchedules[levelNumber] = ErefluxAlgo.createSchedule(coarseLevel, level); + HydroXrefluxSchedules[levelNumber] + = HydroXrefluxAlgo.createSchedule(coarseLevel, level); + HydroYrefluxSchedules[levelNumber] + = HydroYrefluxAlgo.createSchedule(coarseLevel, level); + HydroZrefluxSchedules[levelNumber] + = HydroZrefluxAlgo.createSchedule(coarseLevel, level); + + // refinement + magInitRefineSchedules[levelNumber] = BalgoInit.createSchedule( + level, nullptr, levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); + + densityInitRefiners_.registerLevel(hierarchy, level); + momentumInitRefiners_.registerLevel(hierarchy, level); + totalEnergyInitRefiners_.registerLevel(hierarchy, level); } + } - void regrid(std::shared_ptr const& hierarchy, - int const levelNumber, - std::shared_ptr const& oldLevel, - IPhysicalModel& model, double const initDataTime) override - { - auto& mhdModel = static_cast(model); - auto level = hierarchy->getPatchLevel(levelNumber); + void regrid(std::shared_ptr const& hierarchy, + int const levelNumber, std::shared_ptr const& oldLevel, + IPhysicalModel& model, double const initDataTime) override + { + auto& mhdModel = static_cast(model); + auto level = hierarchy->getPatchLevel(levelNumber); - bool isRegriddingL0 = levelNumber == 0 and oldLevel; + bool isRegriddingL0 = levelNumber == 0 and oldLevel; - magneticRegriding_(hierarchy, level, oldLevel, initDataTime); - densityInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); - momentumInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); - totalEnergyInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); + magneticRegriding_(hierarchy, level, oldLevel, initDataTime); + densityInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); + momentumInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); + totalEnergyInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, 
initDataTime); - // magPatchGhostsRefineSchedules[levelNumber]->fillData(initDataTime); - // elecPatchGhostsRefineSchedules[levelNumber]->fillData(initDataTime); - } + // magPatchGhostsRefineSchedules[levelNumber]->fillData(initDataTime); + // elecPatchGhostsRefineSchedules[levelNumber]->fillData(initDataTime); + } - std::string fineModelName() const override { return MHDModel::model_name; } + std::string fineModelName() const override { return MHDModel::model_name; } - std::string coarseModelName() const override { return MHDModel::model_name; } + std::string coarseModelName() const override { return MHDModel::model_name; } - std::unique_ptr emptyInfoFromCoarser() override - { - return std::make_unique(); - } + std::unique_ptr emptyInfoFromCoarser() override + { + return std::make_unique(); + } - std::unique_ptr emptyInfoFromFiner() override - { - return std::make_unique(); - } + std::unique_ptr emptyInfoFromFiner() override + { + return std::make_unique(); + } - void initLevel(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, - double const initDataTime) override - { - auto levelNumber = level.getLevelNumber(); + void initLevel(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, + double const initDataTime) override + { + auto levelNumber = level.getLevelNumber(); - auto& mhdModel = static_cast(model); + auto& mhdModel = static_cast(model); - magInitRefineSchedules[levelNumber]->fillData(initDataTime); - densityInitRefiners_.fill(levelNumber, initDataTime); - momentumInitRefiners_.fill(levelNumber, initDataTime); - totalEnergyInitRefiners_.fill(levelNumber, initDataTime); - } + magInitRefineSchedules[levelNumber]->fillData(initDataTime); + densityInitRefiners_.fill(levelNumber, initDataTime); + momentumInitRefiners_.fill(levelNumber, initDataTime); + totalEnergyInitRefiners_.fill(levelNumber, initDataTime); + } - void firstStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, - std::shared_ptr const& hierarchy, - double const currentTime, double 
const prevCoarserTIme, - double const newCoarserTime) final - { - } + void firstStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, + std::shared_ptr const& hierarchy, + double const currentTime, double const prevCoarserTIme, + double const newCoarserTime) final + { + } - void lastStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level) final {} + void lastStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level) final {} - void prepareStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, - double currentTime) final - { - auto& mhdModel = static_cast(model); - for (auto& patch : level) - { - auto dataOnPatch = resourcesManager_->setOnPatch( - *patch, mhdModel.state.rho, mhdModel.state.V, mhdModel.state.P, - mhdModel.state.rhoV, mhdModel.state.Etot, mhdModel.state.J, rhoOld_, Vold_, - Pold_, rhoVold_, EtotOld_, Jold_); - - resourcesManager_->setTime(rhoOld_, *patch, currentTime); - resourcesManager_->setTime(Vold_, *patch, currentTime); - resourcesManager_->setTime(Pold_, *patch, currentTime); - resourcesManager_->setTime(rhoVold_, *patch, currentTime); - resourcesManager_->setTime(EtotOld_, *patch, currentTime); - resourcesManager_->setTime(Jold_, *patch, currentTime); - - rhoOld_.copyData(mhdModel.state.rho); - Vold_.copyData(mhdModel.state.V); - Pold_.copyData(mhdModel.state.P); - rhoVold_.copyData(mhdModel.state.rhoV); - EtotOld_.copyData(mhdModel.state.Etot); - Jold_.copyData(mhdModel.state.J); - } - } - - void fillRootGhosts(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, - double const initDataTime) final + void prepareStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, + double currentTime) final + { + auto& mhdModel = static_cast(model); + for (auto& patch : level) { + auto dataOnPatch = resourcesManager_->setOnPatch( + *patch, mhdModel.state.rho, mhdModel.state.V, mhdModel.state.P, mhdModel.state.rhoV, + mhdModel.state.Etot, mhdModel.state.J, rhoOld_, Vold_, Pold_, rhoVold_, EtotOld_, + Jold_); + + 
resourcesManager_->setTime(rhoOld_, *patch, currentTime); + resourcesManager_->setTime(Vold_, *patch, currentTime); + resourcesManager_->setTime(Pold_, *patch, currentTime); + resourcesManager_->setTime(rhoVold_, *patch, currentTime); + resourcesManager_->setTime(EtotOld_, *patch, currentTime); + resourcesManager_->setTime(Jold_, *patch, currentTime); + + rhoOld_.copyData(mhdModel.state.rho); + Vold_.copyData(mhdModel.state.V); + Pold_.copyData(mhdModel.state.P); + rhoVold_.copyData(mhdModel.state.rhoV); + EtotOld_.copyData(mhdModel.state.Etot); + Jold_.copyData(mhdModel.state.J); } + } - void synchronize(SAMRAI::hier::PatchLevel& level) final {} - - void reflux(int const coarserLevelNumber, int const fineLevelNumber, - double const syncTime) override - { - ErefluxSchedules[fineLevelNumber]->coarsenData(); - HydroXrefluxSchedules[fineLevelNumber]->coarsenData(); - HydroYrefluxSchedules[fineLevelNumber]->coarsenData(); - HydroZrefluxSchedules[fineLevelNumber]->coarsenData(); - - EpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); - HydroXpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); - HydroYpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); - HydroZpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); - } + void fillRootGhosts(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, + double const initDataTime) final + { + } - void postSynchronize(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, - double const time) override - { - // The ghosts for B are obtained in the solver's reflux_euler. For B, this is because - // refluxing is done through faraday which is computed on the ghost box for the other - // quantities, the ghosts are filled in the end of the euler step anyways. 
- } + void synchronize(SAMRAI::hier::PatchLevel& level) final {} - void fillMomentsGhosts(MHDStateT& state, level_t const& level, double const fillTime) - { - setNaNsOnFieldGhosts(state.rho, level); - setNaNsOnVecfieldGhosts(state.rhoV, level); - setNaNsOnFieldGhosts(state.Etot, level); - rhoGhostsRefiners_.fill(state.rho, level.getLevelNumber(), fillTime); - momentumGhostsRefiners_.fill(state.rhoV, level.getLevelNumber(), fillTime); - totalEnergyGhostsRefiners_.fill(state.Etot, level.getLevelNumber(), fillTime); - } + void reflux(int const coarserLevelNumber, int const fineLevelNumber, + double const syncTime) override + { + ErefluxSchedules[fineLevelNumber]->coarsenData(); + HydroXrefluxSchedules[fineLevelNumber]->coarsenData(); + HydroYrefluxSchedules[fineLevelNumber]->coarsenData(); + HydroZrefluxSchedules[fineLevelNumber]->coarsenData(); + + EpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); + HydroXpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); + HydroYpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); + HydroZpatchGhostRefluxedSchedules[coarserLevelNumber]->fillData(syncTime); + } + + void postSynchronize(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, + double const time) override + { + // The ghosts for B are obtained in the solver's reflux_euler. For B, this is because + // refluxing is done through faraday which is computed on the ghost box for the other + // quantities, the ghosts are filled in the end of the euler step anyways. 
+ } - void fillMagneticFluxesXGhosts(VecFieldT& Fx_B, level_t const& level, double const fillTime) - { - setNaNsOnVecfieldGhosts(Fx_B, level); - magFluxesXGhostRefiners_.fill(Fx_B, level.getLevelNumber(), fillTime); - } + void fillMomentsGhosts(MHDStateT& state, level_t const& level, double const fillTime) + { + setNaNsOnFieldGhosts(state.rho, level); + setNaNsOnVecfieldGhosts(state.rhoV, level); + setNaNsOnFieldGhosts(state.Etot, level); + rhoGhostsRefiners_.fill(state.rho, level.getLevelNumber(), fillTime); + momentumGhostsRefiners_.fill(state.rhoV, level.getLevelNumber(), fillTime); + totalEnergyGhostsRefiners_.fill(state.Etot, level.getLevelNumber(), fillTime); + } + + void fillMagneticFluxesXGhosts(VecFieldT& Fx_B, level_t const& level, double const fillTime) + { + setNaNsOnVecfieldGhosts(Fx_B, level); + magFluxesXGhostRefiners_.fill(Fx_B, level.getLevelNumber(), fillTime); + } - void fillMagneticFluxesYGhosts(VecFieldT& Fy_B, level_t const& level, double const fillTime) - { - setNaNsOnVecfieldGhosts(Fy_B, level); - magFluxesYGhostRefiners_.fill(Fy_B, level.getLevelNumber(), fillTime); - } + void fillMagneticFluxesYGhosts(VecFieldT& Fy_B, level_t const& level, double const fillTime) + { + setNaNsOnVecfieldGhosts(Fy_B, level); + magFluxesYGhostRefiners_.fill(Fy_B, level.getLevelNumber(), fillTime); + } - void fillMagneticFluxesZGhosts(VecFieldT& Fz_B, level_t const& level, double const fillTime) - { - setNaNsOnVecfieldGhosts(Fz_B, level); - magFluxesZGhostRefiners_.fill(Fz_B, level.getLevelNumber(), fillTime); - } + void fillMagneticFluxesZGhosts(VecFieldT& Fz_B, level_t const& level, double const fillTime) + { + setNaNsOnVecfieldGhosts(Fz_B, level); + magFluxesZGhostRefiners_.fill(Fz_B, level.getLevelNumber(), fillTime); + } - void fillElectricGhosts(VecFieldT& E, level_t const& level, double const fillTime) - { - setNaNsOnVecfieldGhosts(E, level); - elecGhostsRefiners_.fill(E, level.getLevelNumber(), fillTime); - } + void fillElectricGhosts(VecFieldT& E, 
level_t const& level, double const fillTime) + { + setNaNsOnVecfieldGhosts(E, level); + elecGhostsRefiners_.fill(E, level.getLevelNumber(), fillTime); + } - void fillMagneticGhosts(VecFieldT& B, level_t const& level, double const fillTime) - { - PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillMagneticGhosts"); + void fillMagneticGhosts(VecFieldT& B, level_t const& level, double const fillTime) + { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillMagneticGhosts"); - setNaNsOnVecfieldGhosts(B, level); - magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); - } + setNaNsOnVecfieldGhosts(B, level); + magGhostsRefiners_.fill(B, level.getLevelNumber(), fillTime); + } - void fillCurrentGhosts(VecFieldT& J, level_t const& level, double const fillTime) + void fillCurrentGhosts(VecFieldT& J, level_t const& level, double const fillTime) + { + setNaNsOnVecfieldGhosts(J, level); + currentGhostsRefiners_.fill(J, level.getLevelNumber(), fillTime); + } + + std::string name() override { return stratName; } + + + +private: + using rm_t = typename MHDModel::resources_manager_type; + using InitRefinerPool = RefinerPool; + using GhostRefinerPool = RefinerPool; + using InitDomPartRefinerPool = RefinerPool; + using FieldRefinePatchStrategyT + = FieldRefinePatchStrategy; + using VectorFieldRefinePatchStrategyT + = FieldRefinePatchStrategy; + using MagneticRefinePatchStrategyT + = MagneticRefinePatchStrategy; + using FieldRefinePatchStrategyList = std::vector>; + using VectorFieldRefinePatchStrategyList + = std::vector>; + using MagneticRefinePatchStrategyList + = std::vector>; + + + // Maybe we also need conservative ghost refiners for amr operations, actually quite + // likely + void registerGhostComms_(std::unique_ptr const& info) + { + // static refinement for J because in MHD it is a temporary, so keeping its + // state updated after each regrid is not a priority. 
However if we do not correctly + // refine on regrid, the post regrid state is not up to date (in our case it will be nan + // since we nan-initialise) and thus is is better to rely on static refinement, which + // uses the state after computation of ampere or CT. + + registerGhostRefinePatchStrategies_(currentPatchStratPerGhostRefiner_, info->ghostCurrent); + for (size_t i = 0; i < info->ghostCurrent.size(); ++i) + currentGhostsRefiners_.addStaticRefiner( + info->ghostCurrent[i], EfieldRefineOp_, info->ghostCurrent[i], + nonOverwriteInteriorTFfillPattern, currentPatchStratPerGhostRefiner_[i]); + + + registerGhostRefinePatchStrategies_(rhoPatchStratPerGhostRefiner_, info->ghostDensity); + for (size_t i = 0; i < info->ghostDensity.size(); ++i) + rhoGhostsRefiners_.addTimeRefiner(info->ghostDensity[i], info->modelDensity, + rhoOld_.name(), mhdFieldRefineOp_, fieldTimeOp_, + info->ghostDensity[i], nonOverwriteFieldFillPattern, + rhoPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(momentumPatchStratPerGhostRefiner_, + info->ghostMomentum); + for (size_t i = 0; i < info->ghostMomentum.size(); ++i) + momentumGhostsRefiners_.addTimeRefiner( + info->ghostMomentum[i], info->modelMomentum, rhoVold_.name(), mhdVecFieldRefineOp_, + vecFieldTimeOp_, info->ghostMomentum[i], nonOverwriteInteriorTFfillPattern, + momentumPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(totalEnergyPatchStratPerGhostRefiner_, + info->ghostTotalEnergy); + for (size_t i = 0; i < info->ghostTotalEnergy.size(); ++i) + totalEnergyGhostsRefiners_.addTimeRefiner( + info->ghostTotalEnergy[i], info->modelTotalEnergy, EtotOld_.name(), + mhdFieldRefineOp_, fieldTimeOp_, info->ghostTotalEnergy[i], + nonOverwriteFieldFillPattern, totalEnergyPatchStratPerGhostRefiner_[i]); + + registerGhostRefinePatchStrategies_(magPatchStratPerGhostRefiner_, info->ghostMagnetic); + for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) + magGhostsRefiners_.addStaticRefiner( + 
info->ghostMagnetic[i], BfieldRegridOp_, info->ghostMagnetic[i], + nonOverwriteInteriorTFfillPattern, magPatchStratPerGhostRefiner_[i]); + } + + + /** + * @brief Register a list of refine patch strategy pointers corresponding to a list of keys. + * + * @tparam RefinePatchStrategyT type inheriting from SAMRAI's `RefinePatchStrategy` + * @param patchStrategies the list of refine patch strategy pointers. + * @param keys the list of keys. + */ + template + void registerGhostRefinePatchStrategies_( + std::vector>& patchStrategies, + std::vector const& keys) + { + patchStrategies.reserve(keys.size()); + for (auto const& key : keys) { - setNaNsOnVecfieldGhosts(J, level); - currentGhostsRefiners_.fill(J, level.getLevelNumber(), fillTime); + auto&& [id] = resourcesManager_->getIDsList(key); + auto patchStrat + = std::make_shared(*resourcesManager_, *boundaryManager_); + patchStrat->registerIDs(id); + patchStrategies.push_back(patchStrat); } + } - std::string name() override { return stratName; } + // should this use conservative quantities ? When should we do the initial conversion ? 
+ // Maybe mhd_init + void registerInitComms_(std::unique_ptr const& info) + { + densityInitRefiners_.addStaticRefiners(info->initDensity, mhdFieldRefineOp_, + info->initDensity); + momentumInitRefiners_.addStaticRefiners(info->initMomentum, mhdVecFieldRefineOp_, + info->initMomentum); + totalEnergyInitRefiners_.addStaticRefiners(info->initTotalEnergy, mhdFieldRefineOp_, + info->initTotalEnergy); + } - private: - using rm_t = typename MHDModel::resources_manager_type; - using InitRefinerPool = RefinerPool; - using GhostRefinerPool = RefinerPool; - using InitDomPartRefinerPool = RefinerPool; - using FieldRefinePatchStrategyT - = FieldRefinePatchStrategy; - using VectorFieldRefinePatchStrategyT - = FieldRefinePatchStrategy; - using MagneticRefinePatchStrategyT - = MagneticRefinePatchStrategy; - using FieldRefinePatchStrategyList - = std::vector>; - using VectorFieldRefinePatchStrategyList - = std::vector>; - using MagneticRefinePatchStrategyList - = std::vector>; + void magneticRegriding_(std::shared_ptr const& hierarchy, + std::shared_ptr const& level, + std::shared_ptr const& oldLevel, double const initDataTime) + { + auto magSchedule = BregridAlgo.createSchedule(level, oldLevel, + level->getNextCoarserHierarchyLevelNumber(), + hierarchy, &magneticRefinePatchStrategy_); + magSchedule->fillData(initDataTime); + } + + /** * @brief setNaNsFieldOnGhosts sets NaNs on the ghost nodes of the field + * + * NaNs are set on all ghost nodes, patch ghost or level ghost nodes + * so that the refinement operators can know nodes at NaN have not been + * touched by schedule copy. 
+ * + * This is needed when the schedule copy is done before refinement + * as a result of FieldVariable::fineBoundaryRepresentsVariable=false + */ + void setNaNsOnFieldGhosts(FieldT& field, patch_t const& patch) + { + auto const qty = field.physicalQuantity(); + using qty_t = std::decay_t; + using field_geometry_t = FieldGeometry; - // Maybe we also need conservative ghost refiners for amr operations, actually quite - // likely - void registerGhostComms_(std::unique_ptr const& info) - { - // static refinement for J because in MHD it is a temporary, so keeping its - // state updated after each regrid is not a priority. However if we do not correctly - // refine on regrid, the post regrid state is not up to date (in our case it will be nan - // since we nan-initialise) and thus is is better to rely on static refinement, which - // uses the state after computation of ampere or CT. - - registerGhostRefinePatchStrategies_(currentPatchStratPerGhostRefiner_, - info->ghostCurrent); - for (size_t i = 0; i < info->ghostCurrent.size(); ++i) - currentGhostsRefiners_.addStaticRefiner( - info->ghostCurrent[i], EfieldRefineOp_, info->ghostCurrent[i], - nonOverwriteInteriorTFfillPattern, currentPatchStratPerGhostRefiner_[i]); - - - registerGhostRefinePatchStrategies_(rhoPatchStratPerGhostRefiner_, info->ghostDensity); - for (size_t i = 0; i < info->ghostDensity.size(); ++i) - rhoGhostsRefiners_.addTimeRefiner( - info->ghostDensity[i], info->modelDensity, rhoOld_.name(), mhdFieldRefineOp_, - fieldTimeOp_, info->ghostDensity[i], nonOverwriteFieldFillPattern, - rhoPatchStratPerGhostRefiner_[i]); - - registerGhostRefinePatchStrategies_(momentumPatchStratPerGhostRefiner_, - info->ghostMomentum); - for (size_t i = 0; i < info->ghostMomentum.size(); ++i) - momentumGhostsRefiners_.addTimeRefiner( - info->ghostMomentum[i], info->modelMomentum, rhoVold_.name(), - mhdVecFieldRefineOp_, vecFieldTimeOp_, info->ghostMomentum[i], - nonOverwriteInteriorTFfillPattern, 
momentumPatchStratPerGhostRefiner_[i]); - - registerGhostRefinePatchStrategies_(totalEnergyPatchStratPerGhostRefiner_, - info->ghostTotalEnergy); - for (size_t i = 0; i < info->ghostTotalEnergy.size(); ++i) - totalEnergyGhostsRefiners_.addTimeRefiner( - info->ghostTotalEnergy[i], info->modelTotalEnergy, EtotOld_.name(), - mhdFieldRefineOp_, fieldTimeOp_, info->ghostTotalEnergy[i], - nonOverwriteFieldFillPattern, totalEnergyPatchStratPerGhostRefiner_[i]); - - registerGhostRefinePatchStrategies_(magPatchStratPerGhostRefiner_, info->ghostMagnetic); - for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) - magGhostsRefiners_.addStaticRefiner( - info->ghostMagnetic[i], BfieldRegridOp_, info->ghostMagnetic[i], - nonOverwriteInteriorTFfillPattern, magPatchStratPerGhostRefiner_[i]); - } + auto const box = patch.getBox(); + auto const layout = layoutFromPatch(patch); + // we need to remove the box from the ghost box + // to use SAMRAI::removeIntersections we do some conversions to + // samrai box. + // not gbox is a fieldBox (thanks to the layout) - /** - * @brief Register a list of refine patch strategy pointers corresponding to a list of keys. - * - * @tparam RefinePatchStrategyT type inheriting from SAMRAI's `RefinePatchStrategy` - * @param patchStrategies the list of refine patch strategy pointers. - * @param keys the list of keys. - */ - template - void registerGhostRefinePatchStrategies_( - std::vector>& patchStrategies, - std::vector const& keys) - { - patchStrategies.reserve(keys.size()); - for (auto const& key : keys) - { - auto&& [id] = resourcesManager_->getIDsList(key); - auto patchStrat - = std::make_shared(*resourcesManager_, *boundaryManager_); - patchStrat->registerIDs(id); - patchStrategies.push_back(patchStrat); - } - } + auto const gbox = layout.AMRGhostBoxFor(field.physicalQuantity()); + auto const sgbox = samrai_box_from(gbox); + auto const fbox = field_geometry_t::toFieldBox(box, qty, layout); - // should this use conservative quantities ? 
When should we do the initial conversion ? - // Maybe mhd_init - void registerInitComms_(std::unique_ptr const& info) - { - densityInitRefiners_.addStaticRefiners(info->initDensity, mhdFieldRefineOp_, - info->initDensity); + // we have field samrai boxes so we can now remove one from the other + SAMRAI::hier::BoxContainer ghostLayerBoxes{}; + ghostLayerBoxes.removeIntersections(sgbox, fbox); - momentumInitRefiners_.addStaticRefiners(info->initMomentum, mhdVecFieldRefineOp_, - info->initMomentum); + // and now finally set the NaNs on the ghost boxes + for (auto const& gb : ghostLayerBoxes) + for (auto const& index : layout.AMRToLocal(phare_box_from(gb))) + field(index) = std::numeric_limits::quiet_NaN(); + } - totalEnergyInitRefiners_.addStaticRefiners(info->initTotalEnergy, mhdFieldRefineOp_, - info->initTotalEnergy); - } + void setNaNsOnFieldGhosts(FieldT& field, level_t const& level) + { + for (auto& patch : resourcesManager_->enumerate(level, field)) + setNaNsOnFieldGhosts(field, *patch); + } + void setNaNsOnVecfieldGhosts(VecFieldT& vf, level_t const& level) + { + for (auto& patch : resourcesManager_->enumerate(level, vf)) + for (auto& component : vf) + setNaNsOnFieldGhosts(component, *patch); + } + + + FieldT rhoOld_{stratName + "rhoOld", core::MHDQuantity::Scalar::rho}; + VecFieldT Vold_{stratName + "Vold", core::MHDQuantity::Vector::V}; + FieldT Pold_{stratName + "Pold", core::MHDQuantity::Scalar::P}; - void magneticRegriding_(std::shared_ptr const& hierarchy, - std::shared_ptr const& level, - std::shared_ptr const& oldLevel, double const initDataTime) - { - auto magSchedule = BregridAlgo.createSchedule( - level, oldLevel, level->getNextCoarserHierarchyLevelNumber(), hierarchy, - &magneticRefinePatchStrategy_); - magSchedule->fillData(initDataTime); - } + VecFieldT rhoVold_{stratName + "rhoVold", core::MHDQuantity::Vector::rhoV}; + FieldT EtotOld_{stratName + "EtotOld", core::MHDQuantity::Scalar::Etot}; + + VecFieldT Jold_{stratName + "Jold", 
core::MHDQuantity::Vector::J}; + + std::shared_ptr resourcesManager_; + std::shared_ptr boundaryManager_; + int const firstLevel_; + + + SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; + SAMRAI::xfer::RefineAlgorithm BalgoInit; + SAMRAI::xfer::RefineAlgorithm BregridAlgo; + SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; + std::map> magInitRefineSchedules; + std::map> magGhostsRefineSchedules; + std::map> magPatchGhostsRefineSchedules; + std::map> elecPatchGhostsRefineSchedules; + std::map> magSharedNodeRefineSchedules; - /** * @brief setNaNsFieldOnGhosts sets NaNs on the ghost nodes of the field - * - * NaNs are set on all ghost nodes, patch ghost or level ghost nodes - * so that the refinement operators can know nodes at NaN have not been - * touched by schedule copy. - * - * This is needed when the schedule copy is done before refinement - * as a result of FieldVariable::fineBoundaryRepresentsVariable=false - */ - void setNaNsOnFieldGhosts(FieldT& field, patch_t const& patch) - { - auto const qty = field.physicalQuantity(); - using qty_t = std::decay_t; - using field_geometry_t = FieldGeometry; - - auto const box = patch.getBox(); - auto const layout = layoutFromPatch(patch); - - // we need to remove the box from the ghost box - // to use SAMRAI::removeIntersections we do some conversions to - // samrai box. 
- // not gbox is a fieldBox (thanks to the layout) - - auto const gbox = layout.AMRGhostBoxFor(field.physicalQuantity()); - auto const sgbox = samrai_box_from(gbox); - auto const fbox = field_geometry_t::toFieldBox(box, qty, layout); - - // we have field samrai boxes so we can now remove one from the other - SAMRAI::hier::BoxContainer ghostLayerBoxes{}; - ghostLayerBoxes.removeIntersections(sgbox, fbox); - - // and now finally set the NaNs on the ghost boxes - for (auto const& gb : ghostLayerBoxes) - for (auto const& index : layout.AMRToLocal(phare_box_from(gb))) - field(index) = std::numeric_limits::quiet_NaN(); - } + SAMRAI::xfer::CoarsenAlgorithm ErefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; + SAMRAI::xfer::CoarsenAlgorithm HydroXrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; + SAMRAI::xfer::CoarsenAlgorithm HydroYrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; + SAMRAI::xfer::CoarsenAlgorithm HydroZrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; - void setNaNsOnFieldGhosts(FieldT& field, level_t const& level) - { - for (auto& patch : resourcesManager_->enumerate(level, field)) - setNaNsOnFieldGhosts(field, *patch); - } + SAMRAI::xfer::RefineAlgorithm EpatchGhostRefluxedAlgo; + SAMRAI::xfer::RefineAlgorithm HydroXpatchGhostRefluxedAlgo; + SAMRAI::xfer::RefineAlgorithm HydroYpatchGhostRefluxedAlgo; + SAMRAI::xfer::RefineAlgorithm HydroZpatchGhostRefluxedAlgo; - void setNaNsOnVecfieldGhosts(VecFieldT& vf, level_t const& level) - { - for (auto& patch : resourcesManager_->enumerate(level, vf)) - for (auto& component : vf) - setNaNsOnFieldGhosts(component, *patch); - } + std::map> ErefluxSchedules; + std::map> HydroXrefluxSchedules; + std::map> HydroYrefluxSchedules; + std::map> HydroZrefluxSchedules; + std::map> EpatchGhostRefluxedSchedules; + std::map> HydroXpatchGhostRefluxedSchedules; + std::map> HydroYpatchGhostRefluxedSchedules; + std::map> HydroZpatchGhostRefluxedSchedules; - FieldT rhoOld_{stratName + "rhoOld", core::MHDQuantity::Scalar::rho}; - VecFieldT 
Vold_{stratName + "Vold", core::MHDQuantity::Vector::V}; - FieldT Pold_{stratName + "Pold", core::MHDQuantity::Scalar::P}; - - VecFieldT rhoVold_{stratName + "rhoVold", core::MHDQuantity::Vector::rhoV}; - FieldT EtotOld_{stratName + "EtotOld", core::MHDQuantity::Scalar::Etot}; + GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; + GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; + GhostRefinerPool rhoGhostsRefiners_{resourcesManager_}; + // GhostRefinerPool velGhostsRefiners_{resourcesManager_}; + // GhostRefinerPool pressureGhostsRefiners_{resourcesManager_}; + GhostRefinerPool momentumGhostsRefiners_{resourcesManager_}; + GhostRefinerPool totalEnergyGhostsRefiners_{resourcesManager_}; + GhostRefinerPool magFluxesXGhostRefiners_{resourcesManager_}; + GhostRefinerPool magFluxesYGhostRefiners_{resourcesManager_}; + GhostRefinerPool magFluxesZGhostRefiners_{resourcesManager_}; - VecFieldT Jold_{stratName + "Jold", core::MHDQuantity::Vector::J}; + GhostRefinerPool magGhostsRefiners_{resourcesManager_}; - std::shared_ptr resourcesManager_; - std::shared_ptr boundaryManager_; - int const firstLevel_; - - - SAMRAI::xfer::RefineAlgorithm BalgoPatchGhost; - SAMRAI::xfer::RefineAlgorithm BalgoInit; - SAMRAI::xfer::RefineAlgorithm BregridAlgo; - SAMRAI::xfer::RefineAlgorithm EalgoPatchGhost; - std::map> magInitRefineSchedules; - std::map> magGhostsRefineSchedules; - std::map> magPatchGhostsRefineSchedules; - std::map> elecPatchGhostsRefineSchedules; - std::map> magSharedNodeRefineSchedules; + InitRefinerPool densityInitRefiners_{resourcesManager_}; + InitRefinerPool momentumInitRefiners_{resourcesManager_}; + InitRefinerPool totalEnergyInitRefiners_{resourcesManager_}; - SAMRAI::xfer::CoarsenAlgorithm ErefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; - SAMRAI::xfer::CoarsenAlgorithm HydroXrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; - SAMRAI::xfer::CoarsenAlgorithm HydroYrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; - SAMRAI::xfer::CoarsenAlgorithm 
HydroZrefluxAlgo{SAMRAI::tbox::Dimension{dimension}}; + // SynchronizerPool densitySynchronizers_{resourcesManager_}; + // SynchronizerPool momentumSynchronizers_{resourcesManager_}; + // SynchronizerPool magnetoSynchronizers_{resourcesManager_}; + // SynchronizerPool totalEnergySynchronizers_{resourcesManager_}; - SAMRAI::xfer::RefineAlgorithm EpatchGhostRefluxedAlgo; - SAMRAI::xfer::RefineAlgorithm HydroXpatchGhostRefluxedAlgo; - SAMRAI::xfer::RefineAlgorithm HydroYpatchGhostRefluxedAlgo; - SAMRAI::xfer::RefineAlgorithm HydroZpatchGhostRefluxedAlgo; - - std::map> ErefluxSchedules; - std::map> HydroXrefluxSchedules; - std::map> HydroYrefluxSchedules; - std::map> HydroZrefluxSchedules; + using RefOp_ptr = std::shared_ptr; + using CoarsenOp_ptr = std::shared_ptr; + using TimeOp_ptr = std::shared_ptr; - std::map> EpatchGhostRefluxedSchedules; - std::map> - HydroXpatchGhostRefluxedSchedules; - std::map> - HydroYpatchGhostRefluxedSchedules; - std::map> - HydroZpatchGhostRefluxedSchedules; + template + using FieldRefineOp = FieldRefineOperator; - GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; - GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; - GhostRefinerPool rhoGhostsRefiners_{resourcesManager_}; - // GhostRefinerPool velGhostsRefiners_{resourcesManager_}; - // GhostRefinerPool pressureGhostsRefiners_{resourcesManager_}; - GhostRefinerPool momentumGhostsRefiners_{resourcesManager_}; - GhostRefinerPool totalEnergyGhostsRefiners_{resourcesManager_}; - GhostRefinerPool magFluxesXGhostRefiners_{resourcesManager_}; - GhostRefinerPool magFluxesYGhostRefiners_{resourcesManager_}; - GhostRefinerPool magFluxesZGhostRefiners_{resourcesManager_}; + template + using VecFieldRefineOp = VecFieldRefineOperator; - GhostRefinerPool magGhostsRefiners_{resourcesManager_}; + using DefaultVecFieldRefineOp = VecFieldRefineOp>; + using MagneticFieldRefineOp = VecFieldRefineOp>; + using MagneticFieldRegridOp = VecFieldRefineOp>; + using ElectricFieldRefineOp = 
VecFieldRefineOp>; - InitRefinerPool densityInitRefiners_{resourcesManager_}; - InitRefinerPool momentumInitRefiners_{resourcesManager_}; - InitRefinerPool totalEnergyInitRefiners_{resourcesManager_}; + using MHDFluxRefineOp = FieldRefineOp>; + using MHDVecFluxRefineOp = VecFieldRefineOp>; + using MHDFieldRefineOp = FieldRefineOp>; + using MHDVecFieldRefineOp = VecFieldRefineOp>; - // SynchronizerPool densitySynchronizers_{resourcesManager_}; - // SynchronizerPool momentumSynchronizers_{resourcesManager_}; - // SynchronizerPool magnetoSynchronizers_{resourcesManager_}; - // SynchronizerPool totalEnergySynchronizers_{resourcesManager_}; + using FieldTimeInterp = FieldLinearTimeInterpolate; - using RefOp_ptr = std::shared_ptr; - using CoarsenOp_ptr = std::shared_ptr; - using TimeOp_ptr = std::shared_ptr; + using VecFieldTimeInterp = VecFieldLinearTimeInterpolate; - template - using FieldRefineOp = FieldRefineOperator; + template + using FieldCoarseningOp = FieldCoarsenOperator; - template - using VecFieldRefineOp = VecFieldRefineOperator; + template + using VecFieldCoarsenOp + = VecFieldCoarsenOperator; - using DefaultVecFieldRefineOp = VecFieldRefineOp>; - using MagneticFieldRefineOp = VecFieldRefineOp>; - using MagneticFieldRegridOp = VecFieldRefineOp>; - using ElectricFieldRefineOp = VecFieldRefineOp>; + using MHDFluxCoarsenOp = FieldCoarseningOp>; + using MHDVecFluxCoarsenOp = VecFieldCoarsenOp>; + using ElectricFieldCoarsenOp = VecFieldCoarsenOp>; - using MHDFluxRefineOp = FieldRefineOp>; - using MHDVecFluxRefineOp = VecFieldRefineOp>; - using MHDFieldRefineOp = FieldRefineOp>; - using MHDVecFieldRefineOp = VecFieldRefineOp>; + SynchronizerPool electroSynchronizers_{resourcesManager_}; - using FieldTimeInterp = FieldLinearTimeInterpolate; + RefOp_ptr mhdFluxRefineOp_{std::make_shared()}; + RefOp_ptr mhdVecFluxRefineOp_{std::make_shared()}; + RefOp_ptr mhdFieldRefineOp_{std::make_shared()}; + RefOp_ptr mhdVecFieldRefineOp_{std::make_shared()}; + RefOp_ptr 
EfieldRefineOp_{std::make_shared()}; + RefOp_ptr BfieldRefineOp_{std::make_shared()}; + RefOp_ptr BfieldRegridOp_{std::make_shared()}; + + TimeOp_ptr fieldTimeOp_{std::make_shared()}; + TimeOp_ptr vecFieldTimeOp_{std::make_shared()}; + + using TensorFieldFillPattern_t = TensorFieldFillPattern; + using FieldFillPattern_t = FieldFillPattern; + + std::shared_ptr nonOverwriteFieldFillPattern + = std::make_shared>(); // stateless (mostly) + + std::shared_ptr nonOverwriteInteriorTFfillPattern + = std::make_shared>(); - using VecFieldTimeInterp - = VecFieldLinearTimeInterpolate; + std::shared_ptr overwriteInteriorTFfillPattern + = std::make_shared>( + /*overwrite_interior=*/true); - template - using FieldCoarseningOp = FieldCoarsenOperator; + CoarsenOp_ptr mhdFluxCoarseningOp_{std::make_shared()}; + CoarsenOp_ptr mhdVecFluxCoarseningOp_{std::make_shared()}; + CoarsenOp_ptr electricFieldCoarseningOp_{std::make_shared()}; - template - using VecFieldCoarsenOp - = VecFieldCoarsenOperator; - - using MHDFluxCoarsenOp = FieldCoarseningOp>; - using MHDVecFluxCoarsenOp = VecFieldCoarsenOp>; - using ElectricFieldCoarsenOp = VecFieldCoarsenOp>; - - SynchronizerPool electroSynchronizers_{resourcesManager_}; - - RefOp_ptr mhdFluxRefineOp_{std::make_shared()}; - RefOp_ptr mhdVecFluxRefineOp_{std::make_shared()}; - RefOp_ptr mhdFieldRefineOp_{std::make_shared()}; - RefOp_ptr mhdVecFieldRefineOp_{std::make_shared()}; - RefOp_ptr EfieldRefineOp_{std::make_shared()}; - RefOp_ptr BfieldRefineOp_{std::make_shared()}; - RefOp_ptr BfieldRegridOp_{std::make_shared()}; - - TimeOp_ptr fieldTimeOp_{std::make_shared()}; - TimeOp_ptr vecFieldTimeOp_{std::make_shared()}; - - using TensorFieldFillPattern_t = TensorFieldFillPattern; - using FieldFillPattern_t = FieldFillPattern; - - std::shared_ptr nonOverwriteFieldFillPattern - = std::make_shared>(); // stateless (mostly) - - std::shared_ptr nonOverwriteInteriorTFfillPattern - = std::make_shared>(); - - std::shared_ptr overwriteInteriorTFfillPattern - 
= std::make_shared>( - /*overwrite_interior=*/true); - - CoarsenOp_ptr mhdFluxCoarseningOp_{std::make_shared()}; - CoarsenOp_ptr mhdVecFluxCoarseningOp_{std::make_shared()}; - CoarsenOp_ptr electricFieldCoarseningOp_{std::make_shared()}; - - MagneticRefinePatchStrategyT magneticRefinePatchStrategy_{*resourcesManager_, - *boundaryManager_}; + MagneticRefinePatchStrategyT magneticRefinePatchStrategy_{*resourcesManager_, + *boundaryManager_}; - FieldRefinePatchStrategyList rhoPatchStratPerGhostRefiner_; - FieldRefinePatchStrategyList totalEnergyPatchStratPerGhostRefiner_; - VectorFieldRefinePatchStrategyList momentumPatchStratPerGhostRefiner_; - MagneticRefinePatchStrategyList magPatchStratPerGhostRefiner_; + FieldRefinePatchStrategyList rhoPatchStratPerGhostRefiner_; + FieldRefinePatchStrategyList totalEnergyPatchStratPerGhostRefiner_; + VectorFieldRefinePatchStrategyList momentumPatchStratPerGhostRefiner_; + MagneticRefinePatchStrategyList magPatchStratPerGhostRefiner_; - VectorFieldRefinePatchStrategyList currentPatchStratPerGhostRefiner_; - }; + VectorFieldRefinePatchStrategyList currentPatchStratPerGhostRefiner_; +}; -} // namespace amr -} // namespace PHARE +} // namespace PHARE::amr #endif diff --git a/src/amr/physical_models/mhd_model.hpp b/src/amr/physical_models/mhd_model.hpp index fca7a8fb5..9d7513478 100644 --- a/src/amr/physical_models/mhd_model.hpp +++ b/src/amr/physical_models/mhd_model.hpp @@ -3,8 +3,8 @@ #include "core/def.hpp" #include "core/def/phare_mpi.hpp" // IWYU pragma: keep -#include "core/mhd/mhd_quantities.hpp" #include "core/models/mhd_state.hpp" +#include "core/mhd/mhd_quantities.hpp" #include "core/boundary/boundary_manager.hpp" #include "amr/messengers/mhd_messenger_info.hpp" @@ -13,10 +13,9 @@ #include -#include -#include #include #include +#include namespace PHARE::solver diff --git a/src/core/boundary/boundary.hpp b/src/core/boundary/boundary.hpp index aa05a3506..4d793b37f 100644 --- a/src/core/boundary/boundary.hpp +++ 
b/src/core/boundary/boundary.hpp @@ -2,15 +2,15 @@ #define PHARE_CORE_BOUNDARY_BOUNDARY_HPP #include "core/boundary/boundary_defs.hpp" -#include "core/data/field/field_traits.hpp" #include "core/data/vecfield/vecfield.hpp" +#include "core/data/field/field_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" #include "core/numerics/boundary_condition/field_boundary_condition_factory.hpp" -#include #include -#include #include +#include +#include namespace PHARE::core { diff --git a/src/core/boundary/boundary_defs.hpp b/src/core/boundary/boundary_defs.hpp index 389ef9a2a..3691626de 100644 --- a/src/core/boundary/boundary_defs.hpp +++ b/src/core/boundary/boundary_defs.hpp @@ -3,7 +3,7 @@ #include "core/data/grid/gridlayoutdefs.hpp" -#include "unordered_map" +#include namespace PHARE::core { @@ -135,10 +135,9 @@ inline BoundaryLocation getBoundaryLocationFromString(std::string const& name) {"zlower", BoundaryLocation::ZLower}, {"zupper", BoundaryLocation::ZUpper}, }; - auto it = typeMap_.find(name); - if (it == typeMap_.end()) - throw std::runtime_error("Wrong boundary location name = " + name); - return it->second; + if (typeMap_.count(name)) + return typeMap_.at(name); + throw std::runtime_error("Wrong boundary location name = " + name); } } // namespace PHARE::core diff --git a/src/core/boundary/boundary_factory.hpp b/src/core/boundary/boundary_factory.hpp index 055b9f3cf..da0b11764 100644 --- a/src/core/boundary/boundary_factory.hpp +++ b/src/core/boundary/boundary_factory.hpp @@ -1,8 +1,8 @@ #ifndef PHARE_CORE_BOUNDARY_BOUNDARY_FACTORY #define PHARE_CORE_BOUNDARY_BOUNDARY_FACTORY -#include "core/boundary/boundary_defs.hpp" #include "core/boundary/boundary.hpp" +#include "core/boundary/boundary_defs.hpp" #include "core/data/field/field_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" diff --git a/src/core/boundary/boundary_manager.hpp b/src/core/boundary/boundary_manager.hpp index 4262494fa..8e3a52287 100644 --- 
a/src/core/boundary/boundary_manager.hpp +++ b/src/core/boundary/boundary_manager.hpp @@ -3,21 +3,17 @@ #include "core/boundary/boundary.hpp" #include "core/boundary/boundary_defs.hpp" -#include "core/boundary/boundary_factory.hpp" +#include "core/data/vecfield/vecfield.hpp" #include "core/data/field/field_traits.hpp" +#include "core/boundary/boundary_factory.hpp" #include "core/data/grid/gridlayout_traits.hpp" -#include "core/data/vecfield/vecfield.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" -#include "core/numerics/boundary_condition/field_boundary_condition_factory.hpp" #include "initializer/data_provider.hpp" -#include #include -#include -#include +#include #include -#include namespace PHARE::core { diff --git a/src/core/data/field/field_traits.hpp b/src/core/data/field/field_traits.hpp index 86db2f12d..a760a10a0 100644 --- a/src/core/data/field/field_traits.hpp +++ b/src/core/data/field/field_traits.hpp @@ -23,7 +23,6 @@ concept IsField = requires(T field) { { field.name() } -> std::convertible_to; { field.physicalQuantity() } -> std::same_as; - { field.isUsable() } -> std::same_as; { field.data() } -> std::same_as; // Inherited from NdArrayView requires((T::dimension == 1 && requires(T f) { diff --git a/src/core/data/grid/gridlayout_traits.hpp b/src/core/data/grid/gridlayout_traits.hpp index df355d332..e18650fa1 100644 --- a/src/core/data/grid/gridlayout_traits.hpp +++ b/src/core/data/grid/gridlayout_traits.hpp @@ -1,13 +1,13 @@ #ifndef PHARE_CORE_GRID_GRIDLAYOUT_TRAITS_HPP #define PHARE_CORE_GRID_GRIDLAYOUT_TRAITS_HPP -#include "core/data/grid/gridlayoutdefs.hpp" #include "core/utilities/box/box.hpp" #include "core/utilities/point/point.hpp" +#include "core/data/grid/gridlayoutdefs.hpp" -#include #include #include +#include namespace PHARE::core { diff --git a/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp index 
c7c311814..fd1c50828 100644 --- a/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp @@ -2,9 +2,9 @@ #define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_ANTISYMMETRIC_BOUNDARY_CONDITION_HPP #include "core/data/grid/gridlayoutdefs.hpp" -#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" -#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" namespace PHARE::core { @@ -67,7 +67,7 @@ class FieldAntiSymmetricBoundaryCondition Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) { - constexpr std::array centerings = {Centerings...}; + constexpr std::array centerings = {Centerings...}; // no other way than using a lambda builder auto fields = [&]() { @@ -78,8 +78,8 @@ class FieldAntiSymmetricBoundaryCondition }(); for_N([&](auto i) { - constexpr QtyCentering centering = centerings[i]; - field_type& field = std::get(fields); + constexpr auto centering = centerings[i]; + field_type& field = std::get(fields); if constexpr ((i != static_cast(direction)) || is_scalar) // if the component is tangent to the boundary, or if we are handling a scalar { diff --git a/src/core/numerics/boundary_condition/field_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_boundary_condition.hpp index fe33492d1..59a40d04b 100644 --- a/src/core/numerics/boundary_condition/field_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_boundary_condition.hpp @@ -1,11 +1,11 @@ #ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP #define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP 
+#include "core/utilities/box/box.hpp" #include "core/boundary/boundary_defs.hpp" #include "core/data/field/field_traits.hpp" -#include "core/data/tensorfield/tensorfield_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" -#include "core/utilities/box/box.hpp" +#include "core/data/tensorfield/tensorfield_traits.hpp" #include diff --git a/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp b/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp index 048fd808c..70aa8578a 100644 --- a/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp +++ b/src/core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp @@ -2,8 +2,8 @@ #define PHARE_CORE_DATA_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_DISPATCHER #include "core/boundary/boundary_defs.hpp" -#include "core/data/grid/gridlayout_traits.hpp" #include "core/data/grid/gridlayoutdefs.hpp" +#include "core/data/grid/gridlayout_traits.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" @@ -55,8 +55,7 @@ class FieldBoundaryConditionDispatcher * Triggers the recursive dispatching of centerings, directions, and sides to * specialized implementations. 
*/ - void apply(ScalarOrTensorFieldT& scalarOrTensorField, - BoundaryLocation const boundaryLocation, + void apply(ScalarOrTensorFieldT& scalarOrTensorField, BoundaryLocation const boundaryLocation, Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) override { diff --git a/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp index 5f0250da1..42990b086 100644 --- a/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp +++ b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp @@ -1,8 +1,8 @@ #ifndef PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY #define PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY -#include "core/data/tensorfield/tensorfield_traits.hpp" #include "core/data/vecfield/vecfield_traits.hpp" +#include "core/data/tensorfield/tensorfield_traits.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" diff --git a/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp index 01f2aed28..37f5f9016 100644 --- a/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp @@ -73,7 +73,7 @@ class FieldDirichletBoundaryCondition Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) { - constexpr std::array centerings = {Centerings...}; + constexpr std::array centerings = {Centerings...}; auto fields = [&]() { if constexpr (is_scalar) diff --git a/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp index 6d384d703..e1235a53f 
100644 --- a/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp @@ -2,8 +2,8 @@ #define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_DIVERGENCE_FREE_TRANSVERSE_NEUMANN_BOUNDARY_CONDITION_HPP #include "core/data/grid/gridlayoutdefs.hpp" -#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" #include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" #include @@ -80,7 +80,7 @@ class FieldDivergenceFreeTransverseNeumannBoundaryCondition void apply_specialized(VecFieldT& vecField, Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) { - constexpr std::array centerings = {Centerings...}; + constexpr std::array centerings = {Centerings...}; auto fields = vecField.components(); diff --git a/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp index f3ba31f08..20738ecaf 100644 --- a/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_neumann_boundary_condition.hpp @@ -68,7 +68,7 @@ class FieldNeumannBoundaryCondition { using Index = Point; - constexpr std::array centerings = {Centerings...}; + constexpr std::array centerings = {Centerings...}; // no other way than using a lambda builder auto fields = [&]() { diff --git a/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp index bd79423ff..b6f0ae71e 100644 --- a/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp @@ -2,9 +2,9 @@ #define 
PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_SYMMETRIC_BOUNDARY_CONDITION_HPP #include "core/data/grid/gridlayoutdefs.hpp" -#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" -#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" namespace PHARE::core { @@ -66,7 +66,7 @@ class FieldSymmetricBoundaryCondition Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) { - constexpr std::array centerings = {Centerings...}; + constexpr std::array centerings = {Centerings...}; // no other way than using a lambda builder auto fields = [&]() { From c06ca6f53ff3cfd5222bc651a9e1c0ed5f4c2e1e Mon Sep 17 00:00:00 2001 From: Ivan Girault Date: Thu, 26 Feb 2026 18:49:46 +0100 Subject: [PATCH 7/7] get reflective boundary to work; handle edges and corners; --- res/cmake/test.cmake | 2 +- .../refine/field_refine_patch_strategy.hpp | 77 ++++++---- src/amr/messengers/mhd_messenger.hpp | 79 +++++----- src/amr/physical_models/mhd_model.hpp | 16 +- .../time_integrator/compute_fluxes.hpp | 5 +- src/core/boundary/boundary.hpp | 15 +- src/core/boundary/boundary_defs.hpp | 139 +++++++++++++++--- src/core/boundary/boundary_factory.hpp | 10 +- src/core/boundary/boundary_manager.hpp | 82 +++++++++-- .../field_boundary_condition.hpp | 17 +-- .../field_boundary_condition_factory.hpp | 29 ++-- .../field_none_boundary_condition.hpp | 54 +++++++ .../boundary/boundary_manager/CMakeLists.txt | 23 +++ .../test_boundary_manager.cpp | 100 +++++++++++++ 14 files changed, 509 insertions(+), 139 deletions(-) create mode 100644 src/core/numerics/boundary_condition/field_none_boundary_condition.hpp create mode 100644 tests/core/boundary/boundary_manager/CMakeLists.txt create mode 
100644 tests/core/boundary/boundary_manager/test_boundary_manager.cpp diff --git a/res/cmake/test.cmake b/res/cmake/test.cmake index 26bd60283..6f265de18 100644 --- a/res/cmake/test.cmake +++ b/res/cmake/test.cmake @@ -6,7 +6,7 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests configure_file(${CMAKE_SOURCE_DIR}/tests/__init__.py ${CMAKE_BINARY_DIR}/tests/__init__.py @ONLY) - + add_subdirectory(tests/core/boundary/boundary_manager) add_subdirectory(tests/core/data/ndarray) add_subdirectory(tests/core/data/grid) add_subdirectory(tests/core/data/gridlayout) diff --git a/src/amr/data/field/refine/field_refine_patch_strategy.hpp b/src/amr/data/field/refine/field_refine_patch_strategy.hpp index 74fc113a4..7d50b5326 100644 --- a/src/amr/data/field/refine/field_refine_patch_strategy.hpp +++ b/src/amr/data/field/refine/field_refine_patch_strategy.hpp @@ -2,18 +2,18 @@ #define PHARE_AMR_FIELD_REFINE_PATCH_STRATEGY_HPP -#include "core/boundary/boundary_defs.hpp" -#include "core/numerics/boundary_condition/field_boundary_condition.hpp" - #include "amr/data/field/field_data_traits.hpp" #include "amr/data/tensorfield/tensor_field_data_traits.hpp" +#include "core/boundary/boundary_defs.hpp" +#include "core/numerics/boundary_condition/field_boundary_condition.hpp" + +#include "SAMRAI/geom/CartesianPatchGeometry.h" +#include "SAMRAI/hier/BoundaryBox.h" #include "SAMRAI/hier/Box.h" #include "SAMRAI/hier/IntVector.h" -#include "SAMRAI/hier/BoundaryBox.h" #include "SAMRAI/hier/PatchGeometry.h" #include "SAMRAI/xfer/RefinePatchStrategy.h" -#include "SAMRAI/geom/CartesianPatchGeometry.h" #include #include @@ -106,9 +106,6 @@ class FieldRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy std::shared_ptr patchGeom = std::static_pointer_cast(patch.getPatchGeometry()); - std::vector const& boundaries - = patchGeom->getCodimensionBoundaries(static_cast(core::BoundaryCodim::One)); - auto scalarOrTensorField = [&]() { if constexpr (is_scalar) { @@ -123,28 +120,52 @@ 
class FieldRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy // must be retrieved to pass as argument to patchGeom->getBoundaryFillBox later SAMRAI::hier::Box const& patch_box = patch.getBox(); - for (SAMRAI::hier::BoundaryBox const& bBox : boundaries) - { - // Boundary definitions in PHARE matches those of SAMRAI - core::BoundaryLocation const bLoc - = static_cast(bBox.getLocationIndex()); - - SAMRAI::hier::Box samraiBoxToFill - = patchGeom->getBoundaryFillBox(bBox, patch_box, ghost_width_to_fill); - auto localBox = gridLayout.AMRToLocal(phare_box_from(samraiBoxToFill)); - - std::shared_ptr boundary = boundaryManager_.getBoundary(bLoc); - if (!boundary) - throw std::runtime_error("boundary not found."); - std::shared_ptr bc - = boundary->getFieldCondition(scalarOrTensorField.physicalQuantity()); - if (!bc) - throw std::runtime_error("boundary condition not found."); - - bc->apply(scalarOrTensorField, bLoc, localBox, gridLayout, fill_time); - }; + // iterations on potential boundary codimensions in [[1, dim]] + core::for_N([&](auto tag) { + constexpr auto codim = tag.value + 1; + + // find all boundaries with the current codimension + std::vector const& boundaries + = patchGeom->getCodimensionBoundaries(static_cast(codim)); + + // iterate on all found boundaries of given codimension + for (SAMRAI::hier::BoundaryBox const& bBox : boundaries) + { + // retrieve the localBox of ghost that must be filled + SAMRAI::hier::Box samraiBoxToFill + = patchGeom->getBoundaryFillBox(bBox, patch_box, ghost_width_to_fill); + auto localBox = gridLayout.AMRToLocal(phare_box_from(samraiBoxToFill)); + + // get location of the currently treated boundary + auto const currentBoundaryLocation + = static_cast>(bBox.getLocationIndex()); + + // get the primary 1-codimensional boundary that applies at the currently treated + // boundary. 
If the current boundary is itself 1-codimensional, then + // masterBoundaryLocation = currentBoundaryLocation + core::BoundaryLocation const masterBoundaryLocation + = boundaryManager_.getMasterBoundaryLocation(currentBoundaryLocation); + std::shared_ptr masterBoundary + = boundaryManager_.getBoundary(masterBoundaryLocation); + if (!masterBoundary) + throw std::runtime_error("Boundary not found."); + + // get the boundary condition for the current physical quantity + std::shared_ptr bc + = masterBoundary->getFieldCondition(scalarOrTensorField.physicalQuantity()); + if (!bc) + throw std::runtime_error("Field boundary condition not found."); + + // apply the boundary condition as if the current boundary was belonging to the + // primary boundary + bc->apply(scalarOrTensorField, masterBoundaryLocation, localBox, gridLayout, + fill_time); + } + }); } + + SAMRAI::hier::IntVector getRefineOpStencilWidth(SAMRAI::tbox::Dimension const& dim) const override { diff --git a/src/amr/messengers/mhd_messenger.hpp b/src/amr/messengers/mhd_messenger.hpp index 913e786ef..98aa79d33 100644 --- a/src/amr/messengers/mhd_messenger.hpp +++ b/src/amr/messengers/mhd_messenger.hpp @@ -1,25 +1,25 @@ #ifndef PHARE_MHD_MESSENGER_HPP #define PHARE_MHD_MESSENGER_HPP -#include "amr/messengers/refiner.hpp" -#include "amr/messengers/messenger.hpp" -#include "amr/messengers/refiner_pool.hpp" -#include "amr/messengers/messenger_info.hpp" -#include "amr/messengers/synchronizer_pool.hpp" -#include "amr/messengers/mhd_messenger_info.hpp" -#include "amr/data/field/refine/mhd_flux_refiner.hpp" -#include "amr/data/field/refine/mhd_field_refiner.hpp" -#include "amr/data/field/field_variable_fill_pattern.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/coarsening/electric_field_coarsener.hpp" +#include "amr/data/field/coarsening/field_coarsen_operator.hpp" #include "amr/data/field/coarsening/mhd_flux_coarsener.hpp" +#include 
"amr/data/field/field_variable_fill_pattern.hpp" #include "amr/data/field/refine/electric_field_refiner.hpp" +#include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/refine/field_refine_patch_strategy.hpp" #include "amr/data/field/refine/magnetic_field_refiner.hpp" #include "amr/data/field/refine/magnetic_field_regrider.hpp" -#include "amr/data/field/coarsening/field_coarsen_operator.hpp" -#include "amr/data/field/refine/field_refine_patch_strategy.hpp" -#include "amr/data/field/coarsening/electric_field_coarsener.hpp" #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" +#include "amr/data/field/refine/mhd_field_refiner.hpp" +#include "amr/data/field/refine/mhd_flux_refiner.hpp" #include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" +#include "amr/messengers/messenger.hpp" +#include "amr/messengers/messenger_info.hpp" +#include "amr/messengers/mhd_messenger_info.hpp" +#include "amr/messengers/refiner.hpp" +#include "amr/messengers/refiner_pool.hpp" +#include "amr/messengers/synchronizer_pool.hpp" #include "core/mhd/mhd_quantities.hpp" @@ -376,6 +376,7 @@ class MHDMessenger : public IMessenger totalEnergyInitRefiners_.fill(levelNumber, initDataTime); } + void firstStep(IPhysicalModel& model, SAMRAI::hier::PatchLevel& level, std::shared_ptr const& hierarchy, double const currentTime, double const prevCoarserTIme, @@ -523,41 +524,46 @@ class MHDMessenger : public IMessenger // since we nan-initialise) and thus is is better to rely on static refinement, which // uses the state after computation of ampere or CT. 
- registerGhostRefinePatchStrategies_(currentPatchStratPerGhostRefiner_, info->ghostCurrent); - for (size_t i = 0; i < info->ghostCurrent.size(); ++i) - currentGhostsRefiners_.addStaticRefiner( - info->ghostCurrent[i], EfieldRefineOp_, info->ghostCurrent[i], - nonOverwriteInteriorTFfillPattern, currentPatchStratPerGhostRefiner_[i]); - - - registerGhostRefinePatchStrategies_(rhoPatchStratPerGhostRefiner_, info->ghostDensity); + // registerGhostRefinePatchStrategies_(currentPatchStrats, + // info->ghostCurrent); + // for (size_t i = 0; i < info->ghostCurrent.size(); ++i) + // currentGhostsRefiners_.addStaticRefiner( + // info->ghostCurrent[i], EfieldRefineOp_, info->ghostCurrent[i], + // nonOverwriteInteriorTFfillPattern, currentPatchStrats[i]); + registerGhostRefinePatchStrategies_(rhoPatchStrats, info->ghostDensity); for (size_t i = 0; i < info->ghostDensity.size(); ++i) rhoGhostsRefiners_.addTimeRefiner(info->ghostDensity[i], info->modelDensity, rhoOld_.name(), mhdFieldRefineOp_, fieldTimeOp_, info->ghostDensity[i], nonOverwriteFieldFillPattern, - rhoPatchStratPerGhostRefiner_[i]); + rhoPatchStrats[i]); - registerGhostRefinePatchStrategies_(momentumPatchStratPerGhostRefiner_, - info->ghostMomentum); + registerGhostRefinePatchStrategies_(momentumPatchStrats, info->ghostMomentum); for (size_t i = 0; i < info->ghostMomentum.size(); ++i) momentumGhostsRefiners_.addTimeRefiner( info->ghostMomentum[i], info->modelMomentum, rhoVold_.name(), mhdVecFieldRefineOp_, vecFieldTimeOp_, info->ghostMomentum[i], nonOverwriteInteriorTFfillPattern, - momentumPatchStratPerGhostRefiner_[i]); + momentumPatchStrats[i]); - registerGhostRefinePatchStrategies_(totalEnergyPatchStratPerGhostRefiner_, - info->ghostTotalEnergy); + registerGhostRefinePatchStrategies_(totalEnergyPatchStrats, info->ghostTotalEnergy); for (size_t i = 0; i < info->ghostTotalEnergy.size(); ++i) totalEnergyGhostsRefiners_.addTimeRefiner( info->ghostTotalEnergy[i], info->modelTotalEnergy, EtotOld_.name(), 
mhdFieldRefineOp_, fieldTimeOp_, info->ghostTotalEnergy[i], - nonOverwriteFieldFillPattern, totalEnergyPatchStratPerGhostRefiner_[i]); + nonOverwriteFieldFillPattern, totalEnergyPatchStrats[i]); - registerGhostRefinePatchStrategies_(magPatchStratPerGhostRefiner_, info->ghostMagnetic); + registerGhostRefinePatchStrategies_(magPatchStrats, info->ghostMagnetic); for (size_t i = 0; i < info->ghostMagnetic.size(); ++i) magGhostsRefiners_.addStaticRefiner( info->ghostMagnetic[i], BfieldRegridOp_, info->ghostMagnetic[i], - nonOverwriteInteriorTFfillPattern, magPatchStratPerGhostRefiner_[i]); + nonOverwriteInteriorTFfillPattern, magPatchStrats[i]); + + // The refiner for the electric field only serve for filling ghost at physical + // boundaries. + registerGhostRefinePatchStrategies_(elecPatchStrats, info->ghostElectric); + for (size_t i = 0; i < info->ghostElectric.size(); ++i) + elecGhostsRefiners_.addStaticRefiner(info->ghostElectric[i], nullptr, + info->ghostElectric[i], nullptr, + elecPatchStrats[i]); } @@ -794,13 +800,14 @@ class MHDMessenger : public IMessenger MagneticRefinePatchStrategyT magneticRefinePatchStrategy_{*resourcesManager_, *boundaryManager_}; - FieldRefinePatchStrategyList rhoPatchStratPerGhostRefiner_; - FieldRefinePatchStrategyList totalEnergyPatchStratPerGhostRefiner_; - VectorFieldRefinePatchStrategyList momentumPatchStratPerGhostRefiner_; - MagneticRefinePatchStrategyList magPatchStratPerGhostRefiner_; + FieldRefinePatchStrategyList rhoPatchStrats; + FieldRefinePatchStrategyList totalEnergyPatchStrats; + VectorFieldRefinePatchStrategyList momentumPatchStrats; + VectorFieldRefinePatchStrategyList elecPatchStrats; + MagneticRefinePatchStrategyList magPatchStrats; - VectorFieldRefinePatchStrategyList currentPatchStratPerGhostRefiner_; + VectorFieldRefinePatchStrategyList currentPatchStrats; }; - } // namespace PHARE::amr + #endif diff --git a/src/amr/physical_models/mhd_model.hpp b/src/amr/physical_models/mhd_model.hpp index 9d7513478..1bcd3cfc2 
100644 --- a/src/amr/physical_models/mhd_model.hpp +++ b/src/amr/physical_models/mhd_model.hpp @@ -1,21 +1,19 @@ #ifndef PHARE_MHD_MODEL_HPP #define PHARE_MHD_MODEL_HPP -#include "core/def.hpp" -#include "core/def/phare_mpi.hpp" // IWYU pragma: keep -#include "core/models/mhd_state.hpp" -#include "core/mhd/mhd_quantities.hpp" -#include "core/boundary/boundary_manager.hpp" - #include "amr/messengers/mhd_messenger_info.hpp" #include "amr/physical_models/physical_model.hpp" #include "amr/resources_manager/resources_manager.hpp" -#include +#include "core/boundary/boundary_manager.hpp" +#include "core/def.hpp" +#include "core/def/phare_mpi.hpp" // IWYU pragma: keep +#include "core/mhd/mhd_quantities.hpp" +#include "core/models/mhd_state.hpp" +#include #include #include -#include namespace PHARE::solver @@ -92,7 +90,7 @@ class MHDModel : public IPhysicalModel = {core::MHDQuantity::Scalar::rho, core::MHDQuantity::Scalar::Etot}; std::vector vectorQuantities = { core::MHDQuantity::Vector::B, - core::MHDQuantity::Vector::J, + // core::MHDQuantity::Vector::J, core::MHDQuantity::Vector::E, core::MHDQuantity::Vector::rhoV, }; diff --git a/src/amr/solvers/time_integrator/compute_fluxes.hpp b/src/amr/solvers/time_integrator/compute_fluxes.hpp index 1cfabfd26..7d048f1e1 100644 --- a/src/amr/solvers/time_integrator/compute_fluxes.hpp +++ b/src/amr/solvers/time_integrator/compute_fluxes.hpp @@ -1,9 +1,10 @@ #ifndef PHARE_CORE_NUMERICS_TIME_INTEGRATOR_COMPUTE_FLUXES_HPP #define PHARE_CORE_NUMERICS_TIME_INTEGRATOR_COMPUTE_FLUXES_HPP -#include "initializer/data_provider.hpp" #include "amr/solvers/solver_mhd_model_view.hpp" +#include "initializer/data_provider.hpp" + namespace PHARE::solver { template typename FVMethodStrategy, typename MHDModel> @@ -72,7 +73,7 @@ class ComputeFluxes // ct_(level, model, state, fluxes); - // bc.fillElectricGhosts(state.E, level, newTime); + bc.fillElectricGhosts(state.E, level, newTime); } void registerResources(MHDModel& model) diff --git 
a/src/core/boundary/boundary.hpp b/src/core/boundary/boundary.hpp index 4d793b37f..5edbc18dd 100644 --- a/src/core/boundary/boundary.hpp +++ b/src/core/boundary/boundary.hpp @@ -2,15 +2,15 @@ #define PHARE_CORE_BOUNDARY_BOUNDARY_HPP #include "core/boundary/boundary_defs.hpp" -#include "core/data/vecfield/vecfield.hpp" #include "core/data/field/field_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" +#include "core/data/vecfield/vecfield.hpp" #include "core/numerics/boundary_condition/field_boundary_condition_factory.hpp" -#include -#include #include +#include #include +#include namespace PHARE::core { @@ -31,6 +31,7 @@ template class Boundary { public: + using This = Boundary; using scalar_quantity_type = FieldT::physical_quantity_type; static_assert(std::same_as); using vector_quantity_type = PhysicalQuantityT::Vector; @@ -114,6 +115,14 @@ class Boundary } } + /** + * @brief Define comparison of boundaries based on the enum @c BoundaryType . + */ + std::strong_ordering operator<=>(This const& other) const + { + return this->getType() <=> other.getType(); + } + private: using _scalar_field_condition_map_type = std::unordered_map>; diff --git a/src/core/boundary/boundary_defs.hpp b/src/core/boundary/boundary_defs.hpp index 3691626de..9eb36eaa2 100644 --- a/src/core/boundary/boundary_defs.hpp +++ b/src/core/boundary/boundary_defs.hpp @@ -7,14 +7,10 @@ namespace PHARE::core { -/** - * @brief Physical behavior of a boundary. - */ +/** @brief Physical behavior of a boundary. */ enum class BoundaryType { None, Reflective, Inflow, Outflow, Open }; -/* - * @brief Possible codimension of a boundary. - */ +/** @brief Possible codimension of a boundary. */ enum class BoundaryCodim { One = 1, Two = 2, Three = 3 }; //@{ @@ -42,9 +38,11 @@ enum class BoundaryLocation { ZUpper = 5 }; -/// @brief Return the side of a boundary location. -/// @param boundaryLoc The boundary location. -/// @return The boundary side. +/** + * @brief Return the side of a boundary location. 
+ * @param boundaryLoc The boundary location. + * @return The boundary side. + */ constexpr Side getSide(BoundaryLocation boundaryLoc) { switch (boundaryLoc) @@ -61,9 +59,10 @@ constexpr Side getSide(BoundaryLocation boundaryLoc) } }; -/// @brief Return the direction of a boundary location. -/// @param boundaryLoc The boundary location. -/// @return The boundary direction. +/** @brief Return the direction of a boundary location. + * @param boundaryLoc The boundary location. + * @return The boundary direction. + */ constexpr Direction getDirection(BoundaryLocation boundaryLoc) { switch (boundaryLoc) @@ -81,30 +80,114 @@ constexpr Direction getDirection(BoundaryLocation boundaryLoc) } }; -/* - * @brief Possible locations of a 2-codimensional boundary (an edge in 3D, a corner in 2D) - */ +/** @brief Possible locations of a 2-codimensional boundary (an edge in 3D, a corner in 2D) */ enum class Codim2BoundaryLocation { XLower_YLower = 0, - XHI_YLower = 1, + XUpper_YLower = 1, XLower_YUpper = 2, - XHI_YUpper = 3 + XUpper_YUpper = 3, + XLower_ZLower = 4, + XUpper_ZLower = 5, + XLower_ZUpper = 6, + XUpper_ZUpper = 7, + YLower_ZLower = 8, + YUpper_ZLower = 9, + YLower_ZUpper = 10, + YUpper_ZUpper = 11 }; -/* - * @brief Possible locations of a 3-codimensional boundary (a corner in 3D) +/** + * @brief Return the location of the two (1-codimensional) boundaries adjacent to a 2-codimensional + * boundary. + * @param The location of the 2-codimensional boundary. + * @return An array containing the two locations of the adjacent boundaries. 
*/ +constexpr std::array +getAdjacentBoundaryLocations(Codim2BoundaryLocation location) +{ + switch (location) + { + // X-Y Edges + case Codim2BoundaryLocation::XLower_YLower: + return {BoundaryLocation::XLower, BoundaryLocation::YLower}; + case Codim2BoundaryLocation::XUpper_YLower: + return {BoundaryLocation::XUpper, BoundaryLocation::YLower}; + case Codim2BoundaryLocation::XLower_YUpper: + return {BoundaryLocation::XLower, BoundaryLocation::YUpper}; + case Codim2BoundaryLocation::XUpper_YUpper: + return {BoundaryLocation::XUpper, BoundaryLocation::YUpper}; + + // X-Z Edges + case Codim2BoundaryLocation::XLower_ZLower: + return {BoundaryLocation::XLower, BoundaryLocation::ZLower}; + case Codim2BoundaryLocation::XUpper_ZLower: + return {BoundaryLocation::XUpper, BoundaryLocation::ZLower}; + case Codim2BoundaryLocation::XLower_ZUpper: + return {BoundaryLocation::XLower, BoundaryLocation::ZUpper}; + case Codim2BoundaryLocation::XUpper_ZUpper: + return {BoundaryLocation::XUpper, BoundaryLocation::ZUpper}; + + // Y-Z Edges + case Codim2BoundaryLocation::YLower_ZLower: + return {BoundaryLocation::YLower, BoundaryLocation::ZLower}; + case Codim2BoundaryLocation::YUpper_ZLower: + return {BoundaryLocation::YUpper, BoundaryLocation::ZLower}; + case Codim2BoundaryLocation::YLower_ZUpper: + return {BoundaryLocation::YLower, BoundaryLocation::ZUpper}; + case Codim2BoundaryLocation::YUpper_ZUpper: + return {BoundaryLocation::YUpper, BoundaryLocation::ZUpper}; + + default: return {}; + } +} + +/** @brief Possible locations of a 3-codimensional boundary (a corner in 3D) */ enum class Codim3BoundaryLocation { XLower_YLower_ZLower = 0, - XHI_YLower_ZLower = 1, + XUpper_YLower_ZLower = 1, XLower_YUpper_ZLower = 2, - XHI_YUpper_ZLower = 3, + XUpper_YUpper_ZLower = 3, XLower_YLower_ZUpper = 4, - XHI_YLower_ZUpper = 5, + XUpper_YLower_ZUpper = 5, XLower_YUpper_ZUpper = 6, - XHI_YUpper_ZUpper = 7 + XUpper_YUpper_ZUpper = 7 }; +/** + * @brief Return the location of the three 
(1-codimensional) boundaries adjacent to a + * 3-codimensional boundary. + * @param The location of the 3-codimensional boundary. + * @return An array containing the three locations of the adjacent boundaries. + */ +constexpr std::array +getAdjacentBoundaryLocations(Codim3BoundaryLocation location) +{ + switch (location) + { + // Lower Z Plane + case Codim3BoundaryLocation::XLower_YLower_ZLower: + return {BoundaryLocation::XLower, BoundaryLocation::YLower, BoundaryLocation::ZLower}; + case Codim3BoundaryLocation::XUpper_YLower_ZLower: + return {BoundaryLocation::XUpper, BoundaryLocation::YLower, BoundaryLocation::ZLower}; + case Codim3BoundaryLocation::XLower_YUpper_ZLower: + return {BoundaryLocation::XLower, BoundaryLocation::YUpper, BoundaryLocation::ZLower}; + case Codim3BoundaryLocation::XUpper_YUpper_ZLower: + return {BoundaryLocation::XUpper, BoundaryLocation::YUpper, BoundaryLocation::ZLower}; + + // Upper Z Plane + case Codim3BoundaryLocation::XLower_YLower_ZUpper: + return {BoundaryLocation::XLower, BoundaryLocation::YLower, BoundaryLocation::ZUpper}; + case Codim3BoundaryLocation::XUpper_YLower_ZUpper: + return {BoundaryLocation::XUpper, BoundaryLocation::YLower, BoundaryLocation::ZUpper}; + case Codim3BoundaryLocation::XLower_YUpper_ZUpper: + return {BoundaryLocation::XLower, BoundaryLocation::YUpper, BoundaryLocation::ZUpper}; + case Codim3BoundaryLocation::XUpper_YUpper_ZUpper: + return {BoundaryLocation::XUpper, BoundaryLocation::YUpper, BoundaryLocation::ZUpper}; + + default: return {}; + } +} + /** * @brief Get the BoundaryType from input keyword, and throw and error if the keyword does not * correspond to any known boundary type. @@ -140,6 +223,16 @@ inline BoundaryLocation getBoundaryLocationFromString(std::string const& name) throw std::runtime_error("Wrong boundary location name = " + name); } +/** + * @brief Meta utilities to retrieve the enum type of boundary location depending on the + * codimension. + * @tparam N Codimension value. 
+ */ +template +using CodimNBoundaryLocation = std::tuple_element_t< + N - 1, std::tuple>; + + } // namespace PHARE::core #endif /* PHARE_CORE_BOUNDARY_BOUNDARY_DEFS_HPP */ diff --git a/src/core/boundary/boundary_factory.hpp b/src/core/boundary/boundary_factory.hpp index da0b11764..18f503bad 100644 --- a/src/core/boundary/boundary_factory.hpp +++ b/src/core/boundary/boundary_factory.hpp @@ -15,7 +15,7 @@ namespace PHARE::core { /** - * @brief This class contains all the recipes to create a boundary object according to the desired + * @brief Contains all the recipes to create a boundary object according to the desired * type of physical boundary (reflective, open, ...). It can extracts all the necessary data from * the input data dict associated to the boundary (value of physical quantities on the boundary for * an Inflow condition for instance), and create the right boundary conditions associated to each @@ -112,6 +112,10 @@ class BoundaryFactory boundary->template registerFieldCondition< FieldBoundaryConditionType::AntiSymmetric>(quantity); break; + case (PhysicalQuantityT::Vector::E): + boundary->template registerFieldCondition< + FieldBoundaryConditionType::AntiSymmetric>(quantity); + break; default: boundary ->template registerFieldCondition( @@ -139,6 +143,10 @@ class BoundaryFactory boundary->template registerFieldCondition< FieldBoundaryConditionType::DivergenceFreeTransverseNeumann>(quantity); break; + case (PhysicalQuantityT::Vector::E): + boundary->template registerFieldCondition( + quantity); + break; default: boundary->template registerFieldCondition( quantity); diff --git a/src/core/boundary/boundary_manager.hpp b/src/core/boundary/boundary_manager.hpp index 8e3a52287..5ca7c929a 100644 --- a/src/core/boundary/boundary_manager.hpp +++ b/src/core/boundary/boundary_manager.hpp @@ -3,16 +3,19 @@ #include "core/boundary/boundary.hpp" #include "core/boundary/boundary_defs.hpp" -#include "core/data/vecfield/vecfield.hpp" -#include 
"core/data/field/field_traits.hpp" #include "core/boundary/boundary_factory.hpp" +#include "core/data/field/field_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" +#include "core/data/vecfield/vecfield.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" #include "initializer/data_provider.hpp" -#include +#include #include +#include +#include +#include #include namespace PHARE::core @@ -62,6 +65,9 @@ class BoundaryManager boundaries_[location] = boundary_factory_type::create( location, dict[locationName], scalarQuantities, vectorQuantities); }); + + /// @todo If this mode stays in the code it should be read from the input dict. + priority_policy_ = PriorityPolicy::ByDirection; } @@ -78,25 +84,83 @@ class BoundaryManager return (it != boundaries_.end()) ? it->second : nullptr; } + /** @brief Describes how the master boundary is chosen at corner and edges */ + enum class PriorityPolicy { + ByDirection, + ByBoundaryType, + }; -private: - using _boundary_map_type = std::unordered_map>; + void setPriorityPolicy(PriorityPolicy policy) { priority_policy_ = policy; } - /** - * @brief Utility struct to group scalar and vector quantities together + /** @brief Gets the master 1-codimensional boundary for any given N-codimensional boundary, + * following the priority policy of the boundary manager. + * + * @note If @p location corresponds itself to a 1-codim boundary, then it returns the same + * @p location. * + * @tparam CodimNBoundaryLocationT Type of boundary location. + * @param location The location of the boundary where we want to determine which is the master + * boundary. + * @return The location of the master boundary. 
*/ + template + BoundaryLocation getMasterBoundaryLocation(CodimNBoundaryLocationT location) const + { + if constexpr (std::same_as) + { + return location; + } + else + { + return selectMasterBoundaryInArray_(getAdjacentBoundaryLocations(location)); + } + } + +private: + using _boundary_map_type = std::unordered_map>; + + /** @brief Utility struct to group scalar and vector quantities together */ struct SimulationMenu { std::vector const& scalars; std::vector const& vectors; }; + + _boundary_map_type boundaries_; //!< List of boundaries mapped by their location. + PriorityPolicy priority_policy_; //!< How the master boundary is chosen at corners and edges. + /** - * @brief List of boundaries mapped by their location + * @brief Worker function to get the master of an array of 1-codimensional boundary locations, + * according to the priority policy of the boundary manager. * + * @tparam N Number of elements in the array + * @param locations Array of boundary locations. + * @return The location of the master boundary. 
*/ - _boundary_map_type boundaries_; + template + BoundaryLocation selectMasterBoundaryInArray_(std::array locations) const + { + switch (priority_policy_) + { + case PriorityPolicy::ByDirection: { + auto it = std::ranges::max_element(locations, {}, getDirection); + return *it; + } + + case PriorityPolicy::ByBoundaryType: { + auto it = std::ranges::max_element(locations, {}, [&](auto location) { + if (auto boundaryPtr = getBoundary(location); boundaryPtr) + return boundaryPtr->getType(); + else + throw std::runtime_error("Pointer to boundary is null."); + }); + return *it; + } + + default: throw std::runtime_error("Non-existing priority mode for boundaries."); + } + } }; } // namespace PHARE::core diff --git a/src/core/numerics/boundary_condition/field_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_boundary_condition.hpp index 59a40d04b..0147fcd31 100644 --- a/src/core/numerics/boundary_condition/field_boundary_condition.hpp +++ b/src/core/numerics/boundary_condition/field_boundary_condition.hpp @@ -1,13 +1,11 @@ #ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP #define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_BOUNDARY_CONDITION_HPP -#include "core/utilities/box/box.hpp" #include "core/boundary/boundary_defs.hpp" #include "core/data/field/field_traits.hpp" #include "core/data/grid/gridlayout_traits.hpp" #include "core/data/tensorfield/tensorfield_traits.hpp" - -#include +#include "core/utilities/box/box.hpp" namespace PHARE::core { @@ -15,10 +13,9 @@ namespace PHARE::core /** * @brief Supported types of field boundary conditions. * - * @note The enum fields are ordered from lowest to highest priority at edges/corner. 
- * */ enum class FieldBoundaryConditionType : int { + None, Dirichlet, AntiSymmetric, Symmetric, @@ -75,16 +72,6 @@ class IFieldBoundaryCondition Box const& localGhostBox, GridLayoutT const& gridLayout, double const time) = 0; - - - /** - * @brief Define comparison of field boundary conditions based on the enum @c - * ScalarOrTensorFieldT. - */ - std::strong_ordering operator<=>(This const& other) const - { - return this->getType() <=> other.getType(); - } }; } // namespace PHARE::core diff --git a/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp index 42990b086..a5fdcb7e5 100644 --- a/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp +++ b/src/core/numerics/boundary_condition/field_boundary_condition_factory.hpp @@ -1,15 +1,15 @@ #ifndef PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY #define PHARE_CORE_NUMERICS_FIELD_BOUNDARY_CONDITION_FACTORY -#include "core/data/vecfield/vecfield_traits.hpp" #include "core/data/tensorfield/tensorfield_traits.hpp" - +#include "core/data/vecfield/vecfield_traits.hpp" +#include "core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_boundary_condition.hpp" -#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_dirichlet_boundary_condition.hpp" -#include "core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp" -#include "core/numerics/boundary_condition/field_antisymmetric_boundary_condition.hpp" #include "core/numerics/boundary_condition/field_divergence_free_transverse_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_neumann_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_none_boundary_condition.hpp" +#include "core/numerics/boundary_condition/field_symmetric_boundary_condition.hpp" 
#include #include @@ -45,36 +45,41 @@ class FieldBoundaryConditionFactory static std::unique_ptr> create(Args&&... args) { - if constexpr (type == FieldBoundaryConditionType::Neumann) + if constexpr (type == FieldBoundaryConditionType::None) + { + return std::make_unique>( + std::forward(args)...); + } + else if constexpr (type == FieldBoundaryConditionType::Neumann) { return std::make_unique< FieldNeumannBoundaryCondition>( - std::forward(args)...); + std::forward(args)...); } else if constexpr (type == FieldBoundaryConditionType::Dirichlet) { return std::make_unique< FieldDirichletBoundaryCondition>( - std::forward(args)...); + std::forward(args)...); } else if constexpr (type == FieldBoundaryConditionType::Symmetric) { return std::make_unique< FieldSymmetricBoundaryCondition>( - std::forward(args)...); + std::forward(args)...); } else if constexpr (type == FieldBoundaryConditionType::AntiSymmetric) { return std::make_unique< FieldAntiSymmetricBoundaryCondition>( - std::forward(args)...); + std::forward(args)...); } else if constexpr (type == FieldBoundaryConditionType::DivergenceFreeTransverseNeumann) { if constexpr (IsVecField) { return std::make_unique>(std::forward(args)...); + ScalarOrTensorFieldT, GridLayoutT>>(std::forward(args)...); } else { @@ -84,7 +89,7 @@ class FieldBoundaryConditionFactory } else { - throw std::runtime_error("Unhandled FieldBoundaryConditionType"); + static_assert(false, "Unhandled FieldBoundaryConditionType"); }; } }; diff --git a/src/core/numerics/boundary_condition/field_none_boundary_condition.hpp b/src/core/numerics/boundary_condition/field_none_boundary_condition.hpp new file mode 100644 index 000000000..619e1817d --- /dev/null +++ b/src/core/numerics/boundary_condition/field_none_boundary_condition.hpp @@ -0,0 +1,54 @@ +#ifndef PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NONE_BOUNDARY_CONDITION_HPP +#define PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NONE_BOUNDARY_CONDITION_HPP + +#include "core/data/grid/gridlayoutdefs.hpp" 
+#include "core/numerics/boundary_condition/field_boundary_condition_dispatcher.hpp" + +#include + +namespace PHARE::core +{ +/** + * @brief 'None' boundary condition for scalar and vector fields. + * + * @tparam ScalarOrTensorFieldT Type of the field or tensor field. + * @tparam GridLayoutT Grid layout configuration. + */ +template +class FieldNoneBoundaryCondition + : public FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldNoneBoundaryCondition> +{ +public: + using Super = FieldBoundaryConditionDispatcher< + ScalarOrTensorFieldT, GridLayoutT, + FieldNoneBoundaryCondition>; + + static constexpr size_t dimension = Super::dimension; + + FieldNoneBoundaryCondition() = default; + + FieldNoneBoundaryCondition(FieldNoneBoundaryCondition const&) = default; + FieldNoneBoundaryCondition& operator=(FieldNoneBoundaryCondition const&) = default; + FieldNoneBoundaryCondition(FieldNoneBoundaryCondition&&) = default; + FieldNoneBoundaryCondition& operator=(FieldNoneBoundaryCondition&&) = default; + + virtual ~FieldNoneBoundaryCondition() = default; + + + FieldBoundaryConditionType getType() const override { return FieldBoundaryConditionType::None; } + + + /** @brief Do nothing. 
*/ + template + void apply_specialized(ScalarOrTensorFieldT& scalarOrTensorField, + Box const& localGhostBox, + GridLayoutT const& gridLayout, double const time) + { + } +}; // class FieldNoneBoundaryCondition + +} // namespace PHARE::core + +#endif // PHARE_CORE_NUMERICS_BOUNDARY_CONDITION_FIELD_NONE_BOUNDARY_CONDITION_HPP diff --git a/tests/core/boundary/boundary_manager/CMakeLists.txt b/tests/core/boundary/boundary_manager/CMakeLists.txt new file mode 100644 index 000000000..1f67dc90a --- /dev/null +++ b/tests/core/boundary/boundary_manager/CMakeLists.txt @@ -0,0 +1,23 @@ +cmake_minimum_required (VERSION 3.20.1) + +project(test-boundary-manager) + +function(_add_test test_name) + + add_executable(${test_name} ${test_name}.cpp) + + target_include_directories(${test_name} PRIVATE + ${GTEST_INCLUDE_DIRS} + ) + + target_link_libraries(${test_name} PRIVATE + phare_initializer + phare_amr + ${GTEST_LIBS} +) + + add_no_mpi_phare_test(${test_name} ${CMAKE_CURRENT_BINARY_DIR}) + +endfunction(_box_test) + +_add_test(test_boundary_manager) diff --git a/tests/core/boundary/boundary_manager/test_boundary_manager.cpp b/tests/core/boundary/boundary_manager/test_boundary_manager.cpp new file mode 100644 index 000000000..f9e48405a --- /dev/null +++ b/tests/core/boundary/boundary_manager/test_boundary_manager.cpp @@ -0,0 +1,100 @@ +#include "core/boundary/boundary_manager.hpp" +#include "core/mhd/mhd_quantities.hpp" + +#include "initializer/data_provider.hpp" + +#include "simulator/phare_types.hpp" + +#include "gtest/gtest.h" + +#include + +using namespace PHARE::core; + +constexpr size_t dimension = 3; +constexpr PHARE::SimOpts opts{dimension}; +constexpr std::size_t rank = 1; +using types = PHARE::amr::PHARE_Types::core_types; +using grid_type = types::Grid_MHD; +using field_type = grid_type::field_type; +using grid_layout_type = types::GridLayout_MHD; +using physical_quantity_type = MHDQuantity; +using boundary_type = Boundary; +using boundary_manager_type = BoundaryManager; + 
+boundary_manager_type createBoundaryManager() +{ + PHARE::initializer::PHAREDict dict; + dict["xlower"]["type"] = std::string{"none"}; + dict["xupper"]["type"] = std::string{"none"}; + dict["ylower"]["type"] = std::string{"reflective"}; + dict["yupper"]["type"] = std::string{"reflective"}; + dict["zlower"]["type"] = std::string{"open"}; + dict["zupper"]["type"] = std::string{"open"}; + + + boundary_manager_type bm{dict, {}, {}}; + + return bm; +} + +TEST(BoundaryManager, hasPriorityPolicyByDirection) +{ + auto bm = createBoundaryManager(); + bm.setPriorityPolicy(boundary_manager_type::PriorityPolicy::ByDirection); + + for (size_t i = 0; i < NUM_3D_EDGES; ++i) + { + auto codim2loc = static_cast(i); + BoundaryLocation actual = bm.getMasterBoundaryLocation(codim2loc); + BoundaryLocation expected = getAdjacentBoundaryLocations(codim2loc)[1]; + EXPECT_EQ(actual, expected); + } + + for (size_t i = 0; i < NUM_3D_NODES; ++i) + { + auto codim3loc = static_cast(i); + BoundaryLocation actual = bm.getMasterBoundaryLocation(codim3loc); + BoundaryLocation expected = getAdjacentBoundaryLocations(codim3loc)[1]; + EXPECT_EQ(actual, expected); + } +} + +TEST(BoundaryManager, hasPriorityPolicyByBoundaryTypes) +{ + auto bm = createBoundaryManager(); + bm.setPriorityPolicy(boundary_manager_type::PriorityPolicy::ByBoundaryType); + + for (size_t i = 0; i < NUM_3D_EDGES; ++i) + { + auto codim2loc = static_cast(i); + BoundaryLocation masterLoc = bm.getMasterBoundaryLocation(codim2loc); + boundary_type& masterBoundary = *(bm.getBoundary(masterLoc)); + std::array adjacentLocations = getAdjacentBoundaryLocations(codim2loc); + for (auto loc : adjacentLocations) + { + boundary_type& adjacentBoundary = *(bm.getBoundary(loc)); + EXPECT_TRUE(masterBoundary.getType() >= adjacentBoundary.getType()); + } + } + + for (size_t i = 0; i < NUM_3D_NODES; ++i) + { + auto codim3loc = static_cast(i); + BoundaryLocation masterLoc = bm.getMasterBoundaryLocation(codim3loc); + boundary_type& masterBoundary = 
*(bm.getBoundary(masterLoc)); + std::array adjacentLocations = getAdjacentBoundaryLocations(codim3loc); + for (auto loc : adjacentLocations) + { + boundary_type& adjacentBoundary = *(bm.getBoundary(loc)); + EXPECT_TRUE(masterBoundary >= adjacentBoundary); + } + } +} + +int main(int argc, char** argv) +{ + ::testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +}