diff --git a/pyphare/pyphare/pharein/diagnostics.py b/pyphare/pyphare/pharein/diagnostics.py index bd0bce293..61eee5645 100644 --- a/pyphare/pyphare/pharein/diagnostics.py +++ b/pyphare/pyphare/pharein/diagnostics.py @@ -307,7 +307,7 @@ def __init__(self, **kwargs): class ParticleDiagnostics(Diagnostics): - particle_quantities = ["space_box", "domain", "levelGhost", "patchGhost"] + particle_quantities = ["space_box", "domain", "levelGhost"] type = "particle" def __init__(self, **kwargs): diff --git a/pyphare/pyphare/pharesee/hierarchy/fromh5.py b/pyphare/pyphare/pharesee/hierarchy/fromh5.py index fd3266d4e..369ac2379 100644 --- a/pyphare/pyphare/pharesee/hierarchy/fromh5.py +++ b/pyphare/pyphare/pharesee/hierarchy/fromh5.py @@ -21,7 +21,7 @@ h5_time_grp_key = "t" -particle_files_patterns = ("domain", "patchGhost", "levelGhost") +particle_files_patterns = ("domain", "levelGhost") def get_all_available_quantities_from_h5(filepath, time=0, exclude=["tags"], hier=None): diff --git a/pyphare/pyphare/pharesee/hierarchy/fromsim.py b/pyphare/pyphare/pharesee/hierarchy/fromsim.py index ecdc24600..aa13ea2cc 100644 --- a/pyphare/pyphare/pharesee/hierarchy/fromsim.py +++ b/pyphare/pyphare/pharesee/hierarchy/fromsim.py @@ -90,7 +90,7 @@ def hierarchy_from_sim(simulator, qty, pop=""): # domain... 
while looping on the patchGhost items, we need to search in # the already created patches which one to which add the patchGhost particles - for ghostParticles in ["patchGhost", "levelGhost"]: + for ghostParticles in ["levelGhost"]: if ghostParticles in populationdict: for dwpatch in populationdict[ghostParticles]: v = np.asarray(dwpatch.data.v) diff --git a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py index b7fbcc220..d3856873d 100644 --- a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py +++ b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py @@ -93,24 +93,14 @@ def merge_particles(hierarchy): (pdname, pd) for pdname, pd in pdatas.items() if "domain" in pdname ][0] - pghost_pdatas = [ - (pdname, pd) - for pdname, pd in pdatas.items() - if "patchGhost" in pdname - ] lghost_pdatas = [ (pdname, pd) for pdname, pd in pdatas.items() if "levelGhost" in pdname ] - pghost_pdata = pghost_pdatas[0] if pghost_pdatas else None lghost_pdata = lghost_pdatas[0] if lghost_pdatas else None - if pghost_pdata is not None: - domain_pdata[1].dataset.add(pghost_pdata[1].dataset) - del pdatas[pghost_pdata[0]] - if lghost_pdata is not None: domain_pdata[1].dataset.add(lghost_pdata[1].dataset) del pdatas[lghost_pdata[0]] diff --git a/pyphare/pyphare/pharesee/run/run.py b/pyphare/pyphare/pharesee/run/run.py index 30f6a25f8..f1513293b 100644 --- a/pyphare/pyphare/pharesee/run/run.py +++ b/pyphare/pyphare/pharesee/run/run.py @@ -202,7 +202,7 @@ def GetParticleCount(self, time, **kwargs): return c def GetMass(self, pop_name, **kwargs): - list_of_qty = ["density", "flux", "domain", "levelGhost", "patchGhost"] + list_of_qty = ["density", "flux", "domain", "levelGhost"] list_of_mass = [] import h5py diff --git a/src/amr/data/field/field_data.hpp b/src/amr/data/field/field_data.hpp index 56801c93c..7872730e4 100644 --- a/src/amr/data/field/field_data.hpp +++ b/src/amr/data/field/field_data.hpp @@ -2,22 +2,21 @@ #define 
PHARE_SRC_AMR_FIELD_FIELD_DATA_HPP - +#include "core/logger.hpp" #include "core/def/phare_mpi.hpp" +#include +#include "core/data/field/field_box.hpp" -#include -#include -#include - -#include "core/data/grid/gridlayout.hpp" -#include "core/data/grid/gridlayout_impl.hpp" +#include #include "amr/resources_manager/amr_utils.hpp" #include "field_geometry.hpp" -#include "core/logger.hpp" +#include +#include + +#include -#include namespace PHARE { @@ -40,13 +39,17 @@ namespace amr typename PhysicalQuantity = decltype(std::declval().physicalQuantity())> class FieldData : public SAMRAI::hier::PatchData { - using Super = SAMRAI::hier::PatchData; + using Super = SAMRAI::hier::PatchData; + using value_type = Grid_t::value_type; + using SetEqualOp = core::Equals; public: static constexpr std::size_t dimension = GridLayoutT::dimension; static constexpr std::size_t interp_order = GridLayoutT::interp_order; using Geometry = FieldGeometry; using gridlayout_type = GridLayoutT; + static constexpr auto NO_ROTATE = SAMRAI::hier::Transformation::NO_ROTATE; + /*** \brief Construct a FieldData from information associated to a patch * @@ -126,24 +129,19 @@ namespace amr // quantity_ using the source gridlayout to accomplish that we get the interior box, // from the FieldData. 
- SAMRAI::hier::Box sourceBox = Geometry::toFieldBox(fieldSource.getGhostBox(), quantity_, - fieldSource.gridLayout); - + SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox( + fieldSource.getGhostBox(), quantity_, fieldSource.gridLayout); - SAMRAI::hier::Box destinationBox + SAMRAI::hier::Box const destinationBox = Geometry::toFieldBox(this->getGhostBox(), quantity_, this->gridLayout); // Given the two boxes in correct space we just have to intersect them - SAMRAI::hier::Box intersectionBox = sourceBox * destinationBox; + SAMRAI::hier::Box const intersectionBox = sourceBox * destinationBox; if (!intersectionBox.empty()) { - auto const& sourceField = fieldSource.field; - auto& destinationField = field; - // We can copy field from the source to the destination on the correct region - copy_(intersectionBox, sourceBox, destinationBox, fieldSource, sourceField, - destinationField); + copy_(intersectionBox, sourceBox, destinationBox, fieldSource, field); } } @@ -209,7 +207,7 @@ namespace amr */ std::size_t getDataStreamSize(const SAMRAI::hier::BoxOverlap& overlap) const final { - return getDataStreamSize_(overlap); + return getDataStreamSize_(overlap); } @@ -223,37 +221,29 @@ namespace amr { PHARE_LOG_SCOPE(3, "packStream"); - // getDataStreamSize_ mean that we want to apply the transformation - std::size_t expectedSize = getDataStreamSize_(overlap) / sizeof(double); - std::vector buffer; - buffer.reserve(expectedSize); - auto& fieldOverlap = dynamic_cast(overlap); SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) - { - SAMRAI::hier::BoxContainer const& boxContainer - = fieldOverlap.getDestinationBoxContainer(); - for (auto const& box : boxContainer) - { - auto const& source = field; - SAMRAI::hier::Box sourceBox - = Geometry::toFieldBox(getGhostBox(), quantity_, gridLayout); + if (transformation.getRotation() != NO_ROTATE) + throw 
std::runtime_error("Rotations are not supported in PHARE"); - SAMRAI::hier::Box packBox{box}; + std::vector buffer; + buffer.reserve(getDataStreamSize_(overlap) / sizeof(double)); - // Since the transformation, allow to transform the source box, - // into the destination box space, and that the box in the boxContainer - // are in destination space, we have to use the inverseTransform - // to get into source space - transformation.inverseTransform(packBox); - packBox = packBox * sourceBox; + for (auto const& box : fieldOverlap.getDestinationBoxContainer()) + { + SAMRAI::hier::Box packBox{box}; - internals_.packImpl(buffer, source, packBox, sourceBox); - } + // Since the transformation, allow to transform the source box, + // into the destination box space, and that the box in the boxContainer + // are in destination space, we have to use the inverseTransform + // to get into source space + transformation.inverseTransform(packBox); + + core::FieldBox src{field, gridLayout, + phare_box_from(packBox)}; + src.append_to(buffer); } - // throw, we don't do rotations in phare.... // Once we have fill the buffer, we send it on the stream stream.pack(buffer.data(), buffer.size()); @@ -261,50 +251,43 @@ namespace amr - - /*** \brief Unserialize data contained on the stream, that comes from a region covered by - * the overlap, and fill the data where is needed. + /*** \brief Unserialize data contained on the stream, that comes from a region covered + * by the overlap, and fill the data where is needed. 
*/ void unpackStream(SAMRAI::tbox::MessageStream& stream, const SAMRAI::hier::BoxOverlap& overlap) final { - PHARE_LOG_SCOPE(3, "unpackStream"); - - // For unpacking we need to know how much element we will need to - // extract - std::size_t expectedSize = getDataStreamSize(overlap) / sizeof(double); + unpackStream(stream, overlap, field); + } - std::vector buffer; - buffer.resize(expectedSize, 0.); + template + void unpackStream(SAMRAI::tbox::MessageStream& stream, + const SAMRAI::hier::BoxOverlap& overlap, Grid_t& dst_grid) + { + PHARE_LOG_SCOPE(3, "unpackStream"); auto& fieldOverlap = dynamic_cast(overlap); - // We flush a portion of the stream on the buffer. - stream.unpack(buffer.data(), expectedSize); - - SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) - { - // Here the seek counter will be used to index buffer - std::size_t seek = 0; - - SAMRAI::hier::BoxContainer const& boxContainer - = fieldOverlap.getDestinationBoxContainer(); - for (auto const& box : boxContainer) - { - // For unpackStream, there is no transformation needed, since all the box - // are on the destination space + if (fieldOverlap.getTransformation().getRotation() != NO_ROTATE) + throw std::runtime_error("Rotations are not supported in PHARE"); - auto& source = field; - SAMRAI::hier::Box destination - = Geometry::toFieldBox(getGhostBox(), quantity_, gridLayout); + // For unpacking we need to know how much element we will need to extract + std::vector buffer(getDataStreamSize(overlap) / sizeof(value_type), 0.); + // We flush a portion of the stream on the buffer. 
+ stream.unpack(buffer.data(), buffer.size()); - SAMRAI::hier::Box packBox{box * destination}; - + // Here the seek counter will be used to index buffer + std::size_t seek = 0; - internals_.unpackImpl(seek, buffer, source, packBox, destination); - } + // For unpackStream, there is no transformation needed, since all the box + // are on the destination space + for (auto const& sambox : fieldOverlap.getDestinationBoxContainer()) + { + auto const box = phare_box_from(sambox); + core::FieldBox dst{dst_grid, gridLayout, box}; + dst.template set_from(buffer, seek); + seek += box.size(); } } @@ -337,7 +320,9 @@ namespace amr } - + void sum(SAMRAI::hier::PatchData const& src, SAMRAI::hier::BoxOverlap const& overlap); + void unpackStreamAndSum(SAMRAI::tbox::MessageStream& stream, + SAMRAI::hier::BoxOverlap const& overlap); GridLayoutT gridLayout; Grid_t field; @@ -351,28 +336,34 @@ namespace amr /*** \brief copy data from the intersection box * */ + + template void copy_(SAMRAI::hier::Box const& intersectBox, SAMRAI::hier::Box const& sourceBox, - SAMRAI::hier::Box const& destinationBox, - [[maybe_unused]] FieldData const& source, Grid_t const& fieldSource, + SAMRAI::hier::Box const& destinationBox, FieldData const& source, Grid_t& fieldDestination) { - // First we represent the intersection that is defined in AMR space to the local space - // of the source - - SAMRAI::hier::Box localSourceBox{AMRToLocal(intersectBox, sourceBox)}; - - // Then we represent the intersection into the local space of the destination - SAMRAI::hier::Box localDestinationBox{AMRToLocal(intersectBox, destinationBox)}; - - - // We can finally perform the copy of the element in the correct range - internals_.copyImpl(localSourceBox, fieldSource, localDestinationBox, fieldDestination); + // First we represent the intersection that is defined in AMR space to the local + // space of the source Then we represent the intersection into the local space of + // the destination We can finally perform the copy 
of the element in the correct + // range + + core::FieldBox dst{ + fieldDestination, gridLayout, + as_unsigned_phare_box(AMRToLocal(intersectBox, destinationBox))}; + core::FieldBox const src{ + source.field, source.gridLayout, + as_unsigned_phare_box(AMRToLocal(intersectBox, sourceBox))}; + operate_on_fields(dst, src); } - - void copy_(FieldData const& source, FieldOverlap const& overlap) + { + copy_(source, overlap, field); + } + + template + void copy_(FieldData const& source, FieldOverlap const& overlap, Grid_t& dst) { // Here the first step is to get the transformation from the overlap // we transform the box from the source, and from the destination @@ -384,7 +375,7 @@ namespace amr SAMRAI::hier::Transformation const& transformation = overlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) + if (transformation.getRotation() == NO_ROTATE) { SAMRAI::hier::BoxContainer const& boxList = overlap.getDestinationBoxContainer(); @@ -395,29 +386,21 @@ namespace amr { for (auto const& box : boxList) { - SAMRAI::hier::Box sourceBox = Geometry::toFieldBox( + SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox( source.getGhostBox(), quantity_, source.gridLayout); - - SAMRAI::hier::Box destinationBox = Geometry::toFieldBox( + SAMRAI::hier::Box const destinationBox = Geometry::toFieldBox( this->getGhostBox(), quantity_, this->gridLayout); - SAMRAI::hier::Box transformedSource{sourceBox}; transformation.transform(transformedSource); - - SAMRAI::hier::Box intersectionBox{box * transformedSource * destinationBox}; - + SAMRAI::hier::Box const intersectionBox{box * transformedSource + * destinationBox}; if (!intersectionBox.empty()) - { - Grid_t const& sourceField = source.field; - Grid_t& destinationField = field; - - copy_(intersectionBox, transformedSource, destinationBox, source, - sourceField, destinationField); - } + copy_(intersectionBox, transformedSource, destinationBox, + source, dst); } } } @@ -429,292 +412,66 @@ 
namespace amr /*** \brief Compute the maximum amount of memory needed to hold FieldData information on - * the specified overlap, this version work on the source, or the destination - * depending on withTransform parameter + * the specified overlap */ - template std::size_t getDataStreamSize_(SAMRAI::hier::BoxOverlap const& overlap) const { // The idea here is to tell SAMRAI the maximum memory will be used by our type // on a given region. - // throws on failure auto& fieldOverlap = dynamic_cast(overlap); if (fieldOverlap.isOverlapEmpty()) - { return 0; - } // TODO: see FieldDataFactory todo of the same function SAMRAI::hier::BoxContainer const& boxContainer = fieldOverlap.getDestinationBoxContainer(); - return boxContainer.getTotalSizeOfBoxes() * sizeof(typename Grid_t::type); - } - - - FieldDataInternals internals_; - }; // namespace PHARE - - - - - // 1D internals implementation - template - class FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - destination(xDestination) = source(xSource); - } - } - - - - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& sourceBox) const - { - int xStart = overlap.lower(0) - sourceBox.lower(0); - int xEnd = overlap.upper(0) - sourceBox.lower(0); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - buffer.push_back(source(xi)); - } - } - - - - void 
unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - source(xi) = buffer[seek]; - ++seek; - } - } - }; - - - - // 2D internals implementation - template - class FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1)); - std::uint32_t yDestinationStart - = static_cast(localDestinationBox.lower(1)); - - std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1)); - std::uint32_t yDestinationEnd - = static_cast(localDestinationBox.upper(1)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart; - ySource <= ySourceEnd && yDestination <= yDestinationEnd; - ++ySource, ++yDestination) - { - destination(xDestination, yDestination) = source(xSource, ySource); - } - } - } - - - - - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const - - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - 
destination.lower(1); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - buffer.push_back(source(xi, yi)); - } - } - } - - - - - void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - source(xi, yi) = buffer[seek]; - ++seek; - } - } + return boxContainer.getTotalSizeOfBoxes() * sizeof(value_type); } }; - // 3D internals implementation - template - class FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1)); - std::uint32_t yDestinationStart - = static_cast(localDestinationBox.lower(1)); - - std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1)); - std::uint32_t yDestinationEnd - = static_cast(localDestinationBox.upper(1)); - - std::uint32_t zSourceStart = static_cast(localSourceBox.lower(2)); - std::uint32_t zDestinationStart - = static_cast(localDestinationBox.lower(2)); - - std::uint32_t zSourceEnd = static_cast(localSourceBox.upper(2)); - std::uint32_t zDestinationEnd - = static_cast(localDestinationBox.upper(2)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - 
xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart; - ySource <= ySourceEnd && yDestination <= yDestinationEnd; - ++ySource, ++yDestination) - { - for (std::uint32_t zSource = zSourceStart, zDestination = zDestinationStart; - zSource <= zSourceEnd && zDestination <= zDestinationEnd; - ++zSource, ++zDestination) - { - destination(xDestination, yDestination, zDestination) - = source(xSource, ySource, zSource); - } - } - } - } - - +} // namespace amr +} // namespace PHARE - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); +namespace PHARE::amr +{ - int zStart = overlap.lower(2) - destination.lower(2); - int zEnd = overlap.upper(2) - destination.lower(2); - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - for (int zi = zStart; zi <= zEnd; ++zi) - { - buffer.push_back(source(xi, yi, zi)); - } - } - } - } +template +void FieldData::unpackStreamAndSum( + SAMRAI::tbox::MessageStream& stream, SAMRAI::hier::BoxOverlap const& overlap) +{ + using PlusEqualOp = core::PlusEquals; + unpackStream(stream, overlap, field); +} - void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); +template +void FieldData::sum(SAMRAI::hier::PatchData const& src, + SAMRAI::hier::BoxOverlap const& overlap) +{ + using PlusEqualOp = core::PlusEquals; - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = 
overlap.upper(1) - destination.lower(1); + TBOX_ASSERT_OBJDIM_EQUALITY2(*this, src); - int zStart = overlap.lower(2) - destination.lower(2); - int zEnd = overlap.upper(2) - destination.lower(2); + auto& fieldOverlap = dynamic_cast(overlap); + auto& fieldSource = dynamic_cast(src); - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - for (int zi = zStart; zi <= zEnd; ++zi) - { - source(xi, yi, zi) = buffer[seek]; - ++seek; - } - } - } - } - }; + copy_(fieldSource, fieldOverlap, field); +} -} // namespace amr -} // namespace PHARE +} // namespace PHARE::amr #endif diff --git a/src/amr/data/field/field_variable_fill_pattern.hpp b/src/amr/data/field/field_variable_fill_pattern.hpp index c890a2944..29318c686 100644 --- a/src/amr/data/field/field_variable_fill_pattern.hpp +++ b/src/amr/data/field/field_variable_fill_pattern.hpp @@ -1,43 +1,26 @@ #ifndef PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP #define PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP -#include #include "core/def/phare_mpi.hpp" +#include + +#include +#include "amr/data/field/field_geometry.hpp" +#include #include "SAMRAI/xfer/VariableFillPattern.h" -#include "core/utilities/types.hpp" -#include "core/utilities/mpi_utils.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" +#include namespace PHARE::amr { /* - This class is used from multiple schedules - To know which schedule we are coming from, we have `std::optional opt_overwrite_interior_` - - the modes are : - - 1. To synchronize primal nodes on shared patch borders - e.g. 
hybrid_hybrid_messenger_strategy.hpp - HybridHybridMessengerStrategy::magneticSharedNodes_ - - in this case, the fillPattern is constructed - with "std::optional opt_overwrite_interior_ == std::nullopt", - we set the forwarding flag of "bool overwrite_interior" to true by default - and it is only set to false for one of the 2 patches involved in the overlap - so that only one process assigns its value to the shared border node - We also remove the exclusive interior of the src patch to isolate only shared primal - nodes. - - 2. To synchronize pure ghost values from src domain values - in that case, the optional is set to "false" and overwrite_interior takes this value - none of the two patches overwrites the shared border nodes and only pure ghost nodes are - filled. + This class is used to synchronize pure ghost values from src domain values + in that case, we default overwrite_interior to "false" so none of the two patches overwrites + the shared border nodes and only pure ghost nodes are filled. 
Notes on shared-node overwrite interior: https://github.com/LLNL/SAMRAI/issues/170 - */ // This class is mostly a copy of BoxGeometryVariableFillPattern template @@ -46,19 +29,11 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern constexpr static std::size_t dim = dimension; public: - FieldFillPattern(std::optional overwrite_interior) - : opt_overwrite_interior_{overwrite_interior} - { - } + // defaulted param makes this the default constructor also + FieldFillPattern(bool overwrite_interior = false) + : overwrite_interior_{overwrite_interior} - static auto make_shared(std::shared_ptr const& samrai_op) { - auto const& op = dynamic_cast(*samrai_op); - - if (op.node_only) - return std::make_shared>(std::nullopt); - - return std::make_shared>(false); } @@ -69,51 +44,21 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern SAMRAI::hier::BoxGeometry const& src_geometry, SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, SAMRAI::hier::Box const& fill_box, bool const fn_overwrite_interior, - SAMRAI::hier::Transformation const& transformation) const + SAMRAI::hier::Transformation const& transformation) const override { #ifndef DEBUG_CHECK_DIM_ASSERTIONS NULL_USE(dst_patch_box); #endif TBOX_ASSERT_OBJDIM_EQUALITY2(dst_patch_box, src_mask); - bool overwrite_interior = true; // replace func param - assert(fn_overwrite_interior == overwrite_interior); - - if (opt_overwrite_interior_) // not node only - { - // this sets overwrite_interior to false - overwrite_interior = *opt_overwrite_interior_; - } - - // opt_overwrite_interior_ is nullopt : assume primal node shared border schedule - else - { - // cast into the Base class to get the pureInteriorFieldBox method - // see field_geometry.hpp for more explanations about why this base class exists - auto& dst_cast = dynamic_cast const&>(dst_geometry); - auto& src_cast = dynamic_cast const&>(src_geometry); + assert(fn_overwrite_interior == true); // expect default as true - if 
(src_cast.patchBox.getGlobalId().getOwnerRank() - != dst_cast.patchBox.getGlobalId().getOwnerRank()) - overwrite_interior - = src_cast.patchBox.getGlobalId() > dst_cast.patchBox.getGlobalId(); + return dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, overwrite_interior_, - auto basic_overlap = dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, - overwrite_interior, transformation); - auto& overlap = dynamic_cast(*basic_overlap); - - auto destinationBoxes = overlap.getDestinationBoxContainer(); - destinationBoxes.removeIntersections(src_cast.pureInteriorFieldBox()); - - return std::make_shared(destinationBoxes, overlap.getTransformation()); - } - - // overwrite_interior is always false here - return dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, overwrite_interior, transformation); } - std::string const& getPatternName() const { return s_name_id; } + std::string const& getPatternName() const override { return s_name_id; } private: FieldFillPattern(FieldFillPattern const&) = delete; @@ -121,7 +66,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; - SAMRAI::hier::IntVector const& getStencilWidth() + SAMRAI::hier::IntVector const& getStencilWidth() override { TBOX_ERROR("getStencilWidth() should not be\n" << "called. 
This pattern creates overlaps based on\n" @@ -146,7 +91,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, SAMRAI::hier::BoxContainer const& node_fill_boxes, SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, - SAMRAI::hier::PatchDataFactory const& pdf) const + SAMRAI::hier::PatchDataFactory const& pdf) const override { NULL_USE(node_fill_boxes); @@ -162,9 +107,119 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern return pdf.getBoxGeometry(patch_box)->setUpOverlap(overlap_boxes, transformation); } - std::optional opt_overwrite_interior_{nullptr}; + bool overwrite_interior_; +}; + + + +// We use this fill pattern to sum the contributions of border fields like rho and flux +/** \brief VariableFillPattern that is used to fill incomplete ghost domain moment nodes + * + * After deposition of domain particles, some domain and ghost nodes lack contributions + * from particle that exist on a neighboring patch. + * The extent of incomplete nodes in the ghost layer and in domain depends on interp order. + * + * Example, at interpolation order 1, only the border node will be incomplete after + * depositing domain particles since these hit only the two primal nodes surronding its position. + * However, we deposit also leaving domain particles before they are sent to patchGhost particles + * and shipped to neighrboring patches. + * Leaving particles can be found in the first ghost cell from domain, so the first primal + * ghost node from domain will also have some incomplete contribution. + * + * At order 1, thus, there is an overlap of 3 primal nodes that are incomplete: + * the shared border node and the first domain and first ghost. 
+ * + * ghost cells <-|-> patch + * + + + + * | leaving | domain particles + * | particles | + * + * + * As a first try and to keep it simple, this fill pattern simply creates the overlap + * that is the intersection of the field ghost boxes of the source and destination patch datas. + * That is, at interpolation 1 we have 2 ghost cells thus it is 5 nodes that overlap + * even though the outermost ghost should have 0 contribution. + * + * ghost cells <-|-> patch + * + + + + + + * ^ | leaving | domain particles + * | | particles | + * 0 + * */ +template // ASSUMED ALL PRIMAL! +class FieldGhostInterpOverlapFillPattern : public SAMRAI::xfer::VariableFillPattern +{ + std::size_t constexpr static dim = Gridlayout_t::dimension; + using FieldGeometry_t = FieldGeometryBase; + +public: + FieldGhostInterpOverlapFillPattern() {} + ~FieldGhostInterpOverlapFillPattern() override {} + + std::shared_ptr + calculateOverlap(SAMRAI::hier::BoxGeometry const& _dst_geometry, + SAMRAI::hier::BoxGeometry const& _src_geometry, + SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, + SAMRAI::hier::Box const& fill_box, bool const overwrite_interior, + SAMRAI::hier::Transformation const& transformation) const override + { + PHARE_LOG_SCOPE(3, "FieldGhostInterpOverlapFillPattern::calculateOverlap"); + + // Skip if src and dst are the same + if (phare_box_from(dst_patch_box) == phare_box_from(src_mask)) + return std::make_shared(SAMRAI::hier::BoxContainer{}, transformation); + + auto& dst_geometry = dynamic_cast(_dst_geometry); + auto& src_geometry = dynamic_cast(_src_geometry); + + auto const _primal_ghost_box = [](auto const& box) { + auto gb = grow(box, Gridlayout_t::nbrGhosts()); + gb.upper += 1; + return gb; + }; + + auto const src_ghost_box = [&]() { + auto const box = phare_box_from(src_geometry.patchBox); + auto const primal_ghost_box = _primal_ghost_box(box); + return amr::shift(primal_ghost_box, transformation); + }(); + + auto const dst_ghost_box = [&]() { + 
auto const box = phare_box_from(dst_geometry.patchBox); + return _primal_ghost_box(box); + }(); + + + SAMRAI::hier::BoxContainer dest; + if (auto overlap = dst_ghost_box * src_ghost_box) + dest.push_back(samrai_box_from(*overlap)); + + return std::make_shared(dest, transformation); + } + + std::string const& getPatternName() const override { return s_name_id; } + +private: + static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; + + SAMRAI::hier::IntVector const& getStencilWidth() override + { + throw std::runtime_error("never called"); + } + + + std::shared_ptr + computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, + SAMRAI::hier::BoxContainer const& node_fill_boxes, + SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, + SAMRAI::hier::PatchDataFactory const& pdf) const override + { + throw std::runtime_error("no refinement supported or expected"); + } }; + + } // namespace PHARE::amr #endif /* PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_H */ diff --git a/src/amr/data/field/refine/field_refine_operator.hpp b/src/amr/data/field/refine/field_refine_operator.hpp index 7ba73cf41..adfc0cca1 100644 --- a/src/amr/data/field/refine/field_refine_operator.hpp +++ b/src/amr/data/field/refine/field_refine_operator.hpp @@ -6,39 +6,25 @@ #include "core/def.hpp" #include "amr/data/field/field_data.hpp" -#include "amr/data/field/field_geometry.hpp" -#include "core/data/grid/gridlayout.hpp" + #include "field_linear_refine.hpp" -#include "field_refiner.hpp" -#include #include +#include + #include -#include namespace PHARE::amr { -class AFieldRefineOperator -{ -public: - AFieldRefineOperator(bool b) - : node_only{b} - { - } - virtual ~AFieldRefineOperator() {} - - bool const node_only = false; -}; - using core::dirX; using core::dirY; using core::dirZ; template -class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRefineOperator +class FieldRefineOperator : public SAMRAI::hier::RefineOperator { 
public: static constexpr std::size_t dimension = GridLayoutT::dimension; @@ -48,7 +34,7 @@ class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRe FieldRefineOperator(bool node_only = false) : SAMRAI::hier::RefineOperator{"FieldRefineOperator"} - , AFieldRefineOperator{node_only} + { } diff --git a/src/amr/data/particles/particles_data.hpp b/src/amr/data/particles/particles_data.hpp index 803247447..259125a46 100644 --- a/src/amr/data/particles/particles_data.hpp +++ b/src/amr/data/particles/particles_data.hpp @@ -1,34 +1,32 @@ #ifndef PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP #define PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP -#include -#include -#include -#include -#include - #include "core/def/phare_mpi.hpp" -#include -#include -#include -#include -#include -#include -#include "SAMRAI/hier/Transformation.h" - #include "core/def.hpp" #include "core/data/ions/ion_population/particle_pack.hpp" #include "core/data/particles/particle_array.hpp" #include "core/data/particles/particle_packer.hpp" -#include "amr/resources_manager/amr_utils.hpp" + #include "amr/utilities/box/amr_box.hpp" -#include "core/utilities/point/point.hpp" +#include "amr/resources_manager/amr_utils.hpp" +#include + -#include "core/logger.hpp" +#include +#include +#include +#include +#include +#include "SAMRAI/hier/Transformation.h" +#include +#include +#include +#include +#include namespace PHARE @@ -37,69 +35,29 @@ namespace amr { - template - NO_DISCARD inline bool isInBox(SAMRAI::hier::Box const& box, Particle const& particle) - { - constexpr auto dim = Particle::dimension; - - auto const& iCell = particle.iCell; - - auto const& lower = box.lower(); - auto const& upper = box.upper(); - - - if (iCell[0] >= lower(0) && iCell[0] <= upper(0)) - { - if constexpr (dim > 1) - { - if (iCell[1] >= lower(1) && iCell[1] <= upper(1)) - { - if constexpr (dim > 2) - { - if (iCell[2] >= lower(2) && iCell[2] <= upper(2)) - { - return true; - } - } - else - { - return true; - 
} - } - } - else - { - return true; - } - } - return false; - } - - - /** @brief ParticlesData is a concrete SAMRAI::hier::PatchData subclass to store Particle data - * - * This class encapsulates particle storage known by the module core, and by being derived - * from PatchData is compatible with the SAMRAI data management system. + /** @brief ParticlesData is a concrete SAMRAI::hier::PatchData subclass + * to store Particle data * * A ParticlesData encapsulates **three** different particle arrays: * - * - domainParticles : these particles are those for which iCell is within the physical domain - * of the patch + * - domainParticles : these particles are those for which iCell + * is within the physical domain of the patch * - * - patchGhostParticles: these particles are located within the ghost layer around the physical - * domain of the patch. We call the "ghost layer" the layer of ghostCellWidth just outside the - * physical domain of the patch, on borders that have neighbors patchs of the same level. - * All the particles in the ghost layer are exact clones of particles located on a neighbor - * patch of the same level. The ghost particles are getting here when then exit the neighbor - * patch, and can enter the patch. + * - patchGhostParticles: represents particles that left the patch domain and are + * physically located in the patch ghost layer of a patch. * - * - levelGhostParticles: these particles are located in a layer just passed the patch - * boundaries that also are level boundaries. These particles are getting here when there is a - * particle refinement from a coarser level + * - levelGhostParticles: represent particles obtained from refinement and + * located in level ghost layer. These particles are to be pushed and injected + * in domain if they arrive in there. + * + *- levelGhostParticlesOld: same as levelGhostParticles but defined at previous + * next coarse time step. 
Used to deposit contribution of these particles + * to moments in level ghost nodes + * + *- levelGhostParticlesNew: same as levelGhostParticles but defined at next + * coarser future time step. Used to deposit contribution of these particles + * to moments in level ghost nodes * - */ - /** - * @brief The ParticlesData class */ template class ParticlesData : public SAMRAI::hier::PatchData @@ -170,7 +128,6 @@ namespace amr }; putParticles("domainParticles", domainParticles); - putParticles("patchGhostParticles", patchGhostParticles); putParticles("levelGhostParticles", levelGhostParticles); putParticles("levelGhostParticlesNew", levelGhostParticlesNew); putParticles("levelGhostParticlesOld", levelGhostParticlesOld); @@ -215,7 +172,6 @@ namespace amr }; getParticles("domainParticles", domainParticles); - getParticles("patchGhostParticles", patchGhostParticles); getParticles("levelGhostParticles", levelGhostParticles); getParticles("levelGhostParticlesNew", levelGhostParticlesNew); getParticles("levelGhostParticlesOld", levelGhostParticlesOld); @@ -265,10 +221,26 @@ namespace amr } + + template + void copy_from_ghost(Args&&... args); + + + void copy_from_cell_overlap(ParticlesData const& pSource, + SAMRAI::pdat::CellOverlap const& pOverlap) + { + SAMRAI::hier::Transformation const& transformation = pOverlap.getTransformation(); + SAMRAI::hier::BoxContainer const& boxList = pOverlap.getDestinationBoxContainer(); + for (auto const& overlapBox : boxList) + copy_(overlapBox, pSource, transformation); + } + /** - * @brief copy with an overlap. Does the copy as the other overload but this time - * the copy must account for the intersection with the boxes within the overlap - * The copy is done between the source patch data and myself + * @brief copy with an overlap given by SAMARAI. 
+ * At runtime we can deal with two kinds of overlaps: + * - ParticlesDomainOverlap: means this copy is from a context when we're grabbing + * leaving domain particles from the neighbor patch, in the patchghost array. + * - CellOverlap: means domain particles are copied as part of a refinement operation. */ void copy(SAMRAI::hier::PatchData const& source, SAMRAI::hier::BoxOverlap const& overlap) override @@ -276,15 +248,16 @@ namespace amr PHARE_LOG_SCOPE(3, "ParticlesData::copy with overlap"); // casts throw on failure - auto& pSource = dynamic_cast(source); - auto& pOverlap = dynamic_cast(overlap); + auto& pSource = dynamic_cast(source); - SAMRAI::hier::Transformation const& transformation = pOverlap.getTransformation(); - SAMRAI::hier::BoxContainer const& boxList = pOverlap.getDestinationBoxContainer(); - for (auto const& overlapBox : boxList) - { - copy_(overlapBox, pSource, transformation); - } + if (auto particleOverlap = dynamic_cast(&overlap)) + copy_from_ghost(pSource, *particleOverlap); + + else if (auto pOverlap = dynamic_cast(&overlap)) + copy_from_cell_overlap(pSource, *pOverlap); + + else + throw std::runtime_error("Unknown overlap type"); } @@ -298,42 +271,44 @@ namespace amr bool canEstimateStreamSizeFromBox() const override { return false; } + std::size_t getOutGoingDataStreamSize(ParticlesDomainOverlap const& pOverlap) const + { + auto& transformation = pOverlap.getTransformation(); + auto const& offset = as_point(transformation); + auto const& noffset = offset * -1; + std::size_t numberParticles = 0; + for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + numberParticles += patchGhostParticles.nbr_particles_in( + shift(phare_box_from(overlapBox), noffset)); + return sizeof(std::size_t) + numberParticles * sizeof(Particle_t); + } + std::size_t getCellOverlapDataStreamSize(SAMRAI::pdat::CellOverlap const& pOverlap) const + { + return sizeof(std::size_t) + countNumberParticlesIn_(pOverlap) * sizeof(Particle_t); + } std::size_t 
getDataStreamSize(SAMRAI::hier::BoxOverlap const& overlap) const override { - auto const& pOverlap{dynamic_cast(overlap)}; + if (auto particleOverlap = dynamic_cast(&overlap)) + return getOutGoingDataStreamSize(*particleOverlap); - return countNumberParticlesIn_(pOverlap) * sizeof(Particle_t); + else if (auto pOverlap = dynamic_cast(&overlap)) + return getCellOverlapDataStreamSize(*pOverlap); + + else + throw std::runtime_error("Unknown overlap type"); } - /** - * @brief packStream is the function that takes particles from our particles arrays - * that lie in the boxes of the given overlap, and pack them to a stream. - * - * Streaming particles means that we have to take particles with iCell on a local source - * index space , communicate them, and load them at destination with iCell on a destination - * local index space. To do that we need to: - * - * 1- translate source iCell to source AMR index space - * 2- Apply the offset to shift this AMR index on top of the destination cells - * 3- pack and communicate particles - * 4- move back iCell from the shifted AMR index space to the local destination index space - * - * Note that step 2 could be done upon reception of the pack, we chose to do it before. - * - */ - void packStream(SAMRAI::tbox::MessageStream& stream, - SAMRAI::hier::BoxOverlap const& overlap) const override - { - PHARE_LOG_SCOPE(3, "ParticleData::packStream"); - - auto const& pOverlap{dynamic_cast(overlap)}; + void pack_from_ghost(SAMRAI::tbox::MessageStream&, ParticlesDomainOverlap const&) const; + void pack_from_cell_overlap(SAMRAI::tbox::MessageStream& stream, + SAMRAI::pdat::CellOverlap const& pOverlap) const + { std::vector outBuffer; if (pOverlap.isOverlapEmpty()) @@ -353,78 +328,123 @@ namespace amr } } - - /** - * @brief unpackStream is the function that unpacks a stream of particles to our particle - * arrays. 
+ * @brief packStream is the function that takes particles from our particles arrays + * that lie in the boxes of the given overlap, and pack them to a stream. * - * We get a stream and an overlap. The overlap contains boxes where to put particles and - * transformation from source to destination AMR indexes. + * Streaming particles means that we have to take particles with iCell on a local source + * index space , communicate them, and load them at destination with iCell on a + * destination local index space. To do that we need to: + * + * 1- translate source iCell to source AMR index space + * 2- Apply the offset to shift this AMR index on top of the destination cells + * 3- pack and communicate particles + * 4- move back iCell from the shifted AMR index space to the local destination index + * space * - * By convention chosen in patckStream, packed particles have their iCell in our AMR index - * space. This means that before putting them into our local arrays, we need to apply - * AMRToLocal() to get the proper shift to apply to them + * Note that step 2 could be done upon reception of the pack, we chose to do it before. 
* + * As for copy(), we can have two kinds of overlaps: + * - ParticlesDomainOverlap : for grabbing leaving domain particles + * - CellOverlap : copy as part of refinement operations */ - void unpackStream(SAMRAI::tbox::MessageStream& stream, - SAMRAI::hier::BoxOverlap const& overlap) override + void packStream(SAMRAI::tbox::MessageStream& stream, + SAMRAI::hier::BoxOverlap const& overlap) const override { - PHARE_LOG_SCOPE(3, "ParticleData::unpackStream"); + PHARE_LOG_SCOPE(3, "ParticleData::packStream"); - auto const& pOverlap{dynamic_cast(overlap)}; + if (auto particleOverlap = dynamic_cast(&overlap)) + { + pack_from_ghost(stream, *particleOverlap); + } + else if (auto pOverlap = dynamic_cast(&overlap)) + pack_from_cell_overlap(stream, *pOverlap); + else + throw std::runtime_error("Unknown overlap type"); + } + + + + + void unpack_from_ghost(SAMRAI::tbox::MessageStream& stream, + ParticlesDomainOverlap const& overlap); + void unpack_cell_overlap(SAMRAI::tbox::MessageStream& stream, + SAMRAI::pdat::CellOverlap const& pOverlap) + { if (!pOverlap.isOverlapEmpty()) { - // unpack particles into a particle array std::size_t numberParticles = 0; stream >> numberParticles; std::vector particleArray(numberParticles); stream.unpack(particleArray.data(), numberParticles); - // ok now our goal is to put the particles we have just unpacked - // into the particleData and in the proper particleArray : interior or ghost SAMRAI::hier::Transformation const& transformation = pOverlap.getTransformation(); if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) { - // we loop over all boxes in the overlap - // we have to first take the intersection of each of these boxes - // with our ghostBox. This is where unpacked particles should go. 
- SAMRAI::hier::BoxContainer const& overlapBoxes = pOverlap.getDestinationBoxContainer(); - auto myBox = getBox(); - auto myGhostBox = getGhostBox(); - for (auto const& overlapBox : overlapBoxes) { - // our goal here is : - // 1/ to check if each particle is in the intersect of the overlap boxes - // and our ghostBox 2/ if yes, check if these particles should go within the - // interior array or ghost array + // note that we intersect the overlap box with the *ghost* box + // and not with the box although in the code, we never fill + // the patch ghost layer with particles (the level ghost layer + // is filled with particles but that is done in the refinement op). + // The reason for taking the ghost box YET putting the particles + // in the domain particle array is that SAMRAI may ask us to stream + // particles from a distant patch into a local temporary patch + // whost ghost box extends over the source data selection box. + // particles falling into our "ghost" layer here are thus not really + // ghost particles so they are just put in domain. + // Consistently, the ParticleRefineOperator will only look for + // particles to split from the domain particle array + // + // Note: see issue #1026 this intersection and check with isInBox + // may not be useful if particles all fall into the domain anyway auto const intersect = getGhostBox() * overlapBox; for (auto const& particle : particleArray) - { if (isInBox(intersect, particle)) - { - if (isInBox(myBox, particle)) - { - domainParticles.push_back(particle); - } - else - { - patchGhostParticles.push_back(particle); - } - } - } // end species loop + domainParticles.push_back(particle); + } // end box loop } // end no rotation } // end overlap not empty } + /** + * @brief unpackStream is the function that unpacks a stream of particles to our + * domain particle array + * + * We get a stream and an overlap. 
The overlap contains boxes where to put particles and + * transformation from source to destination AMR indexes. + * + * By convention chosen in patckStream, packed particles have their iCell in our AMR + * index space since we are the destination. + * + * like for packStream, we can have two kinds of overlaps: + * - ParticlesDomainOverlap : for unpacking leaving domain particles + * - CellOverlap : unpacking as part of refinement operations + * + */ + void unpackStream(SAMRAI::tbox::MessageStream& stream, + SAMRAI::hier::BoxOverlap const& overlap) override + { + PHARE_LOG_SCOPE(3, "ParticleData::unpackStream"); + + if (auto* particleOverlap = dynamic_cast(&overlap)) + unpack_from_ghost(stream, *particleOverlap); + + else if (auto const* pOverlap + = dynamic_cast(&overlap)) + unpack_cell_overlap(stream, *pOverlap); + + else + throw std::runtime_error("Unknown overlap type"); + } + core::ParticlesPack* getPointer() { return &pack; } @@ -446,9 +466,9 @@ namespace amr private: - //! interiorLocalBox_ is the box, in local index space, that goes from the first to the last - //! cell in our patch physical domain, i.e. "from dual physical start index to dual physical - //! end index" + //! interiorLocalBox_ is the box, in local index space, that goes from the first to the + //! last cell in our patch physical domain, i.e. "from dual physical start index to dual + //! 
physical end index" SAMRAI::hier::Box interiorLocalBox_; std::string name_; @@ -461,12 +481,12 @@ namespace amr // first copy particles that fall into our domain array // they can come from the source domain or patch ghost - auto destBox = myDomainBox * overlapBox; - auto new_size = domainParticles.size(); + auto const destBox = myDomainBox * overlapBox; + auto new_size = domainParticles.size(); if (!destBox.empty()) { - auto destBox_p = phare_box_from(destBox); + auto const destBox_p = phare_box_from(destBox); new_size += srcDomainParticles.nbr_particles_in(destBox_p); if (domainParticles.capacity() < new_size) domainParticles.reserve(new_size); @@ -481,7 +501,7 @@ namespace amr SAMRAI::hier::BoxContainer ghostLayerBoxes{}; ghostLayerBoxes.removeIntersections(overlapBox, myDomainBox); - new_size = patchGhostParticles.size(); + new_size = domainParticles.size(); for (auto& selectionBox : ghostLayerBoxes) { if (!selectionBox.empty()) @@ -490,8 +510,8 @@ namespace amr new_size += srcDomainParticles.nbr_particles_in(selectionBox_p); } } - if (patchGhostParticles.capacity() < new_size) - patchGhostParticles.reserve(new_size); + if (domainParticles.capacity() < new_size) + domainParticles.reserve(new_size); for (auto const& selectionBox : ghostLayerBoxes) @@ -499,7 +519,7 @@ namespace amr if (!selectionBox.empty()) { auto selectionBox_p = phare_box_from(selectionBox); - srcDomainParticles.export_particles(selectionBox_p, patchGhostParticles); + srcDomainParticles.export_particles(selectionBox_p, domainParticles); } } PHARE_LOG_STOP(3, "ParticlesData::copy_ DomainToGhosts"); @@ -560,7 +580,7 @@ namespace amr SAMRAI::hier::BoxContainer ghostLayerBoxes{}; ghostLayerBoxes.removeIntersections(overlapBox, myDomainBox); - new_size = patchGhostParticles.size(); + new_size = domainParticles.size(); for (auto& selectionBox : ghostLayerBoxes) { if (!selectionBox.empty()) @@ -570,8 +590,8 @@ namespace amr new_size += srcDomainParticles.nbr_particles_in(selectionBox_p); } } - if 
(patchGhostParticles.capacity() < new_size) - patchGhostParticles.reserve(new_size); + if (domainParticles.capacity() < new_size) + domainParticles.reserve(new_size); // ghostLayer boxes already have been inverse transformed @@ -581,8 +601,7 @@ namespace amr if (!selectionBox.empty()) { auto selectionBox_p = phare_box_from(selectionBox); - srcDomainParticles.export_particles(selectionBox_p, patchGhostParticles, - offseter); + srcDomainParticles.export_particles(selectionBox_p, domainParticles, offseter); } } @@ -671,8 +690,101 @@ namespace amr } } }; -} // namespace amr + +} // namespace amr } // namespace PHARE + +namespace PHARE::amr +{ + +template +template +void ParticlesData::copy_from_ghost(Args&&... args) +{ + PHARE_LOG_SCOPE(3, "ParticlesData::copy_from_ghost"); + + auto&& [pSource, pOverlap] = std::forward_as_tuple(args...); + auto& src_particles = pSource.patchGhostParticles; + auto& dst_particles = domainParticles; + auto const& offset = as_point(pOverlap.getTransformation()); + auto const& noffset = offset * -1; + + auto const offsetToDest = [&](auto const& particle) { + auto shiftedParticle{particle}; + for (std::size_t idir = 0; idir < dim; ++idir) + shiftedParticle.iCell[idir] += offset[idir]; + return shiftedParticle; + }; + // we shift the overlap box to the our array index space since it is given + // in the destinaton index space. 
+ for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + src_particles.export_particles(shift(phare_box_from(overlapBox), noffset), + dst_particles, offsetToDest); +} + + + +template +void ParticlesData::pack_from_ghost(SAMRAI::tbox::MessageStream& stream, + ParticlesDomainOverlap const& pOverlap) const +{ + PHARE_LOG_SCOPE(3, "ParticlesData::pack_from_ghost"); + + if (pOverlap.isOverlapEmpty()) + { + constexpr std::size_t zero = 0; + stream << zero; + return; + } + + std::vector outBuffer; + auto& src_particles = patchGhostParticles; + auto const& offset = as_point(pOverlap.getTransformation()); + auto const& noffset = offset * -1; + + auto const offsetToDest = [&](auto const& particle) { + auto shiftedParticle{particle}; + for (std::size_t idir = 0; idir < dim; ++idir) + shiftedParticle.iCell[idir] += offset[idir]; + return shiftedParticle; + }; + + // we shift the overlap box to the our array index space since it is given + // in the destinaton index space. + for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + src_particles.export_particles(shift(phare_box_from(overlapBox), noffset), outBuffer, + offsetToDest); + + stream << outBuffer.size(); + stream.growBufferAsNeeded(); + stream.pack(outBuffer.data(), outBuffer.size()); +} + +// The overlap is not needed here as the pack selects only from the desired overlap +// and the transform if applicable is performed during packing +template +void ParticlesData::unpack_from_ghost(SAMRAI::tbox::MessageStream& stream, + ParticlesDomainOverlap const& /*pOverlap*/) +{ + PHARE_LOG_SCOPE(3, "ParticlesData::unpack_from_ghost"); + + std::size_t numberParticles = 0; + stream >> numberParticles; + std::vector particleArray(numberParticles); + stream.unpack(particleArray.data(), numberParticles); + + domainParticles.reserve(domainParticles.size() + numberParticles); + // we disregard the overlap boxes in this function + // contrary to unpack_cell_overlap. 
+ // the reason is that we only get here when we're unpacking + // particles that are leaving neighbor domain into and so they + // must be in the domain box, no need to check. + for (auto const& p : particleArray) + domainParticles.push_back(p); +} + +} // namespace PHARE::amr + #endif diff --git a/src/amr/data/particles/particles_variable_fill_pattern.hpp b/src/amr/data/particles/particles_variable_fill_pattern.hpp new file mode 100644 index 000000000..dc1b836c3 --- /dev/null +++ b/src/amr/data/particles/particles_variable_fill_pattern.hpp @@ -0,0 +1,138 @@ +#ifndef PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_HPP +#define PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_HPP + +#include "core/def/phare_mpi.hpp" +#include + +#include +#include +#include +#include +#include "SAMRAI/xfer/VariableFillPattern.h" + +#include +#include + +namespace PHARE::amr +{ + +/** ParticlesDomainOverlap is used as a signal in particles_data.hpp + that we are performing an export from the patch ghost layer + of one patch, to the domain of adjacent patch which is the analogue + of the original patch ghost layer + */ +class ParticlesDomainOverlap : public SAMRAI::pdat::CellOverlap +{ + using Super = SAMRAI::pdat::CellOverlap; + +public: + ParticlesDomainOverlap(SAMRAI::hier::BoxContainer const& boxes, + SAMRAI::hier::Transformation const& transformation) + : Super{boxes, transformation} + { + } + + ~ParticlesDomainOverlap() = default; +}; + + + +/** + * \brief VariableFillPattern that is used to grab particles leaving neighboring patches + * + * This pattern is only use to grab incoming particles and not in refinement operations. + * Thus, only calculateOverlap is implemented. computeFillBoxesOverlap, only used in refinement + * is not to be used. + * + * Leaving neighbor particles will be searched in a layer around neighbor patch domain. + * Typically, because a particle should not travel more than one cell in one time step + * this layer should be one cell wide. 
+ * + * Here, we compute the overlap using the particle data geometry, which is the CellGeometry + * This overlap will be the intersection of the source box with the destination ghost box. + * + * What we want is the opposite, we want to take leaving particles from the source GHOST box + * that are found in the destination box. + * + * We thus grow the overlap by some amount to extend beyond the source box + * and then we intersect with the destination box, to exclude the destination ghost layer. + * + * As explained above, the overlap could probably be grown by only one cell. + * Here we use the particle ghost width defined in the GridLayout_t. + */ +template +class ParticleDomainFromGhostFillPattern : public SAMRAI::xfer::VariableFillPattern +{ + std::size_t constexpr static dim = GridLayout_t::dimension; + bool constexpr static overwrite_interior = false; + +public: + ParticleDomainFromGhostFillPattern() {} + + virtual ~ParticleDomainFromGhostFillPattern() {} + + std::shared_ptr + calculateOverlap(SAMRAI::hier::BoxGeometry const& dst_geometry, + SAMRAI::hier::BoxGeometry const& src_geometry, + SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, + SAMRAI::hier::Box const& fill_box, bool const fn_overwrite_interior, + SAMRAI::hier::Transformation const& transformation) const override + { + PHARE_LOG_SCOPE(3, "ParticleDomainFromGhostFillPattern::calculateOverlap"); +#ifndef DEBUG_CHECK_DIM_ASSERTIONS + NULL_USE(dst_patch_box); +#endif + TBOX_ASSERT_OBJDIM_EQUALITY2(dst_patch_box, src_mask); + + auto basic_overlap = dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, + overwrite_interior, transformation); + + auto& cell_overlap = dynamic_cast(*basic_overlap); + + SAMRAI::hier::BoxContainer boxes; + for (auto const& box : cell_overlap.getDestinationBoxContainer()) + { + auto const ghost_overlap + = grow(phare_box_from(box), GridLayout_t::nbrParticleGhosts()); + auto const domain_overlap = ghost_overlap * 
phare_box_from(dst_patch_box); + boxes.pushBack(samrai_box_from(*domain_overlap)); + } + + return std::make_shared(boxes, cell_overlap.getTransformation()); + } + + std::string const& getPatternName() const override { return s_name_id; } + +private: + ParticleDomainFromGhostFillPattern(ParticleDomainFromGhostFillPattern const&) = delete; + ParticleDomainFromGhostFillPattern& operator=(ParticleDomainFromGhostFillPattern const&) + = delete; + + static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; + + SAMRAI::hier::IntVector const& getStencilWidth() override + { + TBOX_ERROR("getStencilWidth() should not be\n" + << "called. This pattern creates overlaps based on\n" + << "the BoxGeometry objects and is not restricted to a\n" + << "specific stencil.\n"); + + return SAMRAI::hier::IntVector::getZero(SAMRAI::tbox::Dimension(1)); + } + + + std::shared_ptr + computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, + SAMRAI::hier::BoxContainer const& node_fill_boxes, + SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, + SAMRAI::hier::PatchDataFactory const& pdf) const override + { + PHARE_LOG_SCOPE(2, "ParticleDomainFromGhostFillPattern::computeFillBoxesOverlap"); + + throw std::runtime_error("no refinement supported or expected"); + } +}; + +} // namespace PHARE::amr + +#endif /* PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_H */ diff --git a/src/amr/data/particles/refine/particles_data_split.hpp b/src/amr/data/particles/refine/particles_data_split.hpp index 90c62cc9c..bb01f9fe2 100644 --- a/src/amr/data/particles/refine/particles_data_split.hpp +++ b/src/amr/data/particles/refine/particles_data_split.hpp @@ -143,21 +143,25 @@ namespace amr SAMRAI::pdat::CellOverlap const& destFieldOverlap) const { // the source PatchData is a possible restriction of a "real" patchdata - // so that it is the closest from the destination boxes - // if all particles from the original source patchdata are in "domainParticles" - // 
they can now be found in either domain of ghost particle arrays of this - // temporary restriction "source" patchData - // therefore we need references to the domain and ghost particle arrays + // (typically if the original patchdata is on a distant MPI rank, the one we are + // given would b a copy of the data in the region of interest only) + // particles to be split only ever come from domain array + // even if they are from a temporary patchdata created by streaming + // remote particles locally. This is to be consistent with + // ParticleData::unpack_cell_overlap which only puts particle in domain array. auto const& srcInteriorParticles = srcParticlesData.domainParticles; - auto const& srcGhostParticles = srcParticlesData.patchGhostParticles; // the particle refine operator's job is to fill either domain (during initialization of // new patches) or coarse to fine boundaries (during advance), so we need references to - // these arrays on the destination. We don't fill ghosts with this operator, they are - // filled from exchanging with neighbor patches. - auto const& destBoxes = destFieldOverlap.getDestinationBoxContainer(); + // these arrays on the destination. 
We don't fill patch ghost particles with this + // operator + auto const& destBoxes = destFieldOverlap.getDestinationBoxContainer(); + + // used when initializing a new patch + auto& destDomainParticles = destParticlesData.domainParticles; + + // used when filling level ghost boundaries auto& destCoarseBoundaryParticles = destParticlesData.levelGhostParticles; - auto& destDomainParticles = destParticlesData.domainParticles; auto& destCoarseBoundaryOldParticles = destParticlesData.levelGhostParticlesOld; auto& destCoarseBoundaryNewParticles = destParticlesData.levelGhostParticlesNew; @@ -173,79 +177,72 @@ namespace amr // in case of interior, this will be just one box usually for (auto const& destinationBox : destBoxes) { - std::array particlesArrays{&srcInteriorParticles, &srcGhostParticles}; - auto splitBox = getSplitBox(destinationBox); + auto const splitBox = getSplitBox(destinationBox); - auto isInDest = [&destinationBox](auto const& particle) // - { return isInBox(destinationBox, particle); }; + auto const isInDest = [&destinationBox](auto const& particle) { + return isInBox(destinationBox, particle); + }; - for (auto const& sourceParticlesArray : particlesArrays) + for (auto const& particle : srcInteriorParticles) { - for (auto const& particle : *sourceParticlesArray) - { - std::array - refinedParticles; - auto particleRefinedPos = toFineGrid(particle); + std::array refinedParticles; + auto particleRefinedPos = toFineGrid(particle); - if (isInBox(splitBox, particleRefinedPos)) - { - split(particleRefinedPos, refinedParticles); + if (isInBox(splitBox, particleRefinedPos)) + { + split(particleRefinedPos, refinedParticles); - // we need to know in which of interior or levelGhostParticlesXXXX - // arrays we must put particles + // we need to know in which of interior or levelGhostParticlesXXXX + // arrays we must put particles - bool constexpr putParticlesInCoarseBoundary - = splitType == ParticlesDataSplitType::coarseBoundary - || splitType == 
ParticlesDataSplitType::coarseBoundaryOld - || splitType == ParticlesDataSplitType::coarseBoundaryNew; + bool constexpr putParticlesInCoarseBoundary + = splitType == ParticlesDataSplitType::coarseBoundary + || splitType == ParticlesDataSplitType::coarseBoundaryOld + || splitType == ParticlesDataSplitType::coarseBoundaryNew; - if constexpr (putParticlesInCoarseBoundary) + if constexpr (putParticlesInCoarseBoundary) + { + if constexpr (splitType == ParticlesDataSplitType::coarseBoundary) { - if constexpr (splitType == ParticlesDataSplitType::coarseBoundary) - { - /*std::cout << "copying " << refinedParticles.size() - << " particles into levelGhost\n";*/ - std::copy_if( - std::begin(refinedParticles), std::end(refinedParticles), - std::back_inserter(destCoarseBoundaryParticles), isInDest); - } - else if constexpr (splitType - == ParticlesDataSplitType::coarseBoundaryOld) - { - /*std::cout << "copying " << refinedParticles.size() - << " particles into levelGhostOld\n";*/ - std::copy_if(std::begin(refinedParticles), - std::end(refinedParticles), - std::back_inserter(destCoarseBoundaryOldParticles), - isInDest); - } - else // splitType is coarseBoundaryNew - { - /*std::cout << "copying " << refinedParticles.size() - << " particles into levelGhostNew\n";*/ - std::copy_if(std::begin(refinedParticles), - std::end(refinedParticles), - std::back_inserter(destCoarseBoundaryNewParticles), - isInDest); - } + /*std::cout << "copying " << refinedParticles.size() + << " particles into levelGhost\n";*/ + std::copy_if( + std::begin(refinedParticles), std::end(refinedParticles), + std::back_inserter(destCoarseBoundaryParticles), isInDest); } - - else + else if constexpr (splitType + == ParticlesDataSplitType::coarseBoundaryOld) + { + /*std::cout << "copying " << refinedParticles.size() + << " particles into levelGhostOld\n";*/ + std::copy_if( + std::begin(refinedParticles), std::end(refinedParticles), + std::back_inserter(destCoarseBoundaryOldParticles), isInDest); + } + else // 
splitType is coarseBoundaryNew { /*std::cout << "copying " << refinedParticles.size() - << " particles into domain\n";*/ - std::copy_if(std::begin(refinedParticles), - std::end(refinedParticles), - std::back_inserter(destDomainParticles), isInDest); + << " particles into levelGhostNew\n";*/ + std::copy_if( + std::begin(refinedParticles), std::end(refinedParticles), + std::back_inserter(destCoarseBoundaryNewParticles), isInDest); } - } // end is candidate for split - } // end loop on particles - } // end loop on source particle arrays - } // loop on destination box + } + + else + { + /*std::cout << "copying " << refinedParticles.size() + << " particles into domain\n";*/ + std::copy_if(std::begin(refinedParticles), std::end(refinedParticles), + std::back_inserter(destDomainParticles), isInDest); + } + } // end is candidate for split + } // end loop on source particle arrays + } // loop on destination box } diff --git a/src/amr/level_initializer/hybrid_level_initializer.hpp b/src/amr/level_initializer/hybrid_level_initializer.hpp index 3e112a34e..90f1c07d6 100644 --- a/src/amr/level_initializer/hybrid_level_initializer.hpp +++ b/src/amr/level_initializer/hybrid_level_initializer.hpp @@ -79,25 +79,37 @@ namespace solver } } - // now all particles are here - // we must compute moments. + // now all particles are here, we must compute moments. 
+ auto& ions = hybridModel.state.ions; + auto& rm = *hybridModel.resourcesManager; - for (auto& patch : level) + for (auto& patch : rm.enumerate(level, ions)) { - auto& ions = hybridModel.state.ions; - auto& resourcesManager = hybridModel.resourcesManager; - auto dataOnPatch = resourcesManager->setOnPatch(*patch, ions); - auto layout = amr::layoutFromPatch(*patch); - + auto layout = amr::layoutFromPatch(*patch); core::resetMoments(ions); core::depositParticles(ions, layout, interpolate_, core::DomainDeposit{}); - core::depositParticles(ions, layout, interpolate_, core::PatchGhostDeposit{}); + } + + // at this point flux and density are computed for all pops + // but nodes on ghost box overlaps are not complete because they lack + // contribution of neighbor particles. + // The following two calls will += flux and density on these overlaps. + hybMessenger.fillFluxBorders(ions, level, initDataTime); + hybMessenger.fillDensityBorders(ions, level, initDataTime); + // the only remaining incomplete nodes are those next to and on level ghost layers + // we now complete them by depositing levelghost particles + for (auto& patch : rm.enumerate(level, ions)) + { if (!isRootLevel(levelNumber)) { + auto layout = amr::layoutFromPatch(*patch); core::depositParticles(ions, layout, interpolate_, core::LevelGhostDeposit{}); } + + // now all nodes are complete, the total ion moments + // can safely be computed. ions.computeChargeDensity(); ions.computeBulkVelocity(); } @@ -110,7 +122,7 @@ namespace solver // is not needed. But it still seems to use the messenger temporaries like // NiOld etc. so prepareStep() must be called, see end of the function. 
// - TODO more better comment(s) - hybMessenger.fillIonMomentGhosts(hybridModel.state.ions, level, initDataTime); + hybMessenger.fillIonMomentGhosts(ions, level, initDataTime); // now moments are known everywhere, compute J and E diff --git a/src/amr/messengers/communicator.hpp b/src/amr/messengers/communicator.hpp index 821e8a54d..df16eb24d 100644 --- a/src/amr/messengers/communicator.hpp +++ b/src/amr/messengers/communicator.hpp @@ -52,6 +52,11 @@ namespace amr public: + Communicator() {} + virtual ~Communicator() {} + Communicator(Communicator const&) = delete; + Communicator(Communicator&&) = default; + // we have an algorithm for each quantity, like Bx, By, Bz // even if they are to be refined/synced together. // the reason is that SAMRAI assumes that all Variables registered diff --git a/src/amr/messengers/field_sum_transaction.hpp b/src/amr/messengers/field_sum_transaction.hpp new file mode 100644 index 000000000..d28635244 --- /dev/null +++ b/src/amr/messengers/field_sum_transaction.hpp @@ -0,0 +1,242 @@ +#ifndef PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP +#define PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP + +#include + +#include +#include +#include +#include + +#include + +namespace PHARE::amr +{ + + +/** * @brief FieldBorderSumTransaction is used to += pop density and flux on ghost box overlaps + * + * A FieldBorderSumTransaction is a SAMRAI Transaction created by the + * FieldBorderSumTransactionFactory provided (via createShedule) to schedules that accumulate + * incomplete density and flux on ghost box overlaps. + * + * Due to the lack of neighbor particle contributions, some domain nodes and ghost nodes + * have incomplete moments after deposition. The complement of these nodes is what has + * been deposited on (also incomplete) neighbor nodes. 
+ * + * Default SAMRAI transaction calls PatchData::copy and PatchData::packStream + * This transaction defines these override to these methods to call specific methods + * of FieldData to perform the += instead of =. + * These methods are copyAndSum and unpackStreamAndSum. + * + */ +template +class FieldBorderSumTransaction : public SAMRAI::tbox::Transaction +{ +public: + FieldBorderSumTransaction(std::shared_ptr const& dst_level, + std::shared_ptr const& src_level, + std::shared_ptr const& overlap, + SAMRAI::hier::Box const& dst_node, SAMRAI::hier::Box const& src_node, + SAMRAI::xfer::RefineClasses::Data const** refine_data, int item_id) + : d_dst_level(dst_level) + , d_src_level(src_level) + , d_overlap(overlap) + , d_dst_node(dst_node) + , d_src_node(src_node) + , d_refine_data(refine_data) + , d_item_id(item_id) + , d_incoming_bytes(0) + , d_outgoing_bytes(0) + { + TBOX_ASSERT(dst_level); + TBOX_ASSERT(src_level); + TBOX_ASSERT(overlap); + TBOX_ASSERT(dst_node.getLocalId() >= 0); + TBOX_ASSERT(src_node.getLocalId() >= 0); + TBOX_ASSERT(item_id >= 0); + TBOX_ASSERT(refine_data[item_id] != 0); + + TBOX_ASSERT_OBJDIM_EQUALITY4(*dst_level, *src_level, dst_node, src_node); + } + + virtual ~FieldBorderSumTransaction() {} + + + virtual bool canEstimateIncomingMessageSize(); + + virtual size_t computeIncomingMessageSize(); + + virtual size_t computeOutgoingMessageSize(); + + virtual int getSourceProcessor(); + + virtual int getDestinationProcessor(); + + virtual void packStream(SAMRAI::tbox::MessageStream& stream); + + virtual void unpackStream(SAMRAI::tbox::MessageStream& stream); + + virtual void copyLocalData(); + + virtual void printClassData(std::ostream& stream) const; + +private: + std::shared_ptr d_dst_level; + std::shared_ptr d_src_level; + std::shared_ptr d_overlap; + SAMRAI::hier::Box d_dst_node; + SAMRAI::hier::Box d_src_node; + SAMRAI::xfer::RefineClasses::Data const** d_refine_data; + int d_item_id; + size_t d_incoming_bytes; + size_t d_outgoing_bytes; 
+}; + + +template +bool FieldBorderSumTransaction::canEstimateIncomingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::canEstimateIncomingMessageSize"); + bool can_estimate = false; + if (getSourceProcessor() == d_src_level->getBoxLevel()->getMPI().getRank()) + { + can_estimate = d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->canEstimateStreamSizeFromBox(); + } + else + { + can_estimate = d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch) + ->canEstimateStreamSizeFromBox(); + } + return can_estimate; +} + + +template +size_t FieldBorderSumTransaction::computeIncomingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::computeIncomingMessageSize"); + d_incoming_bytes = d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch) + ->getDataStreamSize(*d_overlap); + return d_incoming_bytes; +} + +template +size_t FieldBorderSumTransaction::computeOutgoingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::computeOutgoingMessageSize"); + d_outgoing_bytes = d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->getDataStreamSize(*d_overlap); + return d_outgoing_bytes; +} + +template +int FieldBorderSumTransaction::getSourceProcessor() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::getSourceProcessor"); + return d_src_node.getOwnerRank(); +} + +template +int FieldBorderSumTransaction::getDestinationProcessor() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::getDestinationProcessor"); + return d_dst_node.getOwnerRank(); +} + +template +void FieldBorderSumTransaction::packStream(SAMRAI::tbox::MessageStream& stream) +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::packStream"); + d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->packStream(stream, *d_overlap); +} + +template 
+void FieldBorderSumTransaction::unpackStream(SAMRAI::tbox::MessageStream& stream) +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::unpackStream"); + std::shared_ptr onode_dst_data( + SAMRAI_SHARED_PTR_CAST( + d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch))); + TBOX_ASSERT(onode_dst_data); + + onode_dst_data->unpackStreamAndSum(stream, *d_overlap); +} + + +template +void FieldBorderSumTransaction::printClassData(std::ostream& stream) const +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::printClassData"); + throw std::runtime_error("FieldBorderSumTransaction::printClassData!"); +} + +template +void FieldBorderSumTransaction::copyLocalData() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::copyLocalData"); + std::shared_ptr onode_dst_data( + SAMRAI_SHARED_PTR_CAST( + d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch))); + TBOX_ASSERT(onode_dst_data); + + std::shared_ptr onode_src_data( + SAMRAI_SHARED_PTR_CAST( + d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src))); + TBOX_ASSERT(onode_src_data); + + onode_dst_data->sum(*onode_src_data, *d_overlap); +} + + +template +class FieldBorderSumTransactionFactory : public SAMRAI::xfer::RefineTransactionFactory +{ +public: + std::shared_ptr + allocate(std::shared_ptr const& dst_level, + std::shared_ptr const& src_level, + std::shared_ptr const& overlap, + SAMRAI::hier::Box const& dst_node, SAMRAI::hier::Box const& src_node, + SAMRAI::xfer::RefineClasses::Data const** refine_data, int item_id, + SAMRAI::hier::Box const& box, bool use_time_interpolation) const override + { + NULL_USE(box); + NULL_USE(use_time_interpolation); + + TBOX_ASSERT(dst_level); + TBOX_ASSERT(src_level); + TBOX_ASSERT(overlap); + TBOX_ASSERT(dst_node.getLocalId() >= 0); + TBOX_ASSERT(src_node.getLocalId() >= 0); + TBOX_ASSERT(refine_data != 0); + TBOX_ASSERT_OBJDIM_EQUALITY4(*dst_level, 
*src_level, dst_node, src_node); + + PHARE_LOG_SCOPE(2, "FieldBorderSumTransactionFactory::allocate"); + return std::make_shared>( + dst_level, src_level, overlap, dst_node, src_node, refine_data, item_id); + } + + void + preprocessScratchSpace(std::shared_ptr const& level, double fill_time, + SAMRAI::hier::ComponentSelector const& preprocess_vector) const override + { + PHARE_LOG_SCOPE(2, "FieldBorderSumTransactionFactory::preprocessScratchSpace"); + + // noop + } +}; + +} // namespace PHARE::amr + +#endif // PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp index 5ba834534..a5966a015 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp @@ -5,45 +5,46 @@ #include "core/logger.hpp" #include "core/def/phare_mpi.hpp" -#include "SAMRAI/hier/CoarseFineBoundary.h" -#include "SAMRAI/hier/IntVector.h" -#include "core/utilities/index/index.hpp" + +#include "core/utilities/point/point.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/hybrid/hybrid_quantities.hpp" +#include "core/data/vecfield/vecfield_component.hpp" +#include "core/numerics/interpolator/interpolator.hpp" + #include "refiner_pool.hpp" #include "synchronizer_pool.hpp" -#include "amr/data/field/coarsening/default_field_coarsener.hpp" -#include "amr/data/field/coarsening/magnetic_field_coarsener.hpp" -#include "amr/data/field/refine/field_refiner.hpp" -#include "amr/data/field/refine/magnetic_field_refiner.hpp" -#include "amr/data/field/refine/electric_field_refiner.hpp" -#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" -#include "amr/data/field/coarsening/field_coarsen_operator.hpp" #include "amr/messengers/messenger_info.hpp" +#include "amr/resources_manager/amr_utils.hpp" +#include 
"amr/data/field/refine/field_refiner.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" #include "amr/messengers/hybrid_messenger_strategy.hpp" -#include "amr/resources_manager/amr_utils.hpp" #include "amr/data/field/refine/magnetic_refine_patch_strategy.hpp" -#include "core/numerics/interpolator/interpolator.hpp" -#include "core/hybrid/hybrid_quantities.hpp" -#include "core/data/particles/particle_array.hpp" -#include "core/data/vecfield/vecfield_component.hpp" -#include "core/data/vecfield/vecfield.hpp" -#include "core/utilities/point/point.hpp" - - +#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/refine/electric_field_refiner.hpp" +#include "amr/data/field/refine/magnetic_field_refiner.hpp" +#include "amr/data/field/coarsening/field_coarsen_operator.hpp" +#include "amr/data/field/coarsening/default_field_coarsener.hpp" +#include "amr/data/field/coarsening/magnetic_field_coarsener.hpp" +#include "amr/data/particles/particles_variable_fill_pattern.hpp" +#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include "SAMRAI/xfer/RefineAlgorithm.h" -#include "SAMRAI/xfer/RefineSchedule.h" -#include "SAMRAI/xfer/BoxGeometryVariableFillPattern.h" +#include +#include +#include +#include +#include -#include -#include +#include +#include #include #include #include -#include +#include + namespace PHARE @@ -71,22 +72,23 @@ namespace amr template class HybridHybridMessengerStrategy : public HybridMessengerStrategy { - using GridT = typename HybridModel::grid_type; - using IonsT = typename HybridModel::ions_type; - using ElectromagT = typename HybridModel::electromag_type; - using VecFieldT = typename HybridModel::vecfield_type; - using GridLayoutT = typename HybridModel::gridlayout_type; - using FieldT = typename VecFieldT::field_type; + using GridT = HybridModel::grid_type; + using IonsT = HybridModel::ions_type; + using ElectromagT = 
HybridModel::electromag_type; + using VecFieldT = HybridModel::vecfield_type; + using TensorFieldT = IonsT::tensorfield_type; + using GridLayoutT = HybridModel::gridlayout_type; + using FieldT = VecFieldT::field_type; using FieldDataT = FieldData; - using ResourcesManagerT = typename HybridModel::resources_manager_type; - using IPhysicalModel = typename HybridModel::Interface; + using ResourcesManagerT = HybridModel::resources_manager_type; + using IPhysicalModel = HybridModel::Interface; static constexpr std::size_t dimension = GridLayoutT::dimension; static constexpr std::size_t interpOrder = GridLayoutT::interp_order; - using InteriorParticleRefineOp = typename RefinementParams::InteriorParticleRefineOp; - using CoarseToFineRefineOpOld = typename RefinementParams::CoarseToFineRefineOpOld; - using CoarseToFineRefineOpNew = typename RefinementParams::CoarseToFineRefineOpNew; + using InteriorParticleRefineOp = RefinementParams::InteriorParticleRefineOp; + using CoarseToFineRefineOpOld = RefinementParams::CoarseToFineRefineOpOld; + using CoarseToFineRefineOpNew = RefinementParams::CoarseToFineRefineOpNew; template using BaseRefineOp = FieldRefineOperator; @@ -114,6 +116,9 @@ namespace amr resourcesManager_->registerResources(Jold_); resourcesManager_->registerResources(NiOld_); resourcesManager_->registerResources(ViOld_); + resourcesManager_->registerResources(sumVec_); + resourcesManager_->registerResources(sumField_); + resourcesManager_->registerResources(sumTensor_); } virtual ~HybridHybridMessengerStrategy() = default; @@ -134,6 +139,9 @@ namespace amr resourcesManager_->allocate(Jold_, patch, allocateTime); resourcesManager_->allocate(NiOld_, patch, allocateTime); resourcesManager_->allocate(ViOld_, patch, allocateTime); + resourcesManager_->allocate(sumVec_, patch, allocateTime); + resourcesManager_->allocate(sumField_, patch, allocateTime); + resourcesManager_->allocate(sumTensor_, patch, allocateTime); } @@ -178,12 +186,6 @@ namespace amr 
Balgo.registerRefine(*by_id, *by_id, *by_id, BfieldRefineOp_, yVariableFillPattern); Balgo.registerRefine(*bz_id, *bz_id, *bz_id, BfieldRefineOp_, zVariableFillPattern); - BalgoNode.registerRefine(*bx_id, *bx_id, *bx_id, BfieldNodeRefineOp_, - xVariableFillPattern); - BalgoNode.registerRefine(*by_id, *by_id, *by_id, BfieldNodeRefineOp_, - yVariableFillPattern); - BalgoNode.registerRefine(*bz_id, *bz_id, *bz_id, BfieldNodeRefineOp_, - zVariableFillPattern); auto ex_id = resourcesManager_->getID(hybridInfo->modelElectric.xName); auto ey_id = resourcesManager_->getID(hybridInfo->modelElectric.yName); @@ -215,8 +217,7 @@ namespace amr { auto const level = hierarchy->getPatchLevel(levelNumber); - magSharedNodeRefineSchedules[levelNumber] - = BalgoNode.createSchedule(level, &magneticRefinePatchStrategy_); + magPatchGhostsRefineSchedules[levelNumber] = Balgo.createSchedule(level, &magneticRefinePatchStrategy_); @@ -226,17 +227,18 @@ namespace amr magGhostsRefineSchedules[levelNumber] = Balgo.createSchedule( level, levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); - elecSharedNodesRefiners_.registerLevel(hierarchy, level); - currentSharedNodesRefiners_.registerLevel(hierarchy, level); elecGhostsRefiners_.registerLevel(hierarchy, level); currentGhostsRefiners_.registerLevel(hierarchy, level); - rhoGhostsRefiners_.registerLevel(hierarchy, level); velGhostsRefiners_.registerLevel(hierarchy, level); + domainGhostPartRefiners_.registerLevel(hierarchy, level); - patchGhostPartRefiners_.registerLevel(hierarchy, level); + for (auto& refiner : popFluxBorderSumRefiners_) + refiner.registerLevel(hierarchy, level); + for (auto& refiner : popDensityBorderSumRefiners_) + refiner.registerLevel(hierarchy, level); // root level is not initialized with a schedule using coarser level data // so we don't create these schedules if root level @@ -246,6 +248,8 @@ namespace amr // those are for refinement magInitRefineSchedules[levelNumber] = Balgo.createSchedule( level, nullptr, 
levelNumber - 1, hierarchy, &magneticRefinePatchStrategy_); + + electricInitRefiners_.registerLevel(hierarchy, level); domainParticlesRefiners_.registerLevel(hierarchy, level); lvlGhostPartOldRefiners_.registerLevel(hierarchy, level); @@ -273,12 +277,11 @@ namespace amr auto& hybridModel = dynamic_cast(model); auto level = hierarchy->getPatchLevel(levelNumber); - bool isRegriddingL0 = levelNumber == 0 and oldLevel; + bool const isRegriddingL0 = levelNumber == 0 and oldLevel; magneticRegriding_(hierarchy, level, oldLevel, hybridModel, initDataTime); electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); domainParticlesRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); // regriding will fill the new level wherever it has points that overlap @@ -293,6 +296,7 @@ namespace amr if (!isRegriddingL0) { auto& E = hybridModel.state.electromag.E; + elecGhostsRefiners_.fill(E, levelNumber, initDataTime); } @@ -361,12 +365,6 @@ namespace amr PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); domainParticlesRefiners_.fill(levelNumber, initDataTime); PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); - // however we need to call the ghost communicator for patch ghost particles - // since the interior schedules have a restriction to the interior of the patch. 
- PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : patch ghost part fill schedule"); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); - PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : patch ghost part fill schedule"); - lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); @@ -389,7 +387,6 @@ namespace amr void fillElectricGhosts(VecFieldT& E, int const levelNumber, double const fillTime) override { PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillElectricGhosts"); - elecSharedNodesRefiners_.fill(E, levelNumber, fillTime); elecGhostsRefiners_.fill(E, levelNumber, fillTime); } @@ -399,7 +396,6 @@ namespace amr void fillCurrentGhosts(VecFieldT& J, int const levelNumber, double const fillTime) override { PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillCurrentGhosts"); - currentSharedNodesRefiners_.fill(J, levelNumber, fillTime); currentGhostsRefiners_.fill(J, levelNumber, fillTime); } @@ -416,15 +412,77 @@ namespace amr { PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonGhostParticles"); - for (auto patch : level) - { - auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); + domainGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); + + for (auto patch : resourcesManager_->enumerate(level, ions)) for (auto& pop : ions) - { pop.patchGhostParticles().clear(); - } + } + + + + void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) override + { + auto constexpr N = core::detail::tensor_field_dim_from_rank<1>(); + using value_type = FieldT::value_type; + + + // we cannot have the schedule doign the += in place in the flux array + // because some overlaps could be counted several times. 
+ // we therefore first copy flux into a sumVec buffer and then + // execute the schedule onto that before copying it back onto the flux array + for (std::size_t i = 0; i < ions.size(); ++i) + { + for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(sumVec_[c].data(), ions[i].flux()[c].data(), + ions[i].flux()[c].size() * sizeof(value_type)); + + + popFluxBorderSumRefiners_[i].fill(level.getLevelNumber(), fillTime); + + for (auto patch : resourcesManager_->enumerate(level, ions, sumVec_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(ions[i].flux()[c].data(), sumVec_[c].data(), + ions[i].flux()[c].size() * sizeof(value_type)); + } + } + + void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) override + { + using value_type = FieldT::value_type; + + std::size_t const fieldsPerPop = popDensityBorderSumRefiners_.size() / ions.size(); + + for (std::size_t i = 0; i < ions.size(); ++i) + { + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(sumField_.data(), ions[i].particleDensity().data(), + ions[i].particleDensity().size() * sizeof(value_type)); + + + popDensityBorderSumRefiners_[i * fieldsPerPop].fill(level.getLevelNumber(), + fillTime); + + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(ions[i].particleDensity().data(), sumField_.data(), + ions[i].particleDensity().size() * sizeof(value_type)); + + // + + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(sumField_.data(), ions[i].chargeDensity().data(), + ions[i].chargeDensity().size() * sizeof(value_type)); + + popDensityBorderSumRefiners_[i * fieldsPerPop + 1].fill(level.getLevelNumber(), + fillTime); + + for (auto patch : resourcesManager_->enumerate(level, ions, sumField_)) + std::memcpy(ions[i].chargeDensity().data(), sumField_.data(), + ions[i].chargeDensity().size() * 
sizeof(value_type)); } - patchGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); } @@ -433,15 +491,14 @@ namespace amr /** * @brief fillIonPopMomentGhosts works on moment ghost nodes * - * patch border node moments are completed by the deposition of patch ghost - * particles for all populations level border nodes are completed by the deposition + * level border nodes are completed by the deposition * of level ghost [old,new] particles for all populations, linear time interpolation * is used to get the contribution of old/new particles */ void fillIonPopMomentGhosts(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const afterPushTime) override { - PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); + PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonPopMomentGhosts"); auto alpha = timeInterpCoef_(afterPushTime, level.getLevelNumber()); if (level.getLevelNumber() > 0 and (alpha < 0 or alpha > 1)) @@ -459,23 +516,21 @@ namespace amr for (auto& pop : ions) { - // first thing to do is to project patchGhostParitcles moments - auto& patchGhosts = pop.patchGhostParticles(); - auto& particleDensity = pop.particleDensity(); - auto& chargeDensity = pop.chargeDensity(); - auto& flux = pop.flux(); - - interpolate_(makeRange(patchGhosts), particleDensity, chargeDensity, flux, layout); + auto& particleDensity = pop.particleDensity(); + auto& chargeDensity = pop.chargeDensity(); + auto& flux = pop.flux(); if (level.getLevelNumber() > 0) // no levelGhost on root level { // then grab levelGhostParticlesOld and levelGhostParticlesNew // and project them with alpha and (1-alpha) coefs, respectively auto& levelGhostOld = pop.levelGhostParticlesOld(); - interpolate_(makeRange(levelGhostOld), particleDensity, chargeDensity, flux, layout, 1. - alpha); + interpolate_(makeRange(levelGhostOld), particleDensity, chargeDensity, flux, + layout, 1. 
- alpha); auto& levelGhostNew = pop.levelGhostParticlesNew(); - interpolate_(makeRange(levelGhostNew), particleDensity, chargeDensity, flux, layout, alpha); + interpolate_(makeRange(levelGhostNew), particleDensity, chargeDensity, flux, + layout, alpha); } } } @@ -490,6 +545,7 @@ namespace amr virtual void fillIonMomentGhosts(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const afterPushTime) override { + PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); rhoGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); velGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); } @@ -615,11 +671,7 @@ namespace amr auto& hybridModel = static_cast(model); - elecSharedNodesRefiners_.fill(hybridModel.state.electromag.E, levelNumber, - initDataTime); - elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, initDataTime); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); // at some point in the future levelGhostParticles could be filled with injected // particles depending on the domain boundary condition. 
@@ -667,8 +719,6 @@ namespace amr PHARE_LOG_LINE_STR("postSynchronize level " + std::to_string(levelNumber)) - magSharedNodeRefineSchedules[levelNumber]->fillData(time); - elecSharedNodesRefiners_.fill(hybridModel.state.electromag.E, levelNumber, time); // we fill magnetic field ghosts only on patch ghost nodes and not on level // ghosts the reason is that 1/ filling ghosts is necessary to prevent mismatch @@ -684,37 +734,32 @@ namespace amr } private: - void registerGhostComms_(std::unique_ptr const& info) + auto makeKeys(auto const& vecFieldNames) { - auto makeKeys = [](auto const& vecFieldNames) { - std::vector keys; - std::transform(std::begin(vecFieldNames), std::end(vecFieldNames), - std::back_inserter(keys), [](auto const& d) { return d.vecName; }); - return keys; - }; - - elecSharedNodesRefiners_.addStaticRefiners(info->ghostElectric, EfieldNodeRefineOp_, - makeKeys(info->ghostElectric)); + std::vector keys; + std::transform(std::begin(vecFieldNames), std::end(vecFieldNames), + std::back_inserter(keys), [](auto const& d) { return d.vecName; }); + return keys; + }; + void registerGhostComms_(std::unique_ptr const& info) + { elecGhostsRefiners_.addStaticRefiners(info->ghostElectric, EfieldRefineOp_, - makeKeys(info->ghostElectric)); - - currentSharedNodesRefiners_.addTimeRefiners(info->ghostCurrent, info->modelCurrent, - core::VecFieldNames{Jold_}, - EfieldNodeRefineOp_, fieldTimeOp_); + makeKeys(info->ghostElectric), + defaultFieldFillPattern); currentGhostsRefiners_.addTimeRefiners(info->ghostCurrent, info->modelCurrent, core::VecFieldNames{Jold_}, EfieldRefineOp_, - fieldTimeOp_); + fieldTimeOp_, defaultFieldFillPattern); rhoGhostsRefiners_.addTimeRefiner(info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, fieldTimeOp_, - info->modelIonDensity); + info->modelIonDensity, defaultFieldFillPattern); velGhostsRefiners_.addTimeRefiners(info->ghostBulkVelocity, info->modelIonBulkVelocity, core::VecFieldNames{ViOld_}, fieldRefineOp_, - 
fieldTimeOp_); + fieldTimeOp_, defaultFieldFillPattern); } @@ -722,13 +767,6 @@ namespace amr void registerInitComms(std::unique_ptr const& info) { - auto makeKeys = [](auto const& descriptor) { - std::vector keys; - std::transform(std::begin(descriptor), std::end(descriptor), - std::back_inserter(keys), [](auto const& d) { return d.vecName; }); - return keys; - }; - electricInitRefiners_.addStaticRefiners(info->initElectric, EfieldRefineOp_, makeKeys(info->initElectric)); @@ -747,10 +785,26 @@ namespace amr info->levelGhostParticlesNew); - patchGhostPartRefiners_.addStaticRefiners(info->patchGhostParticles, nullptr, - info->patchGhostParticles); - } + domainGhostPartRefiners_.addStaticRefiners( + info->patchGhostParticles, nullptr, info->patchGhostParticles, + std::make_shared>()); + + for (auto const& vecfield : info->ghostFlux) + { + auto pop_flux_vec = std::vector{vecfield}; + popFluxBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + core::VecFieldNames{sumVec_}, vecfield, nullptr, sumVec_.name(), + std::make_shared>()); + } + + for (auto const& field : info->sumBorderFields) + popDensityBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + sumField_.name(), field, nullptr, sumField_.name(), + std::make_shared>()); + } @@ -1001,6 +1055,11 @@ namespace amr VecFieldT ViOld_{stratName + "_VBulkOld", core::HybridQuantity::Vector::V}; FieldT NiOld_{stratName + "_NiOld", core::HybridQuantity::Scalar::rho}; + TensorFieldT sumTensor_{"PHARE_sumTensor", core::HybridQuantity::Tensor::M}; + VecFieldT sumVec_{"PHARE_sumVec", core::HybridQuantity::Vector::V}; + FieldT sumField_{"PHARE_sumField", core::HybridQuantity::Scalar::rho}; + + //! 
ResourceManager shared with other objects (like the HybridModel) std::shared_ptr resourcesManager_; @@ -1018,37 +1077,39 @@ namespace amr // these refiners are used to initialize electromagnetic fields when creating // a new level (initLevel) or regridding (regrid) - using InitRefinerPool = RefinerPool; - using SharedNodeRefinerPool = RefinerPool; - using GhostRefinerPool = RefinerPool; - using PatchGhostRefinerPool = RefinerPool; - using InitDomPartRefinerPool = RefinerPool; - using PatchGhostPartRefinerPool = RefinerPool; + using InitRefinerPool = RefinerPool; + using GhostRefinerPool = RefinerPool; + using PatchGhostRefinerPool = RefinerPool; + using InitDomPartRefinerPool = RefinerPool; + using DomainGhostPartRefinerPool = RefinerPool; + using FieldGhostSumRefinerPool = RefinerPool; + using FieldFillPattern_t = FieldFillPattern; + + //! += flux on ghost box overlap incomplete population moment nodes + std::vector popFluxBorderSumRefiners_; + //! += density on ghost box overlap incomplete population moment nodes + std::vector popDensityBorderSumRefiners_; InitRefinerPool electricInitRefiners_{resourcesManager_}; + SAMRAI::xfer::RefineAlgorithm Balgo; SAMRAI::xfer::RefineAlgorithm Ealgo; - SAMRAI::xfer::RefineAlgorithm BalgoNode; std::map> magInitRefineSchedules; std::map> magGhostsRefineSchedules; std::map> magPatchGhostsRefineSchedules; std::map> elecPatchGhostsRefineSchedules; - std::map> magSharedNodeRefineSchedules; //! store refiners for electric fields that need ghosts to be filled - SharedNodeRefinerPool elecSharedNodesRefiners_{resourcesManager_}; GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; - GhostRefinerPool currentSharedNodesRefiners_{resourcesManager_}; GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; // moment ghosts - // these do not need sharedNode refiners. 
The reason is that - // the border node is already complete by the deposit of ghost particles + // The border node is already complete by the deposit of ghost particles // these refiners are used to fill ghost nodes, and therefore, owing to - // the GhostField tag, will only assign pur ghost nodes. Border nodes will + // the GhostField tag, will only assign pure ghost nodes. Border nodes will // be overwritten only on level borders, which does not seem to be an issue. GhostRefinerPool rhoGhostsRefiners_{resourcesManager_}; GhostRefinerPool velGhostsRefiners_{resourcesManager_}; @@ -1071,8 +1132,8 @@ namespace amr RefOp_ptr levelGhostParticlesNewOp_{std::make_shared()}; - // this contains refiners for each population to exchange patch ghost particles - PatchGhostPartRefinerPool patchGhostPartRefiners_{resourcesManager_}; + //! to grab particle leaving neighboring patches and inject into domain + DomainGhostPartRefinerPool domainGhostPartRefiners_{resourcesManager_}; SynchronizerPool densitySynchronizers_{resourcesManager_}; SynchronizerPool ionBulkVelSynchronizers_{resourcesManager_}; @@ -1081,13 +1142,11 @@ namespace amr RefOp_ptr fieldRefineOp_{std::make_shared()}; - // see field_variable_fill_pattern.hpp for explanation about this "node_only" flag - // note that refinement operator, via the boolean argument, serve as a relay for the - // the refinealgorithm to get the correct variablefillpattern - RefOp_ptr BfieldNodeRefineOp_{std::make_shared(/*node_only=*/true)}; + RefOp_ptr BfieldRefineOp_{std::make_shared()}; - RefOp_ptr EfieldNodeRefineOp_{std::make_shared(/*node_only=*/true)}; RefOp_ptr EfieldRefineOp_{std::make_shared()}; + std::shared_ptr defaultFieldFillPattern + = std::make_shared>(); // stateless (mostly) std::shared_ptr fieldTimeOp_{std::make_shared()}; diff --git a/src/amr/messengers/hybrid_messenger.hpp b/src/amr/messengers/hybrid_messenger.hpp index 14818ea85..7dc8aeabd 100644 --- a/src/amr/messengers/hybrid_messenger.hpp +++ 
b/src/amr/messengers/hybrid_messenger.hpp @@ -2,15 +2,12 @@ #define PHARE_HYBRID_MESSENGER_HPP +#include "core/def.hpp" +#include - -#include "core/hybrid/hybrid_quantities.hpp" -#include "amr/messengers/hybrid_messenger_strategy.hpp" #include "amr/messengers/messenger.hpp" #include "amr/messengers/messenger_info.hpp" -#include "amr/messengers/mhd_messenger.hpp" -#include "core/def.hpp" - +#include "amr/messengers/hybrid_messenger_strategy.hpp" @@ -334,6 +331,15 @@ namespace amr void syncIonMoments(IonsT& ions) { strat_->syncIonMoments(ions); } + void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const fillTime) + { + strat_->fillFluxBorders(ions, level, fillTime); + } + + void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const fillTime) + { + strat_->fillDensityBorders(ions, level, fillTime); + } /* ------------------------------------------------------------------------- End HybridMessenger Interface @@ -341,11 +347,11 @@ namespace amr - virtual ~HybridMessenger() = default; + private: - const std::unique_ptr strat_; + std::unique_ptr const strat_; }; diff --git a/src/amr/messengers/hybrid_messenger_info.hpp b/src/amr/messengers/hybrid_messenger_info.hpp index e1ae2c4f2..62593c598 100644 --- a/src/amr/messengers/hybrid_messenger_info.hpp +++ b/src/amr/messengers/hybrid_messenger_info.hpp @@ -66,6 +66,9 @@ namespace amr std::vector ghostElectric; std::vector ghostCurrent; std::vector ghostBulkVelocity; + std::vector ghostFlux; + + std::vector sumBorderFields; virtual ~HybridMessengerInfo() = default; }; diff --git a/src/amr/messengers/hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_messenger_strategy.hpp index 3afdb5305..1acf06abe 100644 --- a/src/amr/messengers/hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_messenger_strategy.hpp @@ -54,7 +54,7 @@ namespace amr = 0; virtual void regrid(std::shared_ptr const& hierarchy, - const int levelNumber, + int const levelNumber, std::shared_ptr 
const& oldLevel, IPhysicalModel& model, double const initDataTime) = 0; @@ -119,6 +119,13 @@ namespace amr double const time) = 0; + virtual void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) + = 0; + virtual void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) + = 0; + std::string name() const { return stratname_; } diff --git a/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp b/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp index 457a598c8..4208e1769 100644 --- a/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp @@ -19,7 +19,7 @@ namespace amr using IPhysicalModel = typename HybridModel::Interface; public: - static const std::string stratName; + static std::string const stratName; MHDHybridMessengerStrategy( std::shared_ptr mhdResourcesManager, @@ -66,7 +66,7 @@ namespace amr } void regrid(std::shared_ptr const& /*hierarchy*/, - const int /*levelNumber*/, + int const /*levelNumber*/, std::shared_ptr const& /*oldLevel*/, IPhysicalModel& /*model*/, double const /*initDataTime*/) override { @@ -108,8 +108,18 @@ namespace amr { } + + void fillFluxBorders(IonsT& /*ions*/, SAMRAI::hier::PatchLevel& /*level*/, + double const /*fillTime*/) override + { + } + void fillDensityBorders(IonsT& /*ions*/, SAMRAI::hier::PatchLevel& /*level*/, + double const /*fillTime*/) override + { + } + void firstStep(IPhysicalModel& /*model*/, SAMRAI::hier::PatchLevel& /*level*/, - const std::shared_ptr& /*hierarchy*/, + std::shared_ptr const& /*hierarchy*/, double const /*currentTime*/, double const /*prevCoarserTime*/, double const /*newCoarserTime*/) override { @@ -147,7 +157,7 @@ namespace amr }; template - const std::string MHDHybridMessengerStrategy::stratName + std::string const MHDHybridMessengerStrategy::stratName = "MHDModel-HybridModel"; } // namespace amr diff --git a/src/amr/messengers/refiner.hpp 
b/src/amr/messengers/refiner.hpp index 0810501b1..0d44a73f9 100644 --- a/src/amr/messengers/refiner.hpp +++ b/src/amr/messengers/refiner.hpp @@ -4,7 +4,11 @@ #include "communicator.hpp" #include "core/data/vecfield/vecfield.hpp" -#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/messengers/field_sum_transaction.hpp" + +#include +#include + namespace PHARE::amr { @@ -15,14 +19,18 @@ enum class RefinerType { InitField, InitInteriorPart, LevelBorderParticles, - InteriorGhostParticles, - SharedBorder + PatchFieldBorderSum, + ExteriorGhostParticles }; + template class Refiner : private Communicator { + using FieldData_t = typename ResourcesManager::UserField_t::patch_data_type; + + public: void registerLevel(std::shared_ptr const& hierarchy, std::shared_ptr const& level) @@ -64,6 +72,17 @@ class Refiner : private Communicator this->add(algo, algo->createSchedule(level), levelNumber); } + // schedule used to += density and flux for populations + // on incomplete overlaped ghost box nodes + else if constexpr (Type == RefinerType::PatchFieldBorderSum) + { + this->add(algo, + algo->createSchedule( + level, 0, + std::make_shared>()), + levelNumber); + } + // this createSchedule overload is used to initialize fields. // note that here we must take that createsSchedule() overload and put nullptr // as src since we want to take from coarser level everywhere. using the @@ -107,15 +126,8 @@ class Refiner : private Communicator levelNumber); } - // this branch is used to create a schedule that will transfer particles into - // the patches' ghost zones. 
- else if constexpr (Type == RefinerType::InteriorGhostParticles) - { - this->add(algo, algo->createSchedule(level), levelNumber); - } - // schedule to synchronize shared border values, and not include refinement - else if constexpr (Type == RefinerType::SharedBorder) + else if constexpr (Type == RefinerType::ExteriorGhostParticles) { this->add(algo, algo->createSchedule(level), levelNumber); } @@ -186,69 +198,30 @@ class Refiner : private Communicator Refiner(core::VecFieldNames const& ghost, core::VecFieldNames const& model, core::VecFieldNames const& oldModel, std::shared_ptr const& rm, std::shared_ptr refineOp, - std::shared_ptr timeOp) + std ::shared_ptr timeOp, + std::shared_ptr variableFillPattern = nullptr) { constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, this, &refineOp, &timeOp](std::string const& ghost_, std::string const& model_, - std::string const& oldModel_, auto& fillPattern) { - auto src_id = rm->getID(ghost_); - auto dest_id = rm->getID(ghost_); - auto new_id = rm->getID(model_); - auto old_id = rm->getID(oldModel_); - - if (src_id && dest_id && old_id) - { - this->add_algorithm()->registerRefine( - *dest_id, // dest - *src_id, // source at same time - *old_id, // source at past time (for time interp) - *new_id, // source at future time (for time interp) - *dest_id, // scratch - refineOp, timeOp, fillPattern); - } - }; - - registerRefine(ghost.xName, model.xName, oldModel.xName, variableFillPattern); - registerRefine(ghost.yName, model.yName, oldModel.yName, variableFillPattern); - registerRefine(ghost.zName, model.zName, oldModel.zName, variableFillPattern); + + register_time_interpolated_vector_field( // + rm, ghost, ghost, oldModel, model, refineOp, timeOp, variableFillPattern); } + /** * @brief creates a Refiner for a scalar quantity with time refinement */ Refiner(std::string const& ghost, std::string const& model, std::string 
const& oldModel, std::shared_ptr const& rm, std::shared_ptr refineOp, - std::shared_ptr timeOp) + std ::shared_ptr timeOp, + std::shared_ptr variableFillPattern = nullptr) { constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, this, &refineOp, &timeOp](std::string const& ghost_, std::string const& model_, - std::string const& oldModel_, auto& fillPattern) { - auto src_id = rm->getID(ghost_); - auto dest_id = rm->getID(ghost_); - auto new_id = rm->getID(model_); - auto old_id = rm->getID(oldModel_); - - if (src_id && dest_id && old_id) - { - this->add_algorithm()->registerRefine( - *dest_id, // dest - *src_id, // source at same time - *old_id, // source at past time (for time interp) - *new_id, // source at future time (for time interp) - *dest_id, // scratch - refineOp, timeOp, fillPattern); - } - }; - - registerRefine(ghost, model, oldModel, variableFillPattern); + + register_time_interpolated_resource( // + rm, ghost, ghost, oldModel, model, refineOp, timeOp, variableFillPattern); } @@ -262,52 +235,32 @@ class Refiner : private Communicator { } + /** * @brief this overload creates a Refiner for communication without time interpolation * and from one quantity to another quantity. 
*/ - Refiner(core::VecFieldNames const& source, core::VecFieldNames const& destination, + Refiner(core::VecFieldNames const& dst, core::VecFieldNames const& src, std::shared_ptr const& rm, - std::shared_ptr refineOp) + std::shared_ptr refineOp, + std::shared_ptr variableFillPattern = nullptr) { - constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, &refineOp, this](std::string src, std::string dst, auto& fillPattern) { - auto idSrc = rm->getID(src); - auto idDest = rm->getID(dst); - if (idSrc and idDest) - { - /*if is a ghost field type Refiner, we need to add a fillPattern - * that will be used to overwrite or not the shared border node*/ - if constexpr (Type == RefinerType::GhostField - or Type == RefinerType::PatchGhostField - or Type == RefinerType::SharedBorder) - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp, - fillPattern); - else - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp); - } - }; - registerRefine(source.xName, destination.xName, variableFillPattern); - registerRefine(source.yName, destination.yName, variableFillPattern); - registerRefine(source.zName, destination.zName, variableFillPattern); + register_vector_field(rm, dst, src, refineOp, variableFillPattern); } - Refiner(std::string const& dest, std::string const& src, + + + Refiner(std::string const& dst, std::string const& src, std::shared_ptr const& rm, - std::shared_ptr refineOp) + std::shared_ptr refineOp, + std::shared_ptr fillPattern = nullptr) { - auto idSrc = rm->getID(src); - auto idDest = rm->getID(dest); - if (idSrc and idDest) - { - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp); - } + auto&& [idDst, idSrc] = rm->getIDsList(dst, src); + this->add_algorithm()->registerRefine(idDst, idSrc, idDst, refineOp, fillPattern); } + /** * @brief This overload of makeRefiner creates a Refiner for communication 
from one * scalar quantity to itself without time interpolation. @@ -317,7 +270,50 @@ class Refiner : private Communicator : Refiner{name, name, rm, refineOp} { } + + + + auto& register_resource(auto& rm, auto& dst, auto& src, auto& scratch, auto&&... args) + { + auto&& [idDst, idSrc, idScrtch] = rm->getIDsList(dst, src, scratch); + this->add_algorithm()->registerRefine(idDst, idSrc, idScrtch, args...); + return *this; + } + + + auto& register_time_interpolated_resource(auto& rm, auto& dst, auto& src, auto& told, + auto& tnew, auto&&... args) + { + auto&& [idDst, idSrc, idTold, idTnew] = rm->getIDsList(dst, src, told, tnew); + this->add_algorithm()->registerRefine(idDst, idSrc, idTold, idTnew, idDst, args...); + return *this; + } + + + auto& register_vector_field(auto& rm, auto& dst, auto& src, auto& refOp, auto& fillPat) + { + return (*this) + .register_resource(rm, dst.xName, src.xName, dst.xName, refOp, fillPat) + .register_resource(rm, dst.yName, src.yName, dst.yName, refOp, fillPat) + .register_resource(rm, dst.zName, src.zName, dst.zName, refOp, fillPat); + } + + + auto& register_time_interpolated_vector_field(auto& rm, auto& dst, auto& src, auto& told, + auto& tnew, auto&&... args) + { + return (*this) + .register_time_interpolated_resource(rm, dst.xName, src.xName, told.xName, tnew.xName, + args...) + .register_time_interpolated_resource(rm, dst.yName, src.yName, told.yName, tnew.yName, + args...) 
+ .register_time_interpolated_resource(rm, dst.zName, src.zName, told.zName, tnew.zName, + args...); + } }; + + + } // namespace PHARE::amr #endif diff --git a/src/amr/messengers/refiner_pool.hpp b/src/amr/messengers/refiner_pool.hpp index f1433d006..8fb17a8a2 100644 --- a/src/amr/messengers/refiner_pool.hpp +++ b/src/amr/messengers/refiner_pool.hpp @@ -9,6 +9,7 @@ #include #include +#include namespace PHARE @@ -23,47 +24,77 @@ namespace amr template class RefinerPool { - using Refiner_t = Refiner; - using RefineOperator = SAMRAI::hier::RefineOperator; + using Refiner_t = Refiner; - public: - /*@brief add a static communication between sources and destinations. - * This overload takes several sources/destinations/keys and add one refiner for each*/ - template - void addStaticRefiners(Names const& destinations, Names const& sources, - std::shared_ptr refineOp, - std::vector keys); + public: + RefinerPool(std::shared_ptr const& rm) + : rm_{rm} + { + } - /*@brief convenience overload of the above when source = destination, for VecField*/ - template - void addStaticRefiners(Names const& src_dest, std::shared_ptr refineOp, - std::vector key); + virtual ~RefinerPool() {} + RefinerPool(RefinerPool const&) = delete; + RefinerPool(RefinerPool&&) = default; /* @brief add a static communication between a single source and destination.*/ - template - void addStaticRefiner(Name const& ghostName, Name const& src, - std::shared_ptr const& refineOp, - std::string const key); + template + void addStaticRefiner(Resource const& ghostName, Resource const& src, + std::shared_ptr const& refineOp, + Key const& key, + std::shared_ptr fillPattern + = nullptr); /** * @brief convenience overload of above addStaticRefiner taking only one name * used for communications from a quantity to the same quantity.*/ - template - void addStaticRefiner(Name const& src_dest, std::shared_ptr const& refineOp, - std::string const key); + template + void addStaticRefiner(Resource const& src_dest, + 
std::shared_ptr const& refineOp, + Key const& key, + std::shared_ptr fillPattern + = nullptr); + + + /*@brief add a static communication between sources and destinations. + * This overload takes several sources/destinations/keys and add one refiner for each*/ + template + void + addStaticRefiners(Resources const& destinations, Resources const& sources, + std::shared_ptr refineOp, Keys const& keys, + std::shared_ptr fillPattern = nullptr); + + + /*@brief convenience overload of the above when source = destination, for VecField*/ + template + void + addStaticRefiners(Srcs const& src_dest, + std::shared_ptr refineOp, Keys const& key, + std::shared_ptr fillPattern = nullptr); + + + // this overload takes simple strings. + void addTimeRefiner(std::string const& ghost, std::string const& model, + std::string const& oldModel, + std::shared_ptr const& refineOp, + std::shared_ptr const& timeOp, + std::string const& key, + std::shared_ptr fillPattern + = nullptr); + /** * @brief fill the given pool of refiners with a new refiner per VecField * in ghostVecs. 
Data will be spatially refined using the specified refinement * operator, and time interpolated between time n and n+1 of next coarser data, * represented by modelVec and oldModelVec.*/ - void addTimeRefiners(std::vector const& ghostVecs, - core::VecFieldNames const& modelVec, - core::VecFieldNames const& oldModelVec, - std::shared_ptr& refineOp, - std::shared_ptr& timeOp); + void + addTimeRefiners(std::vector const& ghostVecs, + core::VecFieldNames const& modelVec, core::VecFieldNames const& oldModelVec, + std::shared_ptr& refineOp, + std::shared_ptr& timeOp, + std::shared_ptr fillPattern = nullptr); @@ -78,17 +109,11 @@ namespace amr * This overload is for vector fields*/ void addTimeRefiner(core::VecFieldNames const& ghost, core::VecFieldNames const& model, core::VecFieldNames const& oldModel, - std::shared_ptr const& refineOp, + std::shared_ptr const& refineOp, std::shared_ptr const& timeOp, - std::string key); - - - // this overload takes simple strings. - void addTimeRefiner(std::string const& ghost, std::string const& model, - std::string const& oldModel, - std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, - std::string key); + std::string const& key, + std::shared_ptr fillPattern + = nullptr); @@ -102,7 +127,6 @@ namespace amr } - /** @brief this overload will execute communications for all quantities in the pool. 
*/ void fill(int const levelNumber, double const initDataTime) const { @@ -123,10 +147,9 @@ namespace amr } - /** @brief executes a regridding for all quantities in the pool.*/ virtual void regrid(std::shared_ptr const& hierarchy, - const int levelNumber, + int const levelNumber, std::shared_ptr const& oldLevel, double const initDataTime) { @@ -137,109 +160,114 @@ namespace amr } - RefinerPool(std::shared_ptr const& rm) - : rm_{rm} - { - } - private: using Qty = std::string; - std::map> refiners_; + std::map refiners_; std::shared_ptr rm_; }; - template - template - void RefinerPool::addStaticRefiners( - Names const& destinations, Names const& sources, std::shared_ptr refineOp, - std::vector keys) - { - assert(destinations.size() == sources.size()); - auto key = std::begin(keys); - for (std::size_t i = 0; i < destinations.size(); ++i) - { - addStaticRefiner(destinations[i], sources[i], refineOp, *key++); - } - } +} // namespace amr +} // namespace PHARE - template - template - void - RefinerPool::addStaticRefiners(Names const& src_dest, - std::shared_ptr refineOp, - std::vector key) - { - addStaticRefiners(src_dest, src_dest, refineOp, key); - } +namespace PHARE::amr +{ +template +template +void RefinerPool::addStaticRefiner( + Resource const& dst, Resource const& src, + std::shared_ptr const& refineOp, Key const& key, + std::shared_ptr fillPattern) +{ + auto const [it, success] + = refiners_.insert({key, Refiner_t(dst, src, rm_, refineOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} - template - template - void RefinerPool::addStaticRefiner( - Name const& ghostName, Name const& src, std::shared_ptr const& refineOp, - std::string const key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghostName, src, rm_, refineOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } +template +template +void RefinerPool::addStaticRefiner( + Resource const& src_dst, 
std::shared_ptr const& refineOp, + Key const& key, std::shared_ptr fillPattern) +{ + addStaticRefiner(src_dst, src_dst, refineOp, key, fillPattern); +} +template +template +void RefinerPool::addStaticRefiners( + Resources const& destinations, Resources const& sources, + std::shared_ptr refineOp, Keys const& keys, + std::shared_ptr fillPattern) +{ + assert(destinations.size() == sources.size()); + assert(destinations.size() == keys.size()); - template - template - void RefinerPool::addStaticRefiner( - Name const& descriptor, std::shared_ptr const& refineOp, - std::string const key) - { - addStaticRefiner(descriptor, descriptor, refineOp, key); - } + for (std::size_t i = 0; i < destinations.size(); ++i) + addStaticRefiner(destinations[i], sources[i], refineOp, keys[i], fillPattern); +} - template - void RefinerPool::addTimeRefiners( - std::vector const& ghostVecs, core::VecFieldNames const& modelVec, - core::VecFieldNames const& oldModelVec, std::shared_ptr& refineOp, - std::shared_ptr& timeOp) - { - for (auto const& ghostVec : ghostVecs) - { - addTimeRefiner(ghostVec, modelVec, oldModelVec, refineOp, timeOp, ghostVec.vecName); - } - } +template +template +void RefinerPool::addStaticRefiners( + Srcs const& src_dest, std::shared_ptr refineOp, Keys const& keys, + std::shared_ptr fillPattern) +{ + addStaticRefiners(src_dest, src_dest, refineOp, keys, fillPattern); +} - template - void RefinerPool::addTimeRefiner( - core::VecFieldNames const& ghost, core::VecFieldNames const& model, - core::VecFieldNames const& oldModel, std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, std::string key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } - template - void RefinerPool::addTimeRefiner( - std::string const& ghost, std::string const& model, std::string const& oldModel, - std::shared_ptr const& refineOp, - 
std::shared_ptr const& timeOp, std::string key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } -} // namespace amr -} // namespace PHARE +template +void RefinerPool::addTimeRefiner( + std::string const& ghost, std::string const& model, std::string const& oldModel, + std::shared_ptr const& refineOp, + std::shared_ptr const& timeOp, std::string const& key, + std::shared_ptr fillPattern) +{ + auto const [it, success] = refiners_.insert( + {key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} + + +template +void RefinerPool::addTimeRefiners( + std::vector const& ghostVecs, core::VecFieldNames const& modelVec, + core::VecFieldNames const& oldModelVec, std::shared_ptr& refineOp, + std::shared_ptr& timeOp, + std::shared_ptr fillPattern) +{ + for (auto const& ghostVec : ghostVecs) + addTimeRefiner(ghostVec, modelVec, oldModelVec, refineOp, timeOp, ghostVec.vecName, + fillPattern); +} + +template +void RefinerPool::addTimeRefiner( + core::VecFieldNames const& ghost, core::VecFieldNames const& model, + core::VecFieldNames const& oldModel, + std::shared_ptr const& refineOp, + std::shared_ptr const& timeOp, std::string const& key, + std::shared_ptr fillPattern) +{ + auto const [it, success] = refiners_.insert( + {key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} + + +} // namespace PHARE::amr #endif diff --git a/src/amr/physical_models/hybrid_model.hpp b/src/amr/physical_models/hybrid_model.hpp index 3a04fa0e3..aaa1fa1b0 100644 --- a/src/amr/physical_models/hybrid_model.hpp +++ b/src/amr/physical_models/hybrid_model.hpp @@ -49,7 +49,7 @@ class HybridModel : public IPhysicalModel std::shared_ptr resourcesManager; - virtual 
void initialize(level_t& level) override; + void initialize(level_t& level) override; /** @@ -69,7 +69,7 @@ class HybridModel : public IPhysicalModel * @brief fillMessengerInfo describes which variables of the model are to be initialized or * filled at ghost nodes. */ - virtual void fillMessengerInfo(std::unique_ptr const& info) const override; + void fillMessengerInfo(std::unique_ptr const& info) const override; NO_DISCARD auto setOnPatch(patch_t& patch) @@ -165,7 +165,6 @@ void HybridModel::f hybridInfo.ghostCurrent.push_back(core::VecFieldNames{state.J}); hybridInfo.ghostBulkVelocity.push_back(hybridInfo.modelIonBulkVelocity); - auto transform_ = [](auto& ions, auto& inserter) { std::transform(std::begin(ions), std::end(ions), std::back_inserter(inserter), [](auto const& pop) { return pop.name(); }); @@ -174,6 +173,13 @@ void HybridModel::f transform_(state.ions, hybridInfo.levelGhostParticlesOld); transform_(state.ions, hybridInfo.levelGhostParticlesNew); transform_(state.ions, hybridInfo.patchGhostParticles); + + for (auto const& pop : state.ions) + { + hybridInfo.ghostFlux.emplace_back(pop.flux()); + hybridInfo.sumBorderFields.emplace_back(pop.particleDensity().name()); + hybridInfo.sumBorderFields.emplace_back(pop.chargeDensity().name()); + } } diff --git a/src/amr/resources_manager/amr_utils.cpp b/src/amr/resources_manager/amr_utils.cpp index 48218a28c..1ceeffa17 100644 --- a/src/amr/resources_manager/amr_utils.cpp +++ b/src/amr/resources_manager/amr_utils.cpp @@ -31,10 +31,12 @@ namespace amr /** * @brief AMRToLocal sets the AMRBox to local indexing relative to the referenceAMRBox */ - void AMRToLocal(SAMRAI::hier::Box& AMRBox, SAMRAI::hier::Box const& referenceAMRBox) + SAMRAI::hier::Box& AMRToLocal(SAMRAI::hier::Box& AMRBox, + SAMRAI::hier::Box const& referenceAMRBox) { AMRBox.setLower(AMRBox.lower() - referenceAMRBox.lower()); AMRBox.setUpper(AMRBox.upper() - referenceAMRBox.lower()); + return AMRBox; } diff --git 
a/src/amr/resources_manager/amr_utils.hpp b/src/amr/resources_manager/amr_utils.hpp index c7b537926..f8e46f187 100644 --- a/src/amr/resources_manager/amr_utils.hpp +++ b/src/amr/resources_manager/amr_utils.hpp @@ -3,21 +3,22 @@ #include "core/def/phare_mpi.hpp" -#include -#include -#include -#include -#include -#include - -#include "amr/types/amr_types.hpp" +#include "core/def.hpp" #include "core/utilities/constants.hpp" #include "core/utilities/point/point.hpp" -#include "core/def.hpp" +#include "amr/types/amr_types.hpp" #include "amr/utilities/box/amr_box.hpp" +#include +#include +#include +#include +#include +#include +#include + namespace PHARE { namespace amr @@ -43,7 +44,8 @@ namespace amr /** * @brief AMRToLocal sets the AMRBox to local indexing relative to the referenceAMRBox */ - void AMRToLocal(SAMRAI::hier::Box& AMRBox, SAMRAI::hier::Box const& referenceAMRBox); + SAMRAI::hier::Box& AMRToLocal(SAMRAI::hier::Box& AMRBox, + SAMRAI::hier::Box const& referenceAMRBox); @@ -154,7 +156,7 @@ namespace amr template NO_DISCARD GridLayoutT layoutFromPatch(SAMRAI::hier::Patch const& patch) { - int constexpr dimension = GridLayoutT::dimension; + auto constexpr dimension = GridLayoutT::dimension; SAMRAI::tbox::Dimension const dim{dimension}; @@ -206,7 +208,31 @@ namespace amr return GridLayoutT{dl, nbrCell, origin, amr::Box{domain}, lvlNbr}; } - inline auto to_string(SAMRAI::hier::GlobalId const& id) + + // potentially to replace with SAMRAI coarse to fine boundary stuff + template // fow now it gives us a box for only patch ghost layer + NO_DISCARD auto makeNonLevelGhostBoxFor(SAMRAI::hier::Patch const& patch, + SAMRAI::hier::PatchHierarchy const& hierarchy) + { + auto constexpr dimension = GridLayoutT::dimension; + auto const lvlNbr = patch.getPatchLevelNumber(); + SAMRAI::hier::Box const domain = patch.getBox(); + auto const domBox = phare_box_from(domain); + auto const particleGhostBox = grow(domBox, GridLayoutT::nbrParticleGhosts()); + + 
SAMRAI::hier::HierarchyNeighbors const hier_nbrs{hierarchy, lvlNbr, lvlNbr}; + auto const neighbors = hier_nbrs.getSameLevelNeighbors(domain, lvlNbr); + std::vector> patchGhostLayerBoxes; + patchGhostLayerBoxes.reserve(neighbors.size() + 1); + patchGhostLayerBoxes.emplace_back(domBox); + for (auto const& neighbox : neighbors) + patchGhostLayerBoxes.emplace_back( + *(particleGhostBox * phare_box_from(neighbox))); + + return patchGhostLayerBoxes; + } + + inline auto to_string(auto const& id) { std::stringstream patchID; patchID << id; @@ -226,6 +252,7 @@ namespace amr } + template void visitHierarchy(SAMRAI::hier::PatchHierarchy& hierarchy, ResMan& resman, Action&& action, int minLevel, int maxLevel, Args&&... args) diff --git a/src/amr/resources_manager/resources_manager.hpp b/src/amr/resources_manager/resources_manager.hpp index 063aefd29..4e5c13583 100644 --- a/src/amr/resources_manager/resources_manager.hpp +++ b/src/amr/resources_manager/resources_manager.hpp @@ -319,6 +319,18 @@ namespace amr return ids; } + + auto getIDsList(auto&&... keys) const + { + auto const Fn = [&](auto& key) { + if (auto const id = getID(key)) + return *id; + throw std::runtime_error("bad key"); + }; + return std::array{Fn(keys)...}; + } + + // iterate per patch and set args on patch template auto inline enumerate(SAMRAI::hier::PatchLevel& level, Args&&... 
args) diff --git a/src/amr/solvers/solver_ppc.hpp b/src/amr/solvers/solver_ppc.hpp index 7357f72a3..f6c73d712 100644 --- a/src/amr/solvers/solver_ppc.hpp +++ b/src/amr/solvers/solver_ppc.hpp @@ -3,8 +3,13 @@ #include "core/def/phare_mpi.hpp" -#include +#include "core/numerics/ion_updater/ion_updater.hpp" +#include "core/numerics/ampere/ampere.hpp" +#include "core/numerics/faraday/faraday.hpp" +#include "core/numerics/ohm/ohm.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/data/grid/gridlayout_utils.hpp" #include "amr/messengers/hybrid_messenger.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" @@ -13,17 +18,10 @@ #include "amr/solvers/solver.hpp" #include "amr/solvers/solver_ppc_model_view.hpp" -#include "core/numerics/ion_updater/ion_updater.hpp" -#include "core/numerics/ampere/ampere.hpp" -#include "core/numerics/faraday/faraday.hpp" -#include "core/numerics/ohm/ohm.hpp" - -#include "core/data/vecfield/vecfield.hpp" -#include "core/data/grid/gridlayout_utils.hpp" +#include +#include -#include -#include namespace PHARE::solver { @@ -36,20 +34,21 @@ class SolverPPC : public ISolver static constexpr auto dimension = HybridModel::dimension; static constexpr auto interp_order = HybridModel::gridlayout_type::interp_order; - using Electromag = typename HybridModel::electromag_type; - using Ions = typename HybridModel::ions_type; - using ParticleArray = typename Ions::particle_array_type; - using VecFieldT = typename HybridModel::vecfield_type; - using GridLayout = typename HybridModel::gridlayout_type; - using ResourcesManager = typename HybridModel::resources_manager_type; + using Electromag = HybridModel::electromag_type; + using Ions = HybridModel::ions_type; + using ParticleArray = Ions::particle_array_type; + using VecFieldT = HybridModel::vecfield_type; + using GridLayout = HybridModel::gridlayout_type; + using ResourcesManager = HybridModel::resources_manager_type; using IPhysicalModel_t = IPhysicalModel; using IMessenger = 
amr::IMessenger; using HybridMessenger = amr::HybridMessenger; using ModelViews_t = HybridPPCModelView; - using Faraday_t = typename ModelViews_t::Faraday_t; - using Ampere_t = typename ModelViews_t::Ampere_t; - using Ohm_t = typename ModelViews_t::Ohm_t; + using Faraday_t = ModelViews_t::Faraday_t; + using Ampere_t = ModelViews_t::Ampere_t; + using Ohm_t = ModelViews_t::Ohm_t; + using IonUpdater_t = PHARE::core::IonUpdater; Electromag electromagPred_{"EMPred"}; Electromag electromagAvg_{"EMAvg"}; @@ -58,13 +57,13 @@ class SolverPPC : public ISolver Ampere_t ampere_; Ohm_t ohm_; - PHARE::core::IonUpdater ionUpdater_; + IonUpdater_t ionUpdater_; public: - using patch_t = typename AMR_Types::patch_t; - using level_t = typename AMR_Types::level_t; - using hierarchy_t = typename AMR_Types::hierarchy_t; + using patch_t = AMR_Types::patch_t; + using level_t = AMR_Types::level_t; + using hierarchy_t = AMR_Types::hierarchy_t; @@ -97,7 +96,11 @@ class SolverPPC : public ISolver double const newTime) override; - void onRegrid() override { ionUpdater_.reset(); } + void onRegrid() override + { + boxing.clear(); + ionUpdater_.reset(); + } std::shared_ptr make_view(level_t& level, IPhysicalModel_t& model) override @@ -147,28 +150,36 @@ class SolverPPC : public ISolver double newTime; }; + void make_boxes(hierarchy_t const& hierarchy, level_t& level) + { + int const lvlNbr = level.getLevelNumber(); + if (boxing.count(lvlNbr)) + return; + + auto& levelBoxing = boxing[lvlNbr]; // creates if missing + + for (auto const& patch : level) + if (auto [it, suc] = levelBoxing.try_emplace( + amr::to_string(patch->getGlobalId()), + Boxing_t{amr::layoutFromPatch(*patch), + amr::makeNonLevelGhostBoxFor(*patch, hierarchy)}); + !suc) + throw std::runtime_error("boxing map insertion failure"); + } - // extend lifespan - std::unordered_map tmpDomain; - std::unordered_map patchGhost; - - template - static void add_to(Map& map, std::string const& key, ParticleArray const& ps) + auto& 
setup_level(hierarchy_t const& hierarchy, int const levelNumber) { - // vector copy drops the capacity (over allocation of the source) - // we want to keep the overallocation somewhat - how much to be assessed - ParticleArray empty{ps.box()}; - - if (!map.count(key)) - map.emplace(key, empty); - else - map.at(key) = empty; - - auto& v = map.at(key); - v.reserve(ps.capacity()); - v.replace_from(ps); + auto level = hierarchy.getPatchLevel(levelNumber); + if (boxing.count(levelNumber) == 0) + make_boxes(hierarchy, *level); + return *level; } + + using Boxing_t = core::UpdaterSelectionBoxing; + std::unordered_map> boxing; + + }; // end solverPPC @@ -213,41 +224,6 @@ void SolverPPC::fillMessengerInfo( } -template -void SolverPPC::saveState_(level_t& level, ModelViews_t& views) -{ - PHARE_LOG_SCOPE(1, "SolverPPC::saveState_"); - - for (auto& state : views) - { - std::stringstream ss; - ss << state.patch->getGlobalId(); - for (auto& pop : state.ions) - { - std::string const key = ss.str() + "_" + pop.name(); - add_to(tmpDomain, key, pop.domainParticles()); - add_to(patchGhost, key, pop.patchGhostParticles()); - } - } -} - -template -void SolverPPC::restoreState_(level_t& level, ModelViews_t& views) -{ - PHARE_LOG_SCOPE(1, "SolverPPC::restoreState_"); - - for (auto& state : views) - { - std::stringstream ss; - ss << state.patch->getGlobalId(); - - for (auto& pop : state.ions) - { - pop.domainParticles() = std::move(tmpDomain.at(ss.str() + "_" + pop.name())); - pop.patchGhostParticles() = std::move(patchGhost.at(ss.str() + "_" + pop.name())); - } - } -} template @@ -260,26 +236,22 @@ void SolverPPC::advanceLevel(hierarchy_t const& hierarch auto& modelView = dynamic_cast(views); auto& fromCoarser = dynamic_cast(fromCoarserMessenger); - auto level = hierarchy.getPatchLevel(levelNumber); - - predictor1_(*level, modelView, fromCoarser, currentTime, newTime); + auto& level = setup_level(hierarchy, levelNumber); - average_(*level, modelView, fromCoarser, newTime); + 
predictor1_(level, modelView, fromCoarser, currentTime, newTime); - saveState_(*level, modelView); + average_(level, modelView, fromCoarser, newTime); - moveIons_(*level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::domain_only); + moveIons_(level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::domain_only); - predictor2_(*level, modelView, fromCoarser, currentTime, newTime); + predictor2_(level, modelView, fromCoarser, currentTime, newTime); - average_(*level, modelView, fromCoarser, newTime); + average_(level, modelView, fromCoarser, newTime); - restoreState_(*level, modelView); + moveIons_(level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::all); - moveIons_(*level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::all); - - corrector_(*level, modelView, fromCoarser, currentTime, newTime); + corrector_(level, modelView, fromCoarser, currentTime, newTime); } @@ -418,21 +390,14 @@ void _debug_log_move_ions(Args const&... 
args) { auto const& [views] = std::forward_as_tuple(args...); - std::size_t nbrDomainParticles = 0; - std::size_t nbrPatchGhostParticles = 0; - std::size_t nbrLevelGhostNewParticles = 0; std::size_t nbrLevelGhostOldParticles = 0; - std::size_t nbrLevelGhostParticles = 0; // + std::size_t nbrLevelGhostParticles = 0; for (auto& state : views) { for (auto& pop : state.ions) { - nbrDomainParticles += pop.domainParticles().size(); - nbrPatchGhostParticles += pop.patchGhostParticles().size(); - nbrLevelGhostNewParticles += pop.levelGhostParticlesNew().size(); nbrLevelGhostOldParticles += pop.levelGhostParticlesOld().size(); nbrLevelGhostParticles += pop.levelGhostParticles().size(); - nbrPatchGhostParticles += pop.patchGhostParticles().size(); if (nbrLevelGhostOldParticles < nbrLevelGhostParticles and nbrLevelGhostOldParticles > 0) @@ -454,18 +419,23 @@ void SolverPPC::moveIons_(level_t& level, ModelViews_t& PHARE_DEBUG_DO(_debug_log_move_ions(views);) TimeSetter setTime{views, newTime}; + auto const& levelBoxing = boxing[level.getLevelNumber()]; { auto dt = newTime - currentTime; for (auto& state : views) - ionUpdater_.updatePopulations(state.ions, state.electromagAvg, state.layout, dt, mode); + ionUpdater_.updatePopulations( + state.ions, state.electromagAvg, + levelBoxing.at(amr::to_string(state.patch->getGlobalId())), dt, mode); } // this needs to be done before calling the messenger setTime([](auto& state) -> auto& { return state.ions; }); - fromCoarser.fillIonGhostParticles(views.model().state.ions, level, newTime); + fromCoarser.fillFluxBorders(views.model().state.ions, level, newTime); + fromCoarser.fillDensityBorders(views.model().state.ions, level, newTime); fromCoarser.fillIonPopMomentGhosts(views.model().state.ions, level, newTime); + fromCoarser.fillIonGhostParticles(views.model().state.ions, level, newTime); for (auto& state : views) ionUpdater_.updateIons(state.ions); diff --git a/src/amr/utilities/box/amr_box.hpp b/src/amr/utilities/box/amr_box.hpp index 
4b615acd9..35f4e18b9 100644 --- a/src/amr/utilities/box/amr_box.hpp +++ b/src/amr/utilities/box/amr_box.hpp @@ -30,6 +30,14 @@ NO_DISCARD auto phare_box_from(SAMRAI::hier::Box const& box) return PHARE::core::Box{core::Point{lower}, core::Point{upper}}; } +template +NO_DISCARD auto as_unsigned_phare_box(SAMRAI::hier::Box const& box) +{ + auto const& amr_box = phare_box_from(box); + return PHARE::core::Box{core::Point{amr_box.lower}.as_unsigned(), + core::Point{amr_box.upper}.as_unsigned()}; +} + NO_DISCARD inline bool operator==(SAMRAI::hier::Box const& b1, SAMRAI::hier::Box const& b2) { auto dim1 = b1.getDim().getValue(); @@ -85,6 +93,43 @@ struct Box : public PHARE::core::Box } }; + +template +NO_DISCARD inline bool isInBox(SAMRAI::hier::Box const& box, Particle const& particle) +{ + constexpr auto dim = Particle::dimension; + auto const& iCell = particle.iCell; + auto const& lower = box.lower(); + auto const& upper = box.upper(); + for (std::size_t i = 0; i < dim; ++i) + if (iCell[i] < lower(i) || iCell[i] > upper(i)) + return false; + return true; +} + + +template +auto as_point(SAMRAI::hier::IntVector const& vec) +{ + return core::Point{ + core::for_N([&](auto i) { return vec[i]; })}; +} + + +template +auto as_point(SAMRAI::hier::Transformation const& tform) +{ + return as_point(tform.getOffset()); +} + + +template +NO_DISCARD core::Box shift(core::Box const& box, + SAMRAI::hier::Transformation const& tform) +{ + return core::shift(box, as_point(tform)); +} + } // namespace PHARE::amr #endif diff --git a/src/core/data/field/field_box.hpp b/src/core/data/field/field_box.hpp new file mode 100644 index 000000000..6868b3414 --- /dev/null +++ b/src/core/data/field/field_box.hpp @@ -0,0 +1,94 @@ +#ifndef PHARE_CORE_DATA_FIELD_FIELD_BOX_HPP +#define PHARE_CORE_DATA_FIELD_FIELD_BOX_HPP + +#include "core/def.hpp" +#include "core/logger.hpp" +#include "core/utilities/types.hpp" +#include "core/utilities/box/box.hpp" + +#include +#include +#include + +namespace 
PHARE::core +{ + +template +class FieldBox +{ + using value_type = std::decay_t; + +public: + auto constexpr static dimension = Field_t::dimension; + + Field_t& field; + Box amr_ghost_box; + Box lcl_box; + + template + FieldBox(Field_t& field_, GridLayout_t const& layout) + : field{field_} + , amr_ghost_box{layout.AMRGhostBoxFor(field.physicalQuantity())} + , lcl_box{layout.ghostBoxFor(field)} + { + } + + template + FieldBox(Field_t& field_, GridLayout_t const& layout, + Box const& selection) + : field{field_} + , amr_ghost_box{layout.AMRGhostBoxFor(field.physicalQuantity())} + , lcl_box{selection} + { + } + + template + FieldBox(Field_t& field_, GridLayout_t const& layout, Box const& selection) + : field{field_} + , amr_ghost_box{layout.AMRGhostBoxFor(field.physicalQuantity())} + , lcl_box{layout.AMRToLocal(selection)} + { + } + + + template + void set_from(std::vector const& vec, std::size_t seek = 0); + + void append_to(std::vector& vec); +}; + + +template +void operate_on_fields(auto& dst, auto const& src) +{ + assert(dst.lcl_box.size() == src.lcl_box.size()); + auto src_it = src.lcl_box.begin(); + auto dst_it = dst.lcl_box.begin(); + for (; dst_it != dst.lcl_box.end(); ++src_it, ++dst_it) + Operator{dst.field(*dst_it)}(src.field(*src_it)); +} + + + +template +template +void FieldBox::set_from(std::vector const& vec, std::size_t seek) +{ + auto dst_it = lcl_box.begin(); + for (; dst_it != lcl_box.end(); ++seek, ++dst_it) + Operator{field(*dst_it)}(vec[seek]); +} + +template +void FieldBox::append_to(std::vector& vec) +{ + // reserve vec before use! 
+ auto src_it = lcl_box.begin(); + for (; src_it != lcl_box.end(); ++src_it) + vec.push_back(field(*src_it)); +} + +} // namespace PHARE::core + + +#endif diff --git a/src/core/data/grid/grid.hpp b/src/core/data/grid/grid.hpp index 482d1f142..53987c3e3 100644 --- a/src/core/data/grid/grid.hpp +++ b/src/core/data/grid/grid.hpp @@ -34,7 +34,6 @@ class Grid : public NdArrayImpl Grid() = delete; - Grid(Grid const& source) = delete; Grid(Grid&& source) = default; Grid& operator=(Grid&& source) = delete; Grid& operator=(Grid const& source) = delete; @@ -44,7 +43,6 @@ class Grid : public NdArrayImpl : Super{dims...} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} { static_assert(sizeof...(Dims) == dimension, "Invalid dimension"); } @@ -54,7 +52,6 @@ class Grid : public NdArrayImpl : Super{dims} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} { } @@ -63,7 +60,13 @@ class Grid : public NdArrayImpl : Super{layout.allocSize(qty)} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} + { + } + + Grid(Grid const& source) // let field_ default + : Super{source.shape()} + , name_{source.name()} + , qty_{source.physicalQuantity()} { } @@ -86,7 +89,7 @@ class Grid : public NdArrayImpl private: std::string name_{"No Name"}; PhysicalQuantity qty_; - field_type field_; + field_type field_{name_, qty_, Super::data(), Super::shape()}; }; diff --git a/src/core/data/grid/gridlayout.hpp b/src/core/data/grid/gridlayout.hpp index 7fcaec7bc..93afe371a 100644 --- a/src/core/data/grid/gridlayout.hpp +++ b/src/core/data/grid/gridlayout.hpp @@ -134,7 +134,6 @@ namespace core } } - inverseMeshSize_[0] = 1. / meshSize_[0]; if constexpr (dimension > 1) { @@ -1149,6 +1148,34 @@ namespace core + // essentially box form of allocSize(...) 
+ template + Box ghostBoxFor(Field const& field) const + { + return _BoxFor(field, [&](auto const& centering, auto const direction) { + return this->ghostStartToEnd(centering, direction); + }); + } + + + + template + auto AMRGhostBoxFor(Field const& field) const + { + auto const centerings = centering(field); + auto const growBy = [&]() { + std::array arr; + for (std::uint8_t i = 0; i < dimension; ++i) + arr[i] = nbrGhosts(centerings[i]); + return arr; + }(); + auto ghostBox = grow(AMRBox_, growBy); + for (std::uint8_t i = 0; i < dimension; ++i) + ghostBox.upper[i] += (centerings[i] == QtyCentering::primal) ? 1 : 0; + return ghostBox; + } + + template void evalOnBox(Field& field, Fn&& fn) const @@ -1206,6 +1233,30 @@ namespace core } + template + auto _BoxFor(Field const& field, Fn startToEnd) const + { + std::array lower, upper; + + auto const [ix0, ix1] = startToEnd(field, Direction::X); + lower[0] = ix0; + upper[0] = ix1; + if constexpr (dimension > 1) + { + auto const [iy0, iy1] = startToEnd(field, Direction::Y); + lower[1] = iy0; + upper[1] = iy1; + } + if constexpr (dimension == 3) + { + auto const [iz0, iz1] = startToEnd(field, Direction::Z); + lower[2] = iz0; + upper[2] = iz1; + } + return Box{lower, upper}; + } + + template auto StartToEndIndices_(Centering const& centering, StartToEnd const&& startToEnd, bool const includeEnd = false) const @@ -1513,7 +1564,6 @@ namespace core std::array, 2> ghostEndIndexTable_; Box AMRBox_; - // this constexpr initialization only works if primal==0 and dual==1 // this is defined in gridlayoutdefs.hpp don't change it because these // arrays will be accessed with [primal] and [dual] indexes. 
diff --git a/src/core/data/ions/ion_population/ion_population.hpp b/src/core/data/ions/ion_population/ion_population.hpp index 33dfea9d8..ba7dfd645 100644 --- a/src/core/data/ions/ion_population/ion_population.hpp +++ b/src/core/data/ions/ion_population/ion_population.hpp @@ -43,16 +43,25 @@ namespace core NO_DISCARD std::string const& name() const { return name_; } - NO_DISCARD auto const& particleInitializerInfo() const { return particleInitializerInfo_; } + + + NO_DISCARD auto const& particleInitializerInfo() const + { + assert(particleInitializerInfo_.contains("density")); + return particleInitializerInfo_; + } + NO_DISCARD bool isUsable() const { - return core::isUsable(particles_, particleDensity_, chargeDensity_, flux_, momentumTensor_); + return core::isUsable(particles_, particleDensity_, chargeDensity_, flux_, + momentumTensor_); } NO_DISCARD bool isSettable() const { - return core::isSettable(particles_, particleDensity_, chargeDensity_, flux_, momentumTensor_); + return core::isSettable(particles_, particleDensity_, chargeDensity_, flux_, + momentumTensor_); } NO_DISCARD auto& domainParticles() const { return particles_.domainParticles(); } diff --git a/src/core/data/ions/ions.hpp b/src/core/data/ions/ions.hpp index a9a536802..3c8632c64 100644 --- a/src/core/data/ions/ions.hpp +++ b/src/core/data/ions/ions.hpp @@ -167,6 +167,19 @@ namespace core NO_DISCARD auto begin() const { return std::begin(populations_); } NO_DISCARD auto end() const { return std::end(populations_); } + NO_DISCARD auto& population(std::size_t const i) + { + if (i >= populations_.size()) + throw std::out_of_range("Ions population index out of range"); + return populations_[i]; + } + + NO_DISCARD auto const& population(std::size_t const i) const + { + if (i >= populations_.size()) + throw std::out_of_range("Ions population index out of range"); + return populations_[i]; + } // in the following isUsable and isSettable the massDensity_ is not checked // because it is for internal use 
only so no object will ever need to access it. @@ -234,6 +247,9 @@ namespace core } + auto& operator[](std::size_t const i) const { return populations_[i]; } + auto& operator[](std::size_t const i) { return populations_[i]; } + private: field_type massDensity_; field_type chargeDensity_; @@ -241,7 +257,10 @@ namespace core std::vector populations_; tensorfield_type momentumTensor_; }; + } // namespace core } // namespace PHARE + + #endif diff --git a/src/core/data/particles/particle_array.hpp b/src/core/data/particles/particle_array.hpp index 23990c47d..372b90b80 100644 --- a/src/core/data/particles/particle_array.hpp +++ b/src/core/data/particles/particle_array.hpp @@ -95,12 +95,8 @@ class ParticleArray NO_DISCARD auto back() { return particles_.back(); } NO_DISCARD auto front() { return particles_.front(); } - auto erase(IndexRange_& range) { cellMap_.erase(particles_, range); } - auto erase(IndexRange_&& range) - { - // TODO move ctor for range? - cellMap_.erase(std::forward(range)); - } + + auto erase(IndexRange_ range) { cellMap_.erase(range); } iterator erase(iterator first, iterator last) { @@ -201,6 +197,14 @@ class ParticleArray return cellMap_.partition(makeIndexRange(*this), std::forward(pred)); } + template + auto partition(Range_t&& range, Predicate&& pred) + { + auto const ret = cellMap_.partition(range, std::forward(pred)); + assert(ret.size() <= range.size()); + return ret; + } + template void print(CellIndex const& cell) const { @@ -228,18 +232,6 @@ class ParticleArray auto& box() const { return box_; } - auto& replace_from(ParticleArray const& that) - { - if (this == &that) // just in case - return *this; - this->resize(that.size()); - std::copy(that.begin(), that.end(), this->begin()); - this->box_ = that.box_; - this->cellMap_ = that.cellMap_; - return *this; - } - - private: Vector particles_; box_t box_; diff --git a/src/core/numerics/ion_updater/ion_updater.hpp b/src/core/numerics/ion_updater/ion_updater.hpp index 6f51a6147..57a925916 100644 
--- a/src/core/numerics/ion_updater/ion_updater.hpp +++ b/src/core/numerics/ion_updater/ion_updater.hpp @@ -2,20 +2,18 @@ #define PHARE_ION_UPDATER_HPP +#include "core/logger.hpp" #include "core/utilities/box/box.hpp" #include "core/utilities/range/range.hpp" -#include "core/numerics/interpolator/interpolator.hpp" #include "core/numerics/pusher/pusher.hpp" +#include "core/numerics/moments/moments.hpp" #include "core/numerics/pusher/pusher_factory.hpp" +#include "core/numerics/interpolator/interpolator.hpp" #include "core/numerics/boundary_condition/boundary_condition.hpp" -#include "core/numerics/moments/moments.hpp" -#include "core/data/ions/ions.hpp" #include "initializer/data_provider.hpp" -#include "core/logger.hpp" -#include #include @@ -26,6 +24,8 @@ enum class UpdaterMode { domain_only = 1, all = 2 }; template class IonUpdater { + using This = IonUpdater; + public: static constexpr auto dimension = GridLayout::dimension; static constexpr auto interp_order = GridLayout::interp_order; @@ -55,7 +55,8 @@ class IonUpdater { } - void updatePopulations(Ions& ions, Electromag const& em, GridLayout const& layout, double dt, + template + void updatePopulations(Ions& ions, Electromag const& em, Boxing_t const& boxing, double dt, UpdaterMode = UpdaterMode::all); @@ -70,9 +71,11 @@ class IonUpdater private: - void updateAndDepositDomain_(Ions& ions, Electromag const& em, GridLayout const& layout); + template + void updateAndDepositDomain_(Ions& ions, Electromag const& em, Boxing_t const& boxing); - void updateAndDepositAll_(Ions& ions, Electromag const& em, GridLayout const& layout); + template + void updateAndDepositAll_(Ions& ions, Electromag const& em, Boxing_t const& boxing); // dealloced on regridding/load balancing coarsest @@ -83,22 +86,23 @@ class IonUpdater template +template void IonUpdater::updatePopulations(Ions& ions, Electromag const& em, - GridLayout const& layout, - double dt, UpdaterMode mode) + Boxing_t const& boxing, double dt, + UpdaterMode mode) { 
PHARE_LOG_SCOPE(3, "IonUpdater::updatePopulations"); resetMoments(ions); - pusher_->setMeshAndTimeStep(layout.meshSize(), dt); + pusher_->setMeshAndTimeStep(boxing.layout.meshSize(), dt); if (mode == UpdaterMode::domain_only) { - updateAndDepositDomain_(ions, em, layout); + updateAndDepositDomain_(ions, em, boxing); } else { - updateAndDepositAll_(ions, em, layout); + updateAndDepositAll_(ions, em, boxing); } } @@ -111,85 +115,88 @@ void IonUpdater::updateIons(Ions& ions) ions.computeBulkVelocity(); } +// this is to detach how we partition particles from the updater directly +template +struct UpdaterSelectionBoxing +{ + auto constexpr static partGhostWidth = GridLayout::nbrParticleGhosts(); + using GridLayout_t = GridLayout; + using Box_t = IonUpdater_t::Box; + using Selector_t = IonUpdater_t::Pusher::ParticleSelector; + GridLayout_t const layout; + std::vector const nonLevelGhostBox; + Box_t const domainBox = layout.AMRBox(); + Box_t const ghostBox = grow(domainBox, partGhostWidth); + + Selector_t const noop = [](auto& particleRange) { return particleRange; }; + + // lambda copy captures to detach from above references in case of class copy construct + Selector_t const inDomainBox = [domainBox = domainBox](auto& particleRange) { + return particleRange.array().partition( + particleRange, [&](auto const& cell) { return core::isIn(cell, domainBox); }); + }; + + Selector_t const inGhostBox = [ghostBox = ghostBox](auto& particleRange) { + return particleRange.array().partition( + particleRange, [&](auto const& cell) { return isIn(cell, ghostBox); }); + }; + + Selector_t const inNonLevelGhostBox + = [nonLevelGhostBox = nonLevelGhostBox](auto& particleRange) { + return particleRange.array().partition(particleRange, [&](auto const& cell) { + return isIn(Point{cell}, nonLevelGhostBox); + }); + }; + + Selector_t const inGhostLayer + = [ghostBox = ghostBox, domainBox = domainBox](auto& particleRange) { + return particleRange.array().partition(particleRange, [&](auto const& 
cell) { + return isIn(cell, ghostBox) and !isIn(cell, domainBox); + }); + }; +}; -template /** * @brief IonUpdater::updateAndDepositDomain_ evolves moments from time n to n+1 without updating particles, which stay at time n */ +template +template void IonUpdater::updateAndDepositDomain_(Ions& ions, Electromag const& em, - GridLayout const& layout) + Boxing_t const& boxing) { PHARE_LOG_SCOPE(3, "IonUpdater::updateAndDepositDomain_"); - auto domainBox = layout.AMRBox(); - - auto inDomainBox = [&domainBox](auto& particleRange) // - { - auto& box = domainBox; - return particleRange.array().partition( - [&](auto const& cell) { return core::isIn(Point{cell}, box); }); - }; - - auto constexpr partGhostWidth = GridLayout::nbrParticleGhosts(); - auto ghostBox{domainBox}; - ghostBox.grow(partGhostWidth); - - auto inGhostBox = [&](auto& particleRange) { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, ghostBox); }); - }; + auto const& layout = boxing.layout; for (auto& pop : ions) { - ParticleArray& domain = pop.domainParticles(); + auto& domain = (tmp_particles_ = pop.domainParticles()); // make local copy - // first push all domain particles - // push them while still inDomainBox - // accumulate those inDomainBox - // erase those which left - - auto inRange = makeIndexRange(domain); + // first push all domain particles twice + // accumulate those inNonLevelGhostBox auto outRange = makeIndexRange(domain); + auto allowed = outRange = pusher_->move(outRange, outRange, em, pop.mass(), interpolator_, + layout, boxing.noop, boxing.inNonLevelGhostBox); - auto inDomain = pusher_->move( - inRange, outRange, em, pop.mass(), interpolator_, layout, - [](auto& particleRange) { return particleRange; }, inDomainBox); - - interpolator_(inDomain, pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout); + interpolator_(allowed, pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout); - // TODO : we can erase here because we know we 
are working on a state - // that has been saved in the solverPPC - // this makes the updater quite coupled to how the solverPPC works while - // it kind of pretends not to be by being independent object in core... - // note we need to erase here if using the back_inserter for ghost copy - // otherwise they will be added after leaving domain particles. - domain.erase(makeRange(domain, inDomain.iend(), domain.size())); - // then push patch and level ghost particles // push those in the ghostArea (i.e. stop pushing if they're not out of it) // deposit moments on those which leave to go inDomainBox - auto pushAndAccumulateGhosts = [&](auto& inputArray, bool copyInDomain = false) { - auto& outputArray = tmp_particles_.replace_from(inputArray); + auto pushAndAccumulateGhosts = [&](auto const& inputArray) { + tmp_particles_ = inputArray; // work on local copy - inRange = makeIndexRange(inputArray); - outRange = makeIndexRange(outputArray); + auto outRange = makeIndexRange(tmp_particles_); - auto enteredInDomain = pusher_->move(inRange, outRange, em, pop.mass(), interpolator_, - layout, inGhostBox, inDomainBox); + auto enteredInDomain = pusher_->move(outRange, outRange, em, pop.mass(), interpolator_, + layout, boxing.inGhostBox, boxing.inDomainBox); interpolator_(enteredInDomain, pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout); - - if (copyInDomain) - { - domain.reserve(domain.size() + enteredInDomain.size()); - std::copy(enteredInDomain.begin(), enteredInDomain.end(), - std::back_inserter(domain)); - } }; // After this function is done domain particles overlaping ghost layers of neighbor patches @@ -200,78 +207,68 @@ void IonUpdater::updateAndDepositDomain_(Ions& ion // On the contrary level ghost particles entering the domain here do not need to be copied // since they contribute to nodes that are not shared with neighbor patches an since // level border nodes will receive contributions from levelghost old and new particles - 
pushAndAccumulateGhosts(pop.patchGhostParticles(), true); - pushAndAccumulateGhosts(pop.levelGhostParticles()); + + if (pop.levelGhostParticles().size()) + pushAndAccumulateGhosts(pop.levelGhostParticles()); } } -template /** * @brief IonUpdater::updateAndDepositDomain_ evolves moments and particles from time n to n+1 */ +template +template void IonUpdater::updateAndDepositAll_(Ions& ions, Electromag const& em, - GridLayout const& layout) + Boxing_t const& boxing) { PHARE_LOG_SCOPE(3, "IonUpdater::updateAndDepositAll_"); - auto constexpr partGhostWidth = GridLayout::nbrParticleGhosts(); - auto domainBox = layout.AMRBox(); - auto ghostBox{domainBox}; - ghostBox.grow(partGhostWidth); - - auto inDomainBox = [&domainBox](auto& particleRange) // - { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, domainBox); }); - }; - - auto inGhostBox = [&](auto& particleRange) { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, ghostBox); }); - }; - - - auto inGhostLayer = [&](auto& particleRange) { - return particleRange.array().partition([&](auto const& cell) { - return isIn(Point{cell}, ghostBox) and !isIn(Point{cell}, domainBox); - }); - }; + auto const& layout = boxing.layout; // push domain particles, erase from array those leaving domain - // push patch and level ghost particles that are in ghost area (==ghost box without domain) - // copy patch and ghost particles out of ghost area that are in domain, in particle array - // finally all particles in domain are to be interpolated on mesh. + // push level ghost particles that are in ghost area (==ghost box without domain) + // copy ghost particles out of ghost area that are in domain, in particle array + // finally all particles in non level ghost box are to be interpolated on mesh. 
for (auto& pop : ions) { auto& domainParticles = pop.domainParticles(); auto domainPartRange = makeIndexRange(domainParticles); - auto inDomain = pusher_->move( - domainPartRange, domainPartRange, em, pop.mass(), interpolator_, layout, - [](auto const& particleRange) { return particleRange; }, inDomainBox); + auto inDomain = pusher_->move(domainPartRange, domainPartRange, em, pop.mass(), + interpolator_, layout, boxing.noop, boxing.inDomainBox); + + auto now_ghosts = makeRange(domainParticles, inDomain.iend(), domainParticles.size()); + auto const not_level_ghosts = boxing.inNonLevelGhostBox(now_ghosts); + + // copy out new patch ghosts + auto& patchGhost = pop.patchGhostParticles(); + patchGhost.reserve(patchGhost.size() + not_level_ghosts.size()); + std::copy(not_level_ghosts.begin(), not_level_ghosts.end(), std::back_inserter(patchGhost)); - domainParticles.erase(makeRange(domainParticles, inDomain.iend(), domainParticles.size())); + domainParticles.erase(now_ghosts); // drop all ghosts - auto pushAndCopyInDomain = [&](auto&& particleRange) { - auto inGhostLayerRange = pusher_->move(particleRange, particleRange, em, pop.mass(), - interpolator_, layout, inGhostBox, inGhostLayer); + if (pop.levelGhostParticles().size()) + { + auto particleRange = makeIndexRange(pop.levelGhostParticles()); + auto inGhostLayerRange + = pusher_->move(particleRange, particleRange, em, pop.mass(), interpolator_, layout, + boxing.inGhostBox, boxing.inGhostLayer); auto& particleArray = particleRange.array(); particleArray.export_particles( - domainParticles, [&](auto const& cell) { return isIn(Point{cell}, domainBox); }); + domainParticles, [&](auto const& cell) { return isIn(cell, boxing.domainBox); }); particleArray.erase( makeRange(particleArray, inGhostLayerRange.iend(), particleArray.size())); - }; - - pushAndCopyInDomain(makeIndexRange(pop.patchGhostParticles())); - pushAndCopyInDomain(makeIndexRange(pop.levelGhostParticles())); + } - interpolator_(makeIndexRange(domainParticles), 
pop.particleDensity(), pop.chargeDensity(), - pop.flux(), layout); + interpolator_( // + domainParticles, pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout); + interpolator_( // + patchGhost, pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout); } } diff --git a/src/core/numerics/moments/moments.hpp b/src/core/numerics/moments/moments.hpp index 87b805fa1..af6f190f5 100644 --- a/src/core/numerics/moments/moments.hpp +++ b/src/core/numerics/moments/moments.hpp @@ -1,10 +1,11 @@ #ifndef MOMENTS_HPP #define MOMENTS_HPP -#include #include "core/numerics/interpolator/interpolator.hpp" +#include + namespace PHARE { @@ -26,9 +27,6 @@ namespace core { }; - struct PatchGhostDeposit - { - }; struct LevelGhostDeposit { }; @@ -50,16 +48,13 @@ namespace core auto& partArray = pop.domainParticles(); interpolate(partArray, particleDensity, chargeDensity, flux, layout); } - else if constexpr (std::is_same_v) - { - auto& partArray = pop.patchGhostParticles(); - interpolate(partArray, particleDensity, chargeDensity, flux, layout); - } else if constexpr (std::is_same_v) { auto& partArray = pop.levelGhostParticlesOld(); interpolate(partArray, particleDensity, chargeDensity, flux, layout); } + else + throw std::runtime_error("unknown deposit tag"); } } diff --git a/src/core/numerics/pusher/pusher.hpp b/src/core/numerics/pusher/pusher.hpp index 9c1b48b9f..1e094ffd0 100644 --- a/src/core/numerics/pusher/pusher.hpp +++ b/src/core/numerics/pusher/pusher.hpp @@ -20,9 +20,9 @@ namespace core protected: static auto constexpr dimension = GridLayout::dimension; + public: using ParticleSelector = std::function; - public: // TODO : to really be independant on boris which has 2 push steps // we should have an arbitrary number of selectors, 1 per push step virtual ParticleRange move(ParticleRange const& rangeIn, ParticleRange& rangeOut, diff --git a/src/core/utilities/box/box.hpp b/src/core/utilities/box/box.hpp index 5265a3316..2d6c49f50 100644 --- 
a/src/core/utilities/box/box.hpp +++ b/src/core/utilities/box/box.hpp @@ -25,7 +25,7 @@ class box_iterator; template struct Box { - static constexpr std::size_t dimension = dim; + static constexpr auto dimension = dim; Point lower; @@ -74,7 +74,6 @@ struct Box void grow(Type const& size) { - assert(size >= 0); for (auto& c : lower) { c -= size; @@ -85,6 +84,14 @@ struct Box } } + template + auto& grow(std::array const& size) + { + lower -= size; + upper += size; + return *this; + } + NO_DISCARD auto shape() const { return upper - lower + 1; } NO_DISCARD auto size() const { return core::product(shape()); } @@ -232,22 +239,27 @@ bool isIn(Point const& point, BoxContainer const& boxes) return false; } +template +NO_DISCARD auto isIn(Particle const& particle, Box const& box) + -> decltype(isIn(particle.iCell, box), bool()) +{ + return isIn(particle.iCell, box); +} + /** This overload of isIn does the same as the one above but takes only * one box. */ -template -NO_DISCARD bool isIn(Point const& point, - Box const& box) +template typename Point, typename Type, std::size_t SIZE> +NO_DISCARD bool isIn(Point const& point, Box const& box) { - auto isIn1D = [](typename Point::value_type pos, typename Point::value_type lower, - typename Point::value_type upper) { return pos >= lower && pos <= upper; }; + auto isIn1D = [](auto const pos, auto const lower, auto const upper) { + return pos >= lower && pos <= upper; + }; bool pointInBox = true; - for (auto iDim = 0u; iDim < Point::dimension; ++iDim) - { + for (auto iDim = 0u; iDim < SIZE; ++iDim) pointInBox = pointInBox && isIn1D(point[iDim], box.lower[iDim], box.upper[iDim]); - } if (pointInBox) return pointInBox; @@ -255,6 +267,7 @@ NO_DISCARD bool isIn(Point const& point, } + template Box grow(Box const& box, OType const& size) { @@ -263,6 +276,15 @@ Box grow(Box const& box, OType const& size) return copy; } +template +NO_DISCARD Box shift(Box const& box, Shifter const& offset) +{ + auto copy{box}; + copy.lower += offset; + 
copy.upper += offset; + return copy; +} + template NO_DISCARD Box emptyBox() { @@ -283,7 +305,6 @@ auto& operator<<(std::ostream& os, Box const& box) } - } // namespace PHARE::core #endif diff --git a/src/core/utilities/cellmap.hpp b/src/core/utilities/cellmap.hpp index c50f48d5d..86ab52a48 100644 --- a/src/core/utilities/cellmap.hpp +++ b/src/core/utilities/cellmap.hpp @@ -170,7 +170,7 @@ class CellMap // erase all items indexed in the given range from both the cellmap and the // array the range is for. template - void erase(Range&& range); + void erase(Range range); // erase items indexes from the cellmap @@ -448,7 +448,7 @@ inline auto CellMap::partition(Range range, Predicate&& pred, } } - return makeRange(range.array(), range.ibegin(), range.ibegin() + pivot); + return makeRange(range.array(), range.ibegin(), pivot); } @@ -456,7 +456,7 @@ inline auto CellMap::partition(Range range, Predicate&& pred, template template -inline void CellMap::erase(Range&& range) +inline void CellMap::erase(Range range) { auto& items = range.array(); diff --git a/src/core/utilities/point/point.hpp b/src/core/utilities/point/point.hpp index 367636b49..f812bc1f7 100644 --- a/src/core/utilities/point/point.hpp +++ b/src/core/utilities/point/point.hpp @@ -130,7 +130,33 @@ namespace core return p; } + auto& operator+=(Type const& value) + { + for (auto iDim = 0u; iDim < dim; ++iDim) + r[iDim] += value; + return *this; + } + template typename Arr, typename T> + auto& operator+=(Arr const& value) + { + for (auto iDim = 0u; iDim < dim; ++iDim) + r[iDim] += value[iDim]; + return *this; + } + auto& operator-=(Type const& value) + { + for (auto iDim = 0u; iDim < dim; ++iDim) + r[iDim] -= value; + return *this; + } + template typename Arr, typename T> + auto& operator-=(Arr const& value) + { + for (auto iDim = 0u; iDim < dim; ++iDim) + r[iDim] -= value[iDim]; + return *this; + } auto operator+(Type const& value) const { @@ -165,6 +191,22 @@ namespace core } auto operator-(Point const& value) 
const { return (*this) - value.r; } + auto operator*(Type const& value) const + { + auto copy = *this; + for (auto iDim = 0u; iDim < dim; ++iDim) + copy[iDim] *= value; + return copy; + } + auto operator*(std::array const& value) const + { + auto copy = *this; + for (auto iDim = 0u; iDim < dim; ++iDim) + copy[iDim] *= value[iDim]; + return copy; + } + auto operator*(Point const& value) const { return (*this) * value.r; } + NO_DISCARD constexpr auto size() const { return dim; } NO_DISCARD auto begin() { return r.begin(); } @@ -174,6 +216,17 @@ namespace core NO_DISCARD auto& operator*() const { return r; } + auto as_unsigned() const + { + for (auto iDim = 0u; iDim < dim; ++iDim) + if (r[iDim] < 0) + throw std::runtime_error("Cannot make unsigned from negative values"); + + if constexpr (sizeof(Type) == 4) + return Point{this->template toArray()}; + // else no return cause not yet handled + } + private: std::array r{}; }; diff --git a/src/core/utilities/types.hpp b/src/core/utilities/types.hpp index f960ee036..8629e1e5e 100644 --- a/src/core/utilities/types.hpp +++ b/src/core/utilities/types.hpp @@ -494,6 +494,22 @@ auto make_named_tuple(Pairs&&... 
pairs) return std::make_tuple(pairs...); } + + +template +struct Equals +{ + void operator()(auto& d0) { d = d0; } + D& d; +}; + +template +struct PlusEquals +{ + void operator()(auto& d0) { d += d0; } + D& d; +}; + } // namespace PHARE::core diff --git a/src/diagnostic/detail/h5writer.hpp b/src/diagnostic/detail/h5writer.hpp index 378c8e901..9e636e0b4 100644 --- a/src/diagnostic/detail/h5writer.hpp +++ b/src/diagnostic/detail/h5writer.hpp @@ -166,6 +166,7 @@ class H5Writer } auto& modelView() { return modelView_; } + auto timestamp() const { return timestamp_; } std::size_t minLevel = 0, maxLevel = 10; // TODO hard-coded to be parametrized somehow HiFile::AccessMode flags; diff --git a/src/diagnostic/detail/types/fluid.hpp b/src/diagnostic/detail/types/fluid.hpp index 7221faa82..733dc0c79 100644 --- a/src/diagnostic/detail/types/fluid.hpp +++ b/src/diagnostic/detail/types/fluid.hpp @@ -75,65 +75,57 @@ void FluidDiagnosticWriter::compute(DiagnosticProperties& diagnostic) { core::MomentumTensorInterpolator interpolator; - auto& h5Writer = this->h5Writer_; - auto& modelView = h5Writer.modelView(); - auto& ions = modelView.getIons(); - auto minLvl = this->h5Writer_.minLevel; - auto maxLvl = this->h5Writer_.maxLevel; + auto& h5Writer = this->h5Writer_; + auto& modelView = h5Writer.modelView(); + auto& ions = modelView.getIons(); + auto const minLvl = this->h5Writer_.minLevel; + auto const maxLvl = this->h5Writer_.maxLevel; // compute the momentum tensor for each population that requires it // compute for all ions but that requires the computation of all pop - std::string tree{"/ions/"}; - if (isActiveDiag(diagnostic, tree, "momentum_tensor")) + + // dumps occur after the last substep but before the next first substep + // at this time, levelGhostPartsNew is emptied and not yet filled + // and the former levelGhostPartsNew has been moved to levelGhostPartsOld + + auto const fill_schedules = [&](auto& lvl) { + for (std::size_t i = 0; i < ions.size(); ++i) + 
modelView.fillPopMomTensor(lvl, h5Writer.timestamp(), i); + }; + + auto const interpolate_pop = [&](auto& pop, auto& layout, auto&&...) { + auto& pop_momentum_tensor = pop.momentumTensor(); + pop_momentum_tensor.zero(); + interpolator(pop.domainParticles(), pop_momentum_tensor, layout, pop.mass()); + interpolator(pop.levelGhostParticlesOld(), pop_momentum_tensor, layout, pop.mass()); + }; + + if (isActiveDiag(diagnostic, "/ions/", "momentum_tensor")) { - auto computeMomentumTensor - = [&](GridLayout& layout, std::string patchID, std::size_t iLvel) { - for (auto& pop : ions) - { - std::string tree{"/ions/pop/" + pop.name() + "/"}; - auto& pop_momentum_tensor = pop.momentumTensor(); - pop_momentum_tensor.zero(); - auto domainParts = core::makeIndexRange(pop.domainParticles()); - auto patchGhostParts = core::makeIndexRange(pop.patchGhostParticles()); - - // dumps occur after the last substep but before the next first substep - // at this time, levelGhostPartsNew is emptied and not yet filled - // and the former levelGhostPartsNew has been moved to levelGhostPartsOld - auto levelGhostParts = core::makeIndexRange(pop.levelGhostParticlesOld()); - - interpolator(domainParts, pop_momentum_tensor, layout, pop.mass()); - interpolator(patchGhostParts, pop_momentum_tensor, layout, pop.mass()); - interpolator(levelGhostParts, pop_momentum_tensor, layout, pop.mass()); - } - ions.computeFullMomentumTensor(); - }; - modelView.visitHierarchy(computeMomentumTensor, minLvl, maxLvl); + auto const interpolate = [&](auto& layout, auto&&...) { + for (auto& pop : ions) + interpolate_pop(pop, layout); + }; + modelView.visitHierarchy(interpolate, minLvl, maxLvl); + + modelView.onLevels(fill_schedules, minLvl, maxLvl); + + modelView.visitHierarchy( // + [&](auto&&...) 
{ ions.computeFullMomentumTensor(); }, minLvl, maxLvl); } else // if not computing total momentum tensor, user may want to compute it for some pop { for (auto& pop : ions) { - std::string tree{"/ions/pop/" + pop.name() + "/"}; + std::string const tree{"/ions/pop/" + pop.name() + "/"}; - auto computePopMomentumTensor - = [&](GridLayout& layout, std::string patchID, std::size_t iLvel) { - auto& pop_momentum_tensor = pop.momentumTensor(); - pop_momentum_tensor.zero(); - auto domainParts = core::makeIndexRange(pop.domainParticles()); - auto patchGhostParts = core::makeIndexRange(pop.patchGhostParticles()); - - // dumps occur after the last substep but before the next first substep - // at this time, levelGhostPartsNew is emptied and not yet filled - // and the former levelGhostPartsNew has been moved to levelGhostPartsOld - auto levelGhostParts = core::makeIndexRange(pop.levelGhostParticlesOld()); - - interpolator(domainParts, pop_momentum_tensor, layout, pop.mass()); - interpolator(patchGhostParts, pop_momentum_tensor, layout, pop.mass()); - interpolator(levelGhostParts, pop_momentum_tensor, layout, pop.mass()); - }; - if (isActiveDiag(diagnostic, tree, "momentum_tensor")) - { - modelView.visitHierarchy(computePopMomentumTensor, minLvl, maxLvl); - } + if (!isActiveDiag(diagnostic, tree, "momentum_tensor")) + continue; + + auto const interpolate = [&](auto& layout, auto&&...) 
{ interpolate_pop(pop, layout); }; + + modelView.visitHierarchy(interpolate, minLvl, maxLvl); + + modelView.onLevels(fill_schedules, minLvl, maxLvl); } } } diff --git a/src/diagnostic/detail/types/particle.hpp b/src/diagnostic/detail/types/particle.hpp index 72555f7b6..da1f089fa 100644 --- a/src/diagnostic/detail/types/particle.hpp +++ b/src/diagnostic/detail/types/particle.hpp @@ -22,7 +22,6 @@ namespace PHARE::diagnostic::h5 * * /t#/pl#/p#/ions/pop_(1,2,...)/domain/(weight, charge, iCell, delta, v) * /t#/pl#/p#/ions/pop_(1,2,...)/levelGhost/(weight, charge, iCell, delta, v) - * /t#/pl#/p#/ions/pop_(1,2,...)/patchGhost/(weight, charge, iCell, delta, v) */ template class ParticlesDiagnosticWriter : public H5TypeWriter @@ -71,7 +70,7 @@ void ParticlesDiagnosticWriter::createFiles(DiagnosticProperties& diag for (auto const& pop : this->h5Writer_.modelView().getIons()) { std::string tree{"/ions/pop/" + pop.name() + "/"}; - checkCreateFileFor_(diagnostic, fileData_, tree, "domain", "levelGhost", "patchGhost"); + checkCreateFileFor_(diagnostic, fileData_, tree, "domain", "levelGhost"); } } @@ -102,7 +101,6 @@ void ParticlesDiagnosticWriter::getDataSetInfo(DiagnosticProperties& d auto& popAttr = patchAttributes[lvlPatchID][pop.name()]; checkInfo(tree, "domain", popAttr, pop.domainParticles()); checkInfo(tree, "levelGhost", popAttr, pop.levelGhostParticles()); - checkInfo(tree, "patchGhost", popAttr, pop.patchGhostParticles()); } } @@ -155,7 +153,6 @@ void ParticlesDiagnosticWriter::initDataSets( std::string tree{"/ions/pop/" + pop.name() + "/"}; initIfActive(lvl, tree, attr, pop.name(), patchID, "domain"); initIfActive(lvl, tree, attr, pop.name(), patchID, "levelGhost"); - initIfActive(lvl, tree, attr, pop.name(), patchID, "patchGhost"); } }; @@ -180,7 +177,6 @@ void ParticlesDiagnosticWriter::write(DiagnosticProperties& diagnostic std::string tree{"/ions/pop/" + pop.name() + "/"}; checkWrite(tree, "domain", pop.domainParticles()); checkWrite(tree, "levelGhost", 
pop.levelGhostParticles()); - checkWrite(tree, "patchGhost", pop.patchGhostParticles()); } } @@ -205,7 +201,6 @@ void ParticlesDiagnosticWriter::writeAttributes( std::string tree = "/ions/pop/" + pop.name() + "/"; checkWrite(tree, "domain", pop); checkWrite(tree, "levelGhost", pop); - checkWrite(tree, "patchGhost", pop); } writeAttributes_(diagnostic, h5file, fileAttributes, patchAttributes, maxLevel); diff --git a/src/diagnostic/diagnostic_model_view.hpp b/src/diagnostic/diagnostic_model_view.hpp index 0431b49f2..e8166e75f 100644 --- a/src/diagnostic/diagnostic_model_view.hpp +++ b/src/diagnostic/diagnostic_model_view.hpp @@ -4,11 +4,15 @@ #include "core/def.hpp" #include "core/utilities/mpi_utils.hpp" -#include "amr/physical_models/hybrid_model.hpp" #include "amr/physical_models/mhd_model.hpp" +#include "amr/physical_models/hybrid_model.hpp" +#include "amr/messengers/field_sum_transaction.hpp" +#include "amr/data/field/field_variable_fill_pattern.hpp" #include "cppdict/include/dict.hpp" +#include + #include namespace PHARE::diagnostic @@ -26,8 +30,16 @@ template class BaseModelView : public IModelView { public: - using GridLayout = Model::gridlayout_type; - using VecField = Model::vecfield_type; + using GridLayout = Model::gridlayout_type; + using VecField = Model::vecfield_type; + using TensorFieldT = Model::ions_type::tensorfield_type; + using GridLayoutT = Model::gridlayout_type; + using ResMan = Model::resources_manager_type; + using FieldData_t = ResMan::UserField_t::patch_data_type; + static constexpr auto dimension = Model::dimension; + + +public: using PatchProperties = cppdict::Dict, std::vector, std::vector, std::vector, std::string, @@ -37,8 +49,47 @@ class BaseModelView : public IModelView : model_{model} , hierarchy_{hierarchy} { + declareMomentumTensorAlgos(); } + NO_DISCARD std::vector getElectromagFields() const + { + return {&model_.state.electromag.B, &model_.state.electromag.E}; + } + + NO_DISCARD auto& getIons() const { return 
model_.state.ions; } + + void fillPopMomTensor(auto& lvl, auto const time, auto const popidx) + { + using value_type = TensorFieldT::value_type; + auto constexpr N = core::detail::tensor_field_dim_from_rank<2>(); + + auto& rm = *model_.resourcesManager; + auto& ions = model_.state.ions; + + for (auto patch : rm.enumerate(lvl, ions, sumTensor_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(sumTensor_[c].data(), ions[popidx].momentumTensor()[c].data(), + ions[popidx].momentumTensor()[c].size() * sizeof(value_type)); + + MTAlgos[popidx].getOrCreateSchedule(hierarchy_, lvl.getLevelNumber()).fillData(time); + + for (auto patch : rm.enumerate(lvl, ions, sumTensor_)) + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(ions[popidx].momentumTensor()[c].data(), sumTensor_[c].data(), + ions[popidx].momentumTensor()[c].size() * sizeof(value_type)); + } + + + template + void onLevels(Action&& action, int minlvl = 0, int maxlvl = 0) + { + for (int ilvl = minlvl; ilvl < hierarchy_.getNumberOfLevels() && ilvl <= maxlvl; ++ilvl) + if (auto lvl = hierarchy_.getPatchLevel(ilvl)) + action(*lvl); + } + + template void visitHierarchy(Action&& action, int minLevel = 0, int maxLevel = 0) { @@ -93,6 +144,50 @@ class BaseModelView : public IModelView protected: Model& model_; Hierarchy& hierarchy_; + + void declareMomentumTensorAlgos() + { + auto& rm = *model_.resourcesManager; + + auto const dst_names = sumTensor_.componentNames(); + + for (auto& pop : model_.state.ions) + { + auto& MTAlgo = MTAlgos.emplace_back(); + auto const src_names = pop.momentumTensor().componentNames(); + + for (std::size_t i = 0; i < dst_names.size(); ++i) + { + auto&& [idDst, idSrc] = rm.getIDsList(dst_names[i], src_names[i]); + MTAlgo.MTalgo->registerRefine( + idDst, idSrc, idDst, nullptr, + std::make_shared>()); + } + } + + // can't create schedules here as the hierarchy has no levels yet + } + + struct MTAlgo + { + auto& getOrCreateSchedule(auto& hierarchy, int const ilvl) + { + if (not 
MTschedules.count(ilvl)) + MTschedules.try_emplace( + ilvl, + MTalgo->createSchedule( + hierarchy.getPatchLevel(ilvl), 0, + std::make_shared>())); + return *MTschedules[ilvl]; + } + + std::unique_ptr MTalgo + = std::make_unique(); + std::map> MTschedules; + }; + + std::vector MTAlgos; + TensorFieldT sumTensor_{"PHARE_sumTensor", core::HybridQuantity::Tensor::M}; }; diff --git a/src/python3/patch_level.hpp b/src/python3/patch_level.hpp index 7ef151e72..40a672ed1 100644 --- a/src/python3/patch_level.hpp +++ b/src/python3/patch_level.hpp @@ -286,7 +286,6 @@ class __attribute__((visibility("hidden"))) PatchLevel auto& inner = pop_particles.at(pop.name()); getParticleData(inner, grid, patchID, "domain", pop.domainParticles()); - getParticleData(inner, grid, patchID, "patchGhost", pop.patchGhostParticles()); getParticleData(inner, grid, patchID, "levelGhost", pop.levelGhostParticles()); } } diff --git a/tests/amr/data/field/refine/test_field_refine.cpp b/tests/amr/data/field/refine/test_field_refine.cpp index 836616d56..7fc48a6ad 100644 --- a/tests/amr/data/field/refine/test_field_refine.cpp +++ b/tests/amr/data/field/refine/test_field_refine.cpp @@ -1,24 +1,22 @@ #include "core/def/phare_mpi.hpp" -#include -#include - -#include "gmock/gmock.h" -#include "gtest/gtest.h" - +#include "core/data/grid/gridlayout.hpp" +#include #include "amr/data/field/refine/field_linear_refine.hpp" #include "amr/data/field/refine/field_refine_operator.hpp" #include "amr/data/field/refine/field_refiner.hpp" -#include "core/data/grid/gridlayout.hpp" #include "test_field_refinement_on_hierarchy.hpp" +#include +#include + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + -#include -#include -#include using namespace PHARE::core; using namespace PHARE::amr; diff --git a/tests/amr/data/particles/copy/test_particledata_copyNd.cpp b/tests/amr/data/particles/copy/test_particledata_copyNd.cpp index 2e0b3531f..951eaf775 100644 --- a/tests/amr/data/particles/copy/test_particledata_copyNd.cpp +++ 
b/tests/amr/data/particles/copy/test_particledata_copyNd.cpp @@ -62,22 +62,6 @@ TYPED_TEST_SUITE(AParticlesDataND, WithAllDim); -TYPED_TEST(AParticlesDataND, copiesSourceDomainParticleIntoGhostForDomainSrcOverGhostDest) -{ - static constexpr auto dim = TypeParam{}(); - - // particle is in the domain of the source patchdata - // and in first ghost of the destination patchdata - - this->particle.iCell = ConstArray(6); - - this->sourceData.domainParticles.push_back(this->particle); - this->destData.copy(this->sourceData); - - ASSERT_THAT(this->destData.patchGhostParticles.size(), Eq(1)); - ASSERT_THAT(this->destData.domainParticles.size(), Eq(0)); -} - TYPED_TEST(AParticlesDataND, copiesSourceDomainParticleIntoDomainDestForDomainOverlapCells) { @@ -125,17 +109,32 @@ TYPED_TEST(AParticlesDataND, PreservesAllParticleAttributesAfterCopy) // particle is in the domain of the source patchdata // and in last ghost of the destination patchdata + this->sourceData.domainParticles.clear(); + this->destData.domainParticles.clear(); + + EXPECT_THAT(this->sourceData.domainParticles.size(), Eq(0)); + EXPECT_THAT(this->destData.domainParticles.size(), Eq(0)); + EXPECT_THAT(this->sourceData.patchGhostParticles.size(), Eq(0)); + EXPECT_THAT(this->destData.patchGhostParticles.size(), Eq(0)); + + auto const newCell = ConstArray(6); this->particle.iCell = ConstArray(6); + EXPECT_THAT(newCell, Eq(this->particle.iCell)); this->sourceData.domainParticles.push_back(this->particle); + EXPECT_THAT(this->sourceData.domainParticles.size(), Eq(1)); + EXPECT_THAT(this->sourceData.domainParticles[0].iCell, Eq(newCell)); + this->destData.copy(this->sourceData); + EXPECT_THAT(this->destData.domainParticles.size(), Eq(1)); - EXPECT_THAT(this->destData.patchGhostParticles[0].v, Pointwise(DoubleEq(), this->particle.v)); - EXPECT_THAT(this->destData.patchGhostParticles[0].iCell, Eq(this->particle.iCell)); - EXPECT_THAT(this->destData.patchGhostParticles[0].delta, + 
EXPECT_THAT(this->destData.domainParticles[0].v, Pointwise(DoubleEq(), this->particle.v)); + EXPECT_THAT(this->destData.domainParticles[0].iCell, Eq(newCell)); + EXPECT_THAT(this->destData.domainParticles[0].iCell, Eq(this->particle.iCell)); + EXPECT_THAT(this->destData.domainParticles[0].delta, Pointwise(DoubleEq(), this->particle.delta)); - EXPECT_THAT(this->destData.patchGhostParticles[0].weight, DoubleEq(this->particle.weight)); - EXPECT_THAT(this->destData.patchGhostParticles[0].charge, DoubleEq(this->particle.charge)); + EXPECT_THAT(this->destData.domainParticles[0].weight, DoubleEq(this->particle.weight)); + EXPECT_THAT(this->destData.domainParticles[0].charge, DoubleEq(this->particle.charge)); } @@ -188,8 +187,8 @@ TYPED_TEST(AParticlesDataND, copiesDataWithOverlapNoTransform) this->sourceData.domainParticles.push_back(this->particle); this->destData.copy(this->sourceData, overlap); - EXPECT_THAT(this->destData.patchGhostParticles.size(), Eq(1)); - EXPECT_THAT(this->destData.domainParticles.size(), Eq(0)); + EXPECT_THAT(this->destData.patchGhostParticles.size(), Eq(0)); + EXPECT_THAT(this->destData.domainParticles.size(), Eq(1)); this->sourceData.domainParticles.clear(); this->sourceData.patchGhostParticles.clear(); @@ -241,8 +240,6 @@ TYPED_TEST(AParticlesDataND, copiesDataWithOverlapWithTransform) EXPECT_EQ(5, this->destData.domainParticles[0].iCell[0]); this->sourceData.domainParticles.clear(); - this->sourceData.patchGhostParticles.clear(); - this->destData.patchGhostParticles.clear(); this->destData.domainParticles.clear(); // particle is in the domain of the source patchdata @@ -253,13 +250,11 @@ TYPED_TEST(AParticlesDataND, copiesDataWithOverlapWithTransform) this->sourceData.domainParticles.push_back(this->particle); this->destData.copy(this->sourceData, overlap); - EXPECT_THAT(this->destData.patchGhostParticles.size(), Eq(1)); - EXPECT_THAT(this->destData.domainParticles.size(), Eq(0)); - EXPECT_EQ(6, 
this->destData.patchGhostParticles[0].iCell[0]); + EXPECT_THAT(this->destData.patchGhostParticles.size(), Eq(0)); + EXPECT_THAT(this->destData.domainParticles.size(), Eq(1)); + EXPECT_EQ(6, this->destData.domainParticles[0].iCell[0]); this->sourceData.domainParticles.clear(); - this->sourceData.patchGhostParticles.clear(); - this->destData.patchGhostParticles.clear(); this->destData.domainParticles.clear(); // particle is in the domain of the source patchdata diff --git a/tests/amr/data/particles/copy_overlap/test_particledata_copy_periodicNd.cpp b/tests/amr/data/particles/copy_overlap/test_particledata_copy_periodicNd.cpp index e078c7940..0318a0993 100644 --- a/tests/amr/data/particles/copy_overlap/test_particledata_copy_periodicNd.cpp +++ b/tests/amr/data/particles/copy_overlap/test_particledata_copy_periodicNd.cpp @@ -111,8 +111,8 @@ TYPED_TEST(twoParticlesDataNDTouchingPeriodicBorders, this->sourcePdat.domainParticles.push_back(this->particle); this->destPdat.copy(this->sourcePdat, *(this->cellOverlap)); - EXPECT_THAT(this->destPdat.patchGhostParticles.size(), Eq(1)); - EXPECT_EQ(leftDestGhostCell, this->destPdat.patchGhostParticles[0].iCell[0]); + EXPECT_THAT(this->destPdat.domainParticles.size(), Eq(1)); + EXPECT_EQ(leftDestGhostCell, this->destPdat.domainParticles[0].iCell[0]); } @@ -125,20 +125,20 @@ TYPED_TEST(twoParticlesDataNDTouchingPeriodicBorders, preserveParticleAttributes this->sourcePdat.domainParticles.push_back(this->particle); this->destPdat.copy(this->sourcePdat, *(this->cellOverlap)); - EXPECT_THAT(this->destPdat.patchGhostParticles.size(), Eq(1)); - EXPECT_THAT(this->destPdat.patchGhostParticles[0].v, Eq(this->particle.v)); - EXPECT_THAT(this->destPdat.patchGhostParticles[0].iCell[0], Eq(-1)); + EXPECT_THAT(this->destPdat.domainParticles.size(), Eq(1)); + EXPECT_THAT(this->destPdat.domainParticles[0].v, Eq(this->particle.v)); + EXPECT_THAT(this->destPdat.domainParticles[0].iCell[0], Eq(-1)); if constexpr (dim > 1) { - 
EXPECT_THAT(this->destPdat.patchGhostParticles[0].iCell[1], Eq(-1)); + EXPECT_THAT(this->destPdat.domainParticles[0].iCell[1], Eq(-1)); } if constexpr (dim > 2) { - EXPECT_THAT(this->destPdat.patchGhostParticles[0].iCell[2], Eq(-1)); + EXPECT_THAT(this->destPdat.domainParticles[0].iCell[2], Eq(-1)); } - EXPECT_THAT(this->destPdat.patchGhostParticles[0].delta, Eq(this->particle.delta)); - EXPECT_THAT(this->destPdat.patchGhostParticles[0].weight, Eq(this->particle.weight)); - EXPECT_THAT(this->destPdat.patchGhostParticles[0].charge, Eq(this->particle.charge)); + EXPECT_THAT(this->destPdat.domainParticles[0].delta, Eq(this->particle.delta)); + EXPECT_THAT(this->destPdat.domainParticles[0].weight, Eq(this->particle.weight)); + EXPECT_THAT(this->destPdat.domainParticles[0].charge, Eq(this->particle.charge)); } diff --git a/tests/amr/data/particles/stream_pack/test_main.cpp b/tests/amr/data/particles/stream_pack/test_main.cpp index 77718194e..38db1b8a5 100644 --- a/tests/amr/data/particles/stream_pack/test_main.cpp +++ b/tests/amr/data/particles/stream_pack/test_main.cpp @@ -119,8 +119,8 @@ TYPED_TEST(StreamPackTest, PreserveVelocityWhenPackStreamWithPeriodics) destData.unpackStream(particlesReadStream, *cellOverlap); - ASSERT_THAT(destData.patchGhostParticles.size(), Eq(1)); - ASSERT_THAT(destData.patchGhostParticles[0].v, Eq(particle.v)); + ASSERT_THAT(destData.domainParticles.size(), Eq(1)); + ASSERT_THAT(destData.domainParticles[0].v, Eq(particle.v)); } @@ -156,8 +156,8 @@ TYPED_TEST(StreamPackTest, ShiftTheiCellWhenPackStreamWithPeriodics) auto expectediCell = ConstArray(-1); - ASSERT_THAT(destData.patchGhostParticles.size(), Eq(1)); - ASSERT_THAT(destData.patchGhostParticles[0].iCell, Eq(expectediCell)); + ASSERT_THAT(destData.domainParticles.size(), Eq(1)); + ASSERT_THAT(destData.domainParticles[0].iCell, Eq(expectediCell)); } @@ -189,8 +189,8 @@ TYPED_TEST(StreamPackTest, PackInTheCorrectBufferWithPeriodics) auto expectediCell = ConstArray(-1); - 
ASSERT_THAT(destData.patchGhostParticles.size(), Eq(1)); - ASSERT_THAT(destData.patchGhostParticles[0].iCell, Eq(expectediCell)); + ASSERT_THAT(destData.domainParticles.size(), Eq(1)); + ASSERT_THAT(destData.domainParticles[0].iCell, Eq(expectediCell)); } @@ -260,11 +260,11 @@ TYPED_TEST(StreamPackTest, auto expectediCell = ConstArray(-1); - EXPECT_THAT(destData.patchGhostParticles[0].v, Eq(particle.v)); - EXPECT_THAT(destData.patchGhostParticles[0].iCell, Eq(expectediCell)); - EXPECT_THAT(destData.patchGhostParticles[0].delta, Eq(particle.delta)); - EXPECT_THAT(destData.patchGhostParticles[0].weight, Eq(particle.weight)); - EXPECT_THAT(destData.patchGhostParticles[0].charge, Eq(particle.charge)); + EXPECT_THAT(destData.domainParticles[0].v, Eq(particle.v)); + EXPECT_THAT(destData.domainParticles[0].iCell, Eq(expectediCell)); + EXPECT_THAT(destData.domainParticles[0].delta, Eq(particle.delta)); + EXPECT_THAT(destData.domainParticles[0].weight, Eq(particle.weight)); + EXPECT_THAT(destData.domainParticles[0].charge, Eq(particle.charge)); } diff --git a/tests/amr/messengers/test_messengers.cpp b/tests/amr/messengers/test_messengers.cpp index d9ba9eefd..e6d858580 100644 --- a/tests/amr/messengers/test_messengers.cpp +++ b/tests/amr/messengers/test_messengers.cpp @@ -260,16 +260,12 @@ class HybridMessengers : public ::testing::Test auto hybridModel = std::make_unique(createDict(), resourcesManagerHybrid); auto mhdModel = std::make_unique(resourcesManagerMHD); - hybridModel->resourcesManager->registerResources(hybridModel->state.electromag); - hybridModel->resourcesManager->registerResources(hybridModel->state.ions); - - mhdModel->resourcesManager->registerResources(mhdModel->state.B); - mhdModel->resourcesManager->registerResources(mhdModel->state.V); + hybridModel->resourcesManager->registerResources(hybridModel->state); + mhdModel->resourcesManager->registerResources(mhdModel->state); models.push_back(std::move(mhdModel)); models.push_back(std::move(hybridModel)); - 
auto mhdmhdMessenger{ messengerFactory.create("MHDModel-MHDModel", *models[0], *models[0], 0)}; auto mhdHybridMessenger{ @@ -308,6 +304,7 @@ TEST_F(HybridMessengers, receiveQuantitiesFromHybridModelsOnlyAndHybridSolver) { auto hybridSolver = std::make_unique>( createDict()["simulation"]["algo"]); + hybridSolver->registerResources(*models[1]); MessengerRegistration::registerQuantities(*messengers[2], *models[1], *models[1], *hybridSolver); } @@ -477,7 +474,6 @@ struct AfullHybridBasicHierarchy std::make_shared>(std::move(hybhybStrat))}; std::shared_ptr> solver{ - std::make_shared>( createDict()["simulation"]["algo"])}; diff --git a/tests/core/numerics/ion_updater/test_updater.cpp b/tests/core/numerics/ion_updater/test_updater.cpp index 09013e517..e41c42ab8 100644 --- a/tests/core/numerics/ion_updater/test_updater.cpp +++ b/tests/core/numerics/ion_updater/test_updater.cpp @@ -369,6 +369,7 @@ struct IonUpdaterTest : public ::testing::Test using ParticleInitializerFactory = typename PHARETypes::ParticleInitializerFactory; using IonUpdater = typename PHARE::core::IonUpdater; + using Boxing_t = PHARE::core::UpdaterSelectionBoxing; double dt{0.01}; @@ -376,6 +377,8 @@ struct IonUpdaterTest : public ::testing::Test // grid configuration std::array ncells; GridLayout layout; + // assumes no level ghost cells + Boxing_t const boxing{layout, {grow(layout.AMRBox(), GridLayout::nbrParticleGhosts())}}; // data for electromagnetic fields @@ -493,7 +496,6 @@ struct IonUpdaterTest : public ::testing::Test } - std::copy(std::begin(levelGhostPartOld), std::end(levelGhostPartOld), std::back_inserter(levelGhostPartNew)); @@ -502,39 +504,15 @@ struct IonUpdaterTest : public ::testing::Test std::back_inserter(levelGhostPart)); - // now let's create patchGhostParticles on the right of the domain - // by copying those on the last cell - - - for (auto const& part : domainPart) - { - if constexpr (interp_order == 2 or interp_order == 3) - { - if (part.iCell[0] == lastAMRCell[0] or 
part.iCell[0] == lastAMRCell[0] - 1) - { - auto p{part}; - p.iCell[0] += 2; - patchGhostPart.push_back(p); - } - } - else if constexpr (interp_order == 1) - { - if (part.iCell[0] == lastAMRCell[0]) - { - auto p{part}; - p.iCell[0] += 1; - patchGhostPart.push_back(p); - } - } - } + EXPECT_GT(pop.domainParticles().size(), 0ull); + EXPECT_GT(levelGhostPartOld.size(), 0ull); + EXPECT_EQ(patchGhostPart.size(), 0); } // end 1D } // end pop loop PHARE::core::depositParticles(ions, layout, Interpolator{}, PHARE::core::DomainDeposit{}); - PHARE::core::depositParticles(ions, layout, Interpolator{}, - PHARE::core::PatchGhostDeposit{}); PHARE::core::depositParticles(ions, layout, Interpolator{}, PHARE::core::LevelGhostDeposit{}); @@ -553,9 +531,6 @@ struct IonUpdaterTest : public ::testing::Test for (auto& pop : this->ions) { - interpolate(makeIndexRange(pop.patchGhostParticles()), pop.particleDensity(), - pop.chargeDensity(), pop.flux(), layout); - double alpha = 0.5; interpolate(makeIndexRange(pop.levelGhostParticlesNew()), pop.particleDensity(), pop.chargeDensity(), pop.flux(), layout, @@ -698,7 +673,7 @@ TYPED_TEST(IonUpdaterTest, loadsDomainPatchAndLevelGhostParticles) { auto check = [this](std::size_t nbrGhostCells, auto& pop) { EXPECT_EQ(this->layout.nbrCells()[0] * nbrPartPerCell, pop.domainParticles().size()); - EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.patchGhostParticles().size()); + EXPECT_EQ(0, pop.patchGhostParticles().size()); EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticlesOld().size()); EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticlesNew().size()); EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticles().size()); @@ -724,39 +699,6 @@ TYPED_TEST(IonUpdaterTest, loadsDomainPatchAndLevelGhostParticles) -TYPED_TEST(IonUpdaterTest, loadsPatchGhostParticlesOnRightGhostArea) -{ - int lastPhysCell = this->layout.physicalEndIndex(QtyCentering::dual, Direction::X); - auto lastAMRCell = 
this->layout.localToAMR(Point{lastPhysCell}); - - if constexpr (TypeParam::dimension == 1) - { - for (auto& pop : this->ions) - { - if constexpr (TypeParam::interp_order == 1) - { - for (auto const& part : pop.patchGhostParticles()) - { - EXPECT_EQ(lastAMRCell[0] + 1, part.iCell[0]); - } - } - else if constexpr (TypeParam::interp_order == 2 or TypeParam::interp_order == 3) - { - typename IonUpdaterTest::ParticleArray copy{pop.patchGhostParticles()}; - auto firstInOuterMostCell = std::partition( - std::begin(copy), std::end(copy), [&lastAMRCell](auto const& particle) { - return particle.iCell[0] == lastAMRCell[0] + 1; - }); - EXPECT_EQ(nbrPartPerCell, std::distance(std::begin(copy), firstInOuterMostCell)); - EXPECT_EQ(nbrPartPerCell, std::distance(firstInOuterMostCell, std::end(copy))); - } - } - } -} - - - - TYPED_TEST(IonUpdaterTest, loadsLevelGhostParticlesOnLeftGhostArea) { int firstPhysCell = this->layout.physicalStartIndex(QtyCentering::dual, Direction::X); @@ -801,7 +743,7 @@ TYPED_TEST(IonUpdaterTest, particlesUntouchedInMomentOnlyMode) IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); @@ -847,7 +789,7 @@ TYPED_TEST(IonUpdaterTest, particlesUntouchedInMomentOnlyMode) // // IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; // -// ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, +// ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, // UpdaterMode::particles_and_moments); // // this->fillIonsMomentsGhosts(); @@ -872,7 +814,7 @@ TYPED_TEST(IonUpdaterTest, momentsAreChangedInParticlesAndMomentsMode) IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, UpdaterMode::all); + 
ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::all); this->fillIonsMomentsGhosts(); @@ -892,7 +834,7 @@ TYPED_TEST(IonUpdaterTest, momentsAreChangedInMomentsOnlyMode) IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); @@ -910,7 +852,7 @@ TYPED_TEST(IonUpdaterTest, thatNoNaNsExistOnPhysicalNodesMoments) typename IonUpdaterTest::IonUpdater ionUpdater{ init_dict["simulation"]["algo"]["ion_updater"]}; - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); diff --git a/tests/diagnostic/__init__.py b/tests/diagnostic/__init__.py index 421fa0dda..affb4b60f 100644 --- a/tests/diagnostic/__init__.py +++ b/tests/diagnostic/__init__.py @@ -47,7 +47,7 @@ def dump_all_diags(pops=[], flush_every=100, timestamps=None): population_name=pop, ) - for quantity in ["domain", "levelGhost", "patchGhost"]: + for quantity in ["domain", "levelGhost"]: ph.ParticleDiagnostics( quantity=quantity, write_timestamps=timestamps, diff --git a/tests/diagnostic/test_diagnostics.hpp b/tests/diagnostic/test_diagnostics.hpp index dcd2b1159..216d64e27 100644 --- a/tests/diagnostic/test_diagnostics.hpp +++ b/tests/diagnostic/test_diagnostics.hpp @@ -226,7 +226,7 @@ void validateAttributes(Simulator& sim, Hi5Diagnostic& hi5) using GridLayout = typename Simulator::PHARETypes::GridLayout_t; constexpr auto dimension = Simulator::dimension; constexpr std::size_t expectedPopNbr = 2; - constexpr std::size_t expectedPopAttrFiles = 6; + constexpr std::size_t expectedPopAttrFiles = 5; std::string const ionsPopPath = "/ions/pop/"; @@ -246,7 +246,6 @@ void validateAttributes(Simulator& sim, Hi5Diagnostic& 
hi5) h5FileTypes.emplace_back(ionsPopPath + popName + "/domain"); h5FileTypes.emplace_back(ionsPopPath + popName + "/levelGhost"); - h5FileTypes.emplace_back(ionsPopPath + popName + "/patchGhost"); h5FileTypes.emplace_back(ionsPopPath + popName + "/density"); h5FileTypes.emplace_back(ionsPopPath + popName + "/charge_density"); h5FileTypes.emplace_back(ionsPopPath + popName + "/flux"); diff --git a/tests/diagnostic/test_diagnostics.ipp b/tests/diagnostic/test_diagnostics.ipp index e108c465b..6131d26e5 100644 --- a/tests/diagnostic/test_diagnostics.ipp +++ b/tests/diagnostic/test_diagnostics.ipp @@ -64,10 +64,8 @@ void particles_test(Simulator&& sim, std::string out_dir) Hi5Diagnostic hi5{hierarchy, hybridModel, out_dir, NEW_HI5_FILE}; hi5.dMan.addDiagDict(hi5.particles("/ions/pop/alpha/domain")) .addDiagDict(hi5.particles("/ions/pop/alpha/levelGhost")) - .addDiagDict(hi5.particles("/ions/pop/alpha/patchGhost")) .addDiagDict(hi5.particles("/ions/pop/protons/domain")) - .addDiagDict(hi5.particles("/ions/pop/protons/levelGhost")) - .addDiagDict(hi5.particles("/ions/pop/protons/patchGhost")); + .addDiagDict(hi5.particles("/ions/pop/protons/levelGhost")); hi5.dump(); } diff --git a/tests/functional/conservation/conserv.py b/tests/functional/conservation/conserv.py index 49f526410..ca06a2941 100644 --- a/tests/functional/conservation/conserv.py +++ b/tests/functional/conservation/conserv.py @@ -84,7 +84,7 @@ def vthz(x): for quantity in ["B"]: ph.ElectromagDiagnostics(quantity=quantity, write_timestamps=timestamps) - for name in ["domain", "levelGhost", "patchGhost"]: + for name in ["domain", "levelGhost"]: ph.ParticleDiagnostics( quantity=name, write_timestamps=timestamps, diff --git a/tests/simulator/advance/test_particles_advance_1d.py b/tests/simulator/advance/test_particles_advance_1d.py index d2d14e6ba..9ca3e68f2 100644 --- a/tests/simulator/advance/test_particles_advance_1d.py +++ b/tests/simulator/advance/test_particles_advance_1d.py @@ -23,19 +23,6 @@ def 
per_interp(dic): @ddt class AdvanceTest(AdvanceTestBase): - @data( - *per_interp({}), - *per_interp({"L0": [Box1D(10, 20)]}), - *per_interp({"L0": [Box1D(2, 12), Box1D(13, 25)]}), - ) - @unpack - def test_overlapped_particledatas_have_identical_particles( - self, interp_order, refinement_boxes - ): - self._test_overlapped_particledatas_have_identical_particles( - ndim, interp_order, refinement_boxes - ) - @data(*interp_orders) def test_L0_particle_number_conservation(self, interp): self._test_L0_particle_number_conservation(ndim, interp) diff --git a/tests/simulator/advance/test_particles_advance_2d.py b/tests/simulator/advance/test_particles_advance_2d.py index c82d8eba2..d84070db6 100644 --- a/tests/simulator/advance/test_particles_advance_2d.py +++ b/tests/simulator/advance/test_particles_advance_2d.py @@ -24,24 +24,6 @@ def per_interp(dic): @ddt class AdvanceTest(AdvanceTestBase): - @data( - *per_interp({}), - *per_interp({"L0": [Box2D(10, 19)]}), - *per_interp({"L0": [Box2D(5, 9), Box2D(10, 14)]}), - ) - @unpack - def test_overlapped_particledatas_have_identical_particles( - self, interp_order, refinement_boxes - ): - self._test_overlapped_particledatas_have_identical_particles( - ndim, - interp_order, - refinement_boxes, - ppc=ppc, - cells=40, - largest_patch_size=20, - ) - @data(*interp_orders) def test_L0_particle_number_conservation(self, interp): self._test_L0_particle_number_conservation(ndim, interp, ppc=ppc) diff --git a/tests/simulator/initialize/density_check.py b/tests/simulator/initialize/density_check.py index bbd54799d..7e474dedb 100644 --- a/tests/simulator/initialize/density_check.py +++ b/tests/simulator/initialize/density_check.py @@ -25,38 +25,42 @@ ncell = 100 dl = 0.2 -L = ncell*dl +L = ncell * dl ts = 0.01 -masses=(2, 3) -charges=(1, 2) - +masses = (2, 3) +charges = (1, 2) def densityMain_1d(x): return 1.0 + def densityBeam_1d(x): - u = x/L-0.5 - return np.exp(-u**2) + u = x / L - 0.5 + return np.exp(-(u**2)) + def bx_1d(x): return 1.0 + 
def by_1d(x): return 0.0 + def bz_1d(x): return 0.0 + def v0_1d(x): return 0.0 + def vth_1d(x): return np.sqrt(1.0) def config_1d(): - sim = ph.Simulation( smallest_patch_size=20, largest_patch_size=60, @@ -84,8 +88,20 @@ def config_1d(): bx=bx_1d, by=by_1d, bz=bz_1d, - main={"mass": masses[0], "charge": charges[0], "density": densityMain_1d, "nbr_part_per_cell": 1000, **v_pop}, - beam={"mass": masses[1], "charge": charges[1], "density": densityBeam_1d, "nbr_part_per_cell": 1000, **v_pop}, + main={ + "mass": masses[0], + "charge": charges[0], + "density": densityMain_1d, + "nbr_part_per_cell": 1000, + **v_pop, + }, + beam={ + "mass": masses[1], + "charge": charges[1], + "density": densityBeam_1d, + "nbr_part_per_cell": 1000, + **v_pop, + }, ) ph.ElectronModel(closure="isothermal", Te=0.0) @@ -93,10 +109,7 @@ def config_1d(): timestamps = all_timestamps(global_vars.sim) for quantity in ["charge_density", "mass_density"]: - FluidDiagnostics( - quantity=quantity, - write_timestamps=timestamps - ) + FluidDiagnostics(quantity=quantity, write_timestamps=timestamps) poplist = ["main", "beam"] for pop in poplist: @@ -110,35 +123,39 @@ def config_1d(): return sim - def densityMain_2d(x, y): assert len(x) == len(y) - return 1.0*np.ones_like(x) + return 1.0 * np.ones_like(x) + def densityBeam_2d(x, y): assert len(x) == len(y) - u = x/L-0.5 - v = y/L-0.5 - return np.exp(-u**2-v**2) + u = x / L - 0.5 + v = y / L - 0.5 + return np.exp(-(u**2) - v**2) + def bx_2d(x, y): return 1.0 + def by_2d(x, y): return 0.0 + def bz_2d(x, y): return 0.0 + def v0_2d(x, y): return 0.0 + def vth_2d(x, y): return np.sqrt(1.0) def config_2d(): - sim = ph.Simulation( smallest_patch_size=20, largest_patch_size=60, @@ -166,8 +183,20 @@ def config_2d(): bx=bx_2d, by=by_2d, bz=bz_2d, - main={"mass": masses[0], "charge": charges[0], "density": densityMain_2d, "nbr_part_per_cell": 1000, **v_pop}, - beam={"mass": masses[1], "charge": charges[1], "density": densityBeam_2d, "nbr_part_per_cell": 1000, 
**v_pop}, + main={ + "mass": masses[0], + "charge": charges[0], + "density": densityMain_2d, + "nbr_part_per_cell": 1000, + **v_pop, + }, + beam={ + "mass": masses[1], + "charge": charges[1], + "density": densityBeam_2d, + "nbr_part_per_cell": 1000, + **v_pop, + }, ) ph.ElectronModel(closure="isothermal", Te=0.0) @@ -175,10 +204,7 @@ def config_2d(): timestamps = all_timestamps(global_vars.sim) for quantity in ["charge_density", "mass_density"]: - FluidDiagnostics( - quantity=quantity, - write_timestamps=timestamps - ) + FluidDiagnostics(quantity=quantity, write_timestamps=timestamps) poplist = ["main", "beam"] for pop in poplist: @@ -192,9 +218,7 @@ def config_2d(): return sim - def main(): - Simulator(config_1d()).run().reset() ph.global_vars.sim = None Simulator(config_2d()).run().reset() @@ -204,22 +228,13 @@ def assert_close_enough(h, H): for patch_h, patch_H in zip(lvl_h.patches, lvl_H.patches): pd_h = patch_h.patch_datas["value"] pd_H = patch_H.patch_datas["value"] - ghosts_num = pd_h.ghosts_nbr[0] - if pd_H.ndim == 1: - dset_h = pd_h.dataset[ghosts_num:-ghosts_num] - dset_H = pd_H.dataset[ghosts_num:-ghosts_num] - if pd_H.ndim == 2: - dset_h = pd_h.dataset[ghosts_num:-ghosts_num, ghosts_num:-ghosts_num] - dset_H = pd_H.dataset[ghosts_num:-ghosts_num, ghosts_num:-ghosts_num] + dset_h = pd_h[patch_h.box] + dset_H = pd_H[patch_H.box] std = np.std(dset_h - dset_H) print("dim = {}, sigma(user v - actual v) = {}".format(pd_H.ndim, std)) - assert( std < 0.06 ) # empirical value obtained from print just above - - # for h_, H_ in zip(dset_h, dset_H): - # np.testing.assert_almost_equal(h_, H_, decimal=1) - + assert std < 0.062 # empirical value obtained from print just above fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, figsize=(6, 8)) @@ -231,13 +246,23 @@ def assert_close_enough(h, H): h1 = r.GetMassDensity(time) h2 = r.GetNi(time) - H1 = hierarchy_from(hier=h1, func=ions_mass_density_func1d, masses=masses, densities=(densityMain_1d, 
densityBeam_1d)) - H2 = hierarchy_from(hier=h2, func=ions_charge_density_func1d, charges=charges, densities=(densityMain_1d, densityBeam_1d)) + H1 = hierarchy_from( + hier=h1, + func=ions_mass_density_func1d, + masses=masses, + densities=(densityMain_1d, densityBeam_1d), + ) + H2 = hierarchy_from( + hier=h2, + func=ions_charge_density_func1d, + charges=charges, + densities=(densityMain_1d, densityBeam_1d), + ) assert_close_enough(h1, H1) assert_close_enough(h2, H2) - cycle = plt.rcParams['axes.prop_cycle'].by_key()['color'] + cycle = plt.rcParams["axes.prop_cycle"].by_key()["color"] h1.plot(ax=ax1, ls="-", lw=2.0, color=cycle[0]) H1.plot(ax=ax1, ls="-", lw=2.0, color=cycle[1]) @@ -248,7 +273,6 @@ def assert_close_enough(h, H): ax1.set_title("mass density : 1d") ax2.set_title("charge density : 1d") - # 2d stuffs run_path = os.path.join(os.curdir, "nCheck_2d") time = 0.0 @@ -257,13 +281,23 @@ def assert_close_enough(h, H): h1 = r.GetMassDensity(time) h2 = r.GetNi(time) - H1 = hierarchy_from(hier=h1, func=ions_mass_density_func2d, masses=masses, densities=(densityMain_2d, densityBeam_2d)) - H2 = hierarchy_from(hier=h2, func=ions_charge_density_func2d, charges=charges, densities=(densityMain_2d, densityBeam_2d)) + H1 = hierarchy_from( + hier=h1, + func=ions_mass_density_func2d, + masses=masses, + densities=(densityMain_2d, densityBeam_2d), + ) + H2 = hierarchy_from( + hier=h2, + func=ions_charge_density_func2d, + charges=charges, + densities=(densityMain_2d, densityBeam_2d), + ) assert_close_enough(h1, H1) assert_close_enough(h2, H2) - cmap = mpl.colormaps['viridis'] + cmap = mpl.colormaps["viridis"] h1.plot(ax=ax3, vmin=3.75, vmax=5, cmap=cmap, title="computed mass density : 2d") H1.plot(ax=ax4, vmin=3.75, vmax=5, cmap=cmap, title="expected mass density : 2d") @@ -274,8 +308,5 @@ def assert_close_enough(h, H): plt.savefig("nCheck.pdf", dpi=300) - - # /home/smets/codes/far/PHARE/tests/simulator/initialize - if __name__ == "__main__": main() diff --git 
a/tests/simulator/initialize/test_particles_init_1d.py b/tests/simulator/initialize/test_particles_init_1d.py index 60c44d692..8487a2e0f 100644 --- a/tests/simulator/initialize/test_particles_init_1d.py +++ b/tests/simulator/initialize/test_particles_init_1d.py @@ -68,21 +68,6 @@ def test_domainparticles_have_correct_split_from_coarser_particle( ndim, interp_order, refinement_boxes ) - @data({"cells": 40, "smallest_patch_size": 20, "largest_patch_size": 20}) - def test_no_patch_ghost_on_refined_level_case(self, simInput): - print(f"{self._testMethodName}_{ndim}d") - self._test_patch_ghost_on_refined_level_case(ndim, False, **simInput) - - @data({"cells": 40, "interp_order": 1}) - def test_has_patch_ghost_on_refined_level_case(self, simInput): - print(f"{self._testMethodName}_{ndim}d") - from pyphare.pharein.simulation import check_patch_size - - _, smallest_patch_size = check_patch_size(ndim, **simInput) - simInput["smallest_patch_size"] = smallest_patch_size - simInput["largest_patch_size"] = smallest_patch_size - self._test_patch_ghost_on_refined_level_case(ndim, True, **simInput) - @data("berger", "tile") def test_amr_clustering(self, clustering): dim = 1 diff --git a/tests/simulator/initialize/test_particles_init_2d.py b/tests/simulator/initialize/test_particles_init_2d.py index cc56392f8..0c1045fb5 100644 --- a/tests/simulator/initialize/test_particles_init_2d.py +++ b/tests/simulator/initialize/test_particles_init_2d.py @@ -23,7 +23,7 @@ def per_interp(dic): @ddt -class Initialization1DTest(InitializationTest): +class Initialization2DTest(InitializationTest): @data(*interp_orders) def test_nbr_particles_per_cell_is_as_provided(self, interp_order): print(f"{self._testMethodName}_{ndim}d") @@ -72,36 +72,6 @@ def test_domainparticles_have_correct_split_from_coarser_particle( f"\n{self._testMethodName}_{ndim}d took {self.datetime_diff(now)} seconds" ) - @data( - { - "cells": 40, - "smallest_patch_size": 20, - "largest_patch_size": 20, - "nbr_part_per_cell": ppc, 
- } - ) - def test_no_patch_ghost_on_refined_level_case(self, simInput): - print(f"\n{self._testMethodName}_{ndim}d") - now = self.datetime_now() - self._test_patch_ghost_on_refined_level_case(ndim, False, **simInput) - print( - f"\n{self._testMethodName}_{ndim}d took {self.datetime_diff(now)} seconds" - ) - - @data({"cells": 40, "interp_order": 1, "nbr_part_per_cell": ppc}) - def test_has_patch_ghost_on_refined_level_case(self, simInput): - print(f"\n{self._testMethodName}_{ndim}d") - from pyphare.pharein.simulation import check_patch_size - - _, smallest_patch_size = check_patch_size(ndim, **simInput) - simInput["smallest_patch_size"] = smallest_patch_size - simInput["largest_patch_size"] = smallest_patch_size - now = self.datetime_now() - self._test_patch_ghost_on_refined_level_case(ndim, True, **simInput) - print( - f"\n{self._testMethodName}_{ndim}d took {self.datetime_diff(now)} seconds" - ) - if __name__ == "__main__": unittest.main() diff --git a/tests/simulator/test_advance.py b/tests/simulator/test_advance.py index e3eab36db..4e1989d19 100644 --- a/tests/simulator/test_advance.py +++ b/tests/simulator/test_advance.py @@ -170,11 +170,9 @@ def vthz(*xyz): population_name=pop, ) - for quantity in ["domain", "levelGhost", "patchGhost"]: + for quantity in ["domain", "levelGhost"]: ParticleDiagnostics( - quantity=quantity, - write_timestamps=timestamps, - population_name=pop, + quantity=quantity, write_timestamps=timestamps, population_name=pop ) Simulator(global_vars.sim).run() @@ -205,12 +203,6 @@ def vthz(*xyz): hier=particle_hier, ) - if is_particle_type: - particle_hier = hierarchy_from( - h5_filename=diag_outputs + "/ions_pop_protons_patchGhost.h5", - hier=particle_hier, - ) - if not block_merging_particles and qty == "particles": merge_particles(particle_hier) diff --git a/tests/simulator/test_initialization.py b/tests/simulator/test_initialization.py index 4164f1f9d..0b5de0ce4 100644 --- a/tests/simulator/test_initialization.py +++ 
b/tests/simulator/test_initialization.py @@ -187,7 +187,7 @@ def vthz(*xyz): population_name=pop, ) - for quantity in ["domain", "levelGhost", "patchGhost"]: + for quantity in ["domain", "levelGhost"]: ParticleDiagnostics( quantity=quantity, write_timestamps=np.zeros(time_step_nbr), @@ -222,12 +222,6 @@ def vthz(*xyz): hier=particle_hier, ) - if is_particle_type: - particle_hier = hierarchy_from( - h5_filename=diag_outputs + "/ions_pop_protons_patchGhost.h5", - hier=particle_hier, - ) - if qty == "particles": merge_particles(particle_hier) @@ -696,33 +690,6 @@ def _test_domainparticles_have_correct_split_from_coarser_particle( part2 = coarse_split_particles[pop_name].select(patch.box) self.assertEqual(part1, part2) - def _test_patch_ghost_on_refined_level_case(self, dim, has_patch_ghost, **kwargs): - import pyphare.pharein as ph - - refinement_boxes = {"L0": [nDBox(dim, 10, 19)]} - kwargs["interp_order"] = kwargs.get("interp_order", 1) - kwargs["diag_outputs"] = f"{has_patch_ghost}" - datahier = self.getHierarchy( - ndim=dim, - refinement_boxes=refinement_boxes, - qty="particles_patch_ghost", - **kwargs, - ) - - self.assertTrue( - any( - [ - diagInfo.quantity.endswith("patchGhost") - for diagname, diagInfo in ph.global_vars.sim.diagnostics.items() - ] - ) - ) - nbrPatchGhostPatchDatasOnL1 = sum( - [len(p.patch_datas) for p in datahier.level(1).patches] - ) - - self.assertTrue((nbrPatchGhostPatchDatasOnL1 > 0) == has_patch_ghost) - def _test_levelghostparticles_have_correct_split_from_coarser_particle( self, datahier ): diff --git a/tools/bench/core/numerics/ion_updater/bench_ion_updater.cpp b/tools/bench/core/numerics/ion_updater/bench_ion_updater.cpp index 03c979f13..c3be54f13 100644 --- a/tools/bench/core/numerics/ion_updater/bench_ion_updater.cpp +++ b/tools/bench/core/numerics/ion_updater/bench_ion_updater.cpp @@ -17,10 +17,13 @@ void updater_routine(benchmark::State& state) using ParticleArray = typename PHARE_Types::ParticleArray_t; using Particle_t = 
typename ParticleArray::value_type; using Ions = PHARE::core::UsableIons_t; + using IonUpdater = core::IonUpdater; + using Boxing_t = PHARE::core::UpdaterSelectionBoxing; GridLayout_t layout{cells}; Electromag_t em{layout}; Ions ions{layout, "protons"}; + Boxing_t const boxing{layout, grow(layout.AMRBox(), GridLayout_t::nbrParticleGhosts())}; auto& patch_particles = ions.populations[0].particles; patch_particles.domain_particles.vector() @@ -31,14 +34,14 @@ void updater_routine(benchmark::State& state) initializer::PHAREDict dict; dict["pusher"]["name"] = std::string{"modified_boris"}; - core::IonUpdater ionUpdater_{dict}; + IonUpdater ionUpdater_{dict}; double current_time = 1.0; double new_time = 1.005; auto dt = new_time - current_time; while (state.KeepRunningBatch(1)) // while (state.KeepRunning()) { - ionUpdater_.updatePopulations(ions, em, layout, dt, core::UpdaterMode::domain_only); + ionUpdater_.updatePopulations(ions, em, boxing, dt, core::UpdaterMode::domain_only); ionUpdater_.updateIons(ions); patch_particles.domain_particles = particles_copy; @@ -46,7 +49,7 @@ void updater_routine(benchmark::State& state) = std::get<3>(ions.getRunTimeResourcesViewList()[0].getCompileTimeResourcesViewList()); pack.setBuffer(&patch_particles.pack()); - ionUpdater_.updatePopulations(ions, em, layout, dt, core::UpdaterMode::all); + ionUpdater_.updatePopulations(ions, em, boxing, dt, core::UpdaterMode::all); ionUpdater_.updateIons(ions); } }