diff --git a/.github/workflows/cmake_macos.yml b/.github/workflows/cmake_macos.yml
index 5a6a26752..5f4fb51b9 100644
--- a/.github/workflows/cmake_macos.yml
+++ b/.github/workflows/cmake_macos.yml
@@ -69,10 +69,10 @@ jobs:
python -m pip install -r requirements.txt
- name: Create Build Environment
- run: cmake -E make_directory ${{runner.workspace}}/build
+ run: cmake -E make_directory ${{github.workspace}}/build
- name: Configure CMake
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: |
cmake $GITHUB_WORKSPACE -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
@@ -81,10 +81,10 @@ jobs:
-DCMAKE_CXX_FLAGS="-DPHARE_DIAG_DOUBLES=1 "
- name: Build
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: cmake --build . -j 2
- name: Test
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: ctest -j 2 --output-on-failure
diff --git a/.github/workflows/cmake_ubuntu.yml b/.github/workflows/cmake_ubuntu.yml
index 00397c95e..860e761a8 100644
--- a/.github/workflows/cmake_ubuntu.yml
+++ b/.github/workflows/cmake_ubuntu.yml
@@ -66,19 +66,19 @@ jobs:
python -m pip install -r requirements.txt
- name: Create Build Environment
- run: cmake -E make_directory ${{runner.workspace}}/build
+ run: cmake -E make_directory ${{github.workspace}}/build
- name: Configure CMake
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: |
set -ex
export CC=gcc CXX=g++
[ "${{ matrix.cc }}" = "clang" ] && export CC=clang CXX=clang++
cmake $GITHUB_WORKSPACE
- cd ${{runner.workspace}}/PHARE/subprojects/samrai && mkdir build && cd build
+ cd ${{github.workspace}}/subprojects/samrai && mkdir build && cd build
cmake .. -DENABLE_SAMRAI_TESTS=OFF -DCMAKE_BUILD_TYPE=RelWithDebInfo
make -j2 && sudo make install && cd ../.. && rm -rf samrai
- cd ${{runner.workspace}}/build && rm -rf *
+ cd ${{github.workspace}}/build && rm -rf *
cmake $GITHUB_WORKSPACE -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON --fresh \
-DCMAKE_BUILD_TYPE=Debug -Dasan=OFF \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
@@ -87,9 +87,9 @@ jobs:
-DCMAKE_CXX_FLAGS="-O3 -DPHARE_DIAG_DOUBLES=1 " -Dphare_configurator=ON
- name: Build
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: cmake --build . -j 1
- name: Test
- working-directory: ${{runner.workspace}}/build
+ working-directory: ${{github.workspace}}/build
run: ctest -j 2 --output-on-failure
diff --git a/doc/conventions.md b/doc/conventions.md
new file mode 100644
index 000000000..fc36e63e0
--- /dev/null
+++ b/doc/conventions.md
@@ -0,0 +1,79 @@
+# PHARE Conventions
+
+### Reference document for the code base
+
+
+# Sections
+
+1. C++
+2. Python
+3. CMake
+4. Tests
+5. Etc
+
+
+
+# 1. C++
+
+## 1.1 General
+
+...
+
+
+
+
+# 2. Python
+
+## 2.1 General
+
+...
+
+## 2.2 dependencies and imports
+
+Third-party dependencies are stated in the file `requirements.txt` in the project root.
+Fewer dependencies are generally better, but there should be a cost/benefit assessment before adding new dependencies.
+
+### 2.2.1 Python file import structure.
+
+Generally, we want to avoid importing any dependency at the top of a python script that may rely on binary libraries.
+
+Exceptions to this are things like numpy, which are widely used and tested.
+
+Things to expressly avoid importing at the top of a python script are
+
+- h5py
+- mpi4py
+- scipy.optimize
+
+The first two are noted because they can, and will, pull in system libraries such as libmpi.so and libhdf5.so, which may not be the libraries that were used at PHARE build time; this mismatch can cause issues at runtime.
+
+scipy.optimize relies on system libraries which may not be available at runtime.
+
+The gist here is to only import these libraries at function scope when you actually need them, so that python files can be imported
+or scanned for tests and not cause issues during these operations, until the functions are used at least.
+
+
+
+# 3. CMake
+
+## 3.1 General
+
+...
+
+
+
+
+# 4. Tests
+
+## 4.1 General
+
+...
+
+
+
+# 5. Etc
+
+## 5.1 General
+
+...
+
diff --git a/pyphare/pyphare/cpp/__init__.py b/pyphare/pyphare/cpp/__init__.py
index c66b11d38..e654bef75 100644
--- a/pyphare/pyphare/cpp/__init__.py
+++ b/pyphare/pyphare/cpp/__init__.py
@@ -3,27 +3,10 @@
#
-import json
-
-# continue to use override if set
-_cpp_lib_override = None
-
-
def cpp_lib(override=None):
import importlib
- global _cpp_lib_override
- if override is not None:
- _cpp_lib_override = override
- if _cpp_lib_override is not None:
- return importlib.import_module(_cpp_lib_override)
-
- if not __debug__:
- return importlib.import_module("pybindlibs.cpp")
- try:
- return importlib.import_module("pybindlibs.cpp_dbg")
- except ImportError:
- return importlib.import_module("pybindlibs.cpp")
+ return importlib.import_module("pybindlibs.cpp")
def cpp_etc_lib():
@@ -37,6 +20,8 @@ def build_config():
def build_config_as_json():
+ import json
+
return json.dumps(build_config())
diff --git a/pyphare/pyphare/pharein/diagnostics.py b/pyphare/pyphare/pharein/diagnostics.py
index bd0bce293..61eee5645 100644
--- a/pyphare/pyphare/pharein/diagnostics.py
+++ b/pyphare/pyphare/pharein/diagnostics.py
@@ -307,7 +307,7 @@ def __init__(self, **kwargs):
class ParticleDiagnostics(Diagnostics):
- particle_quantities = ["space_box", "domain", "levelGhost", "patchGhost"]
+ particle_quantities = ["space_box", "domain", "levelGhost"]
type = "particle"
def __init__(self, **kwargs):
diff --git a/pyphare/pyphare/pharesee/hierarchy/fromh5.py b/pyphare/pyphare/pharesee/hierarchy/fromh5.py
index f79c0cbc3..369ac2379 100644
--- a/pyphare/pyphare/pharesee/hierarchy/fromh5.py
+++ b/pyphare/pyphare/pharesee/hierarchy/fromh5.py
@@ -15,13 +15,13 @@
)
from ...core.gridlayout import GridLayout
from .hierarchy_utils import field_qties
-import h5py
+
from pathlib import Path
from pyphare.core.phare_utilities import listify
h5_time_grp_key = "t"
-particle_files_patterns = ("domain", "patchGhost", "levelGhost")
+particle_files_patterns = ("domain", "levelGhost")
def get_all_available_quantities_from_h5(filepath, time=0, exclude=["tags"], hier=None):
@@ -129,6 +129,8 @@ def h5_filename_from(diagInfo):
def get_times_from_h5(filepath, as_float=True):
+ import h5py # see doc/conventions.md section 2.2.1
+
f = h5py.File(filepath, "r")
if as_float:
times = np.array(sorted([float(s) for s in list(f[h5_time_grp_key].keys())]))
@@ -216,6 +218,7 @@ def add_time_from_h5(hier, filepath, time, **kwargs):
# add times to 'hier'
# we may have a different selection box for that time as for already existing times
# but we need to keep them, per time
+ import h5py # see doc/conventions.md section 2.2.1
h5f = h5py.File(filepath, "r")
selection_box = kwargs.get("selection_box", None)
@@ -239,6 +242,8 @@ def add_data_from_h5(hier, filepath, time):
if not hier.has_time(time):
raise ValueError("time does not exist in hierarchy")
+ import h5py # see doc/conventions.md section 2.2.1
+
h5f = h5py.File(filepath, "r")
# force using the hierarchy selection box at that time if existing
@@ -260,6 +265,8 @@ def new_from_h5(filepath, times, **kwargs):
# loads all datasets from the filepath h5 file as patchdatas
# we authorize user to pass only one selection box for all times
# but in this case they're all the same
+ import h5py # see doc/conventions.md section 2.2.1
+
selection_box = kwargs.get("selection_box", [None] * len(times))
if none_iterable(selection_box) and all_iterables(times):
selection_box = [selection_box] * len(times)
diff --git a/pyphare/pyphare/pharesee/hierarchy/fromsim.py b/pyphare/pyphare/pharesee/hierarchy/fromsim.py
index ecdc24600..aa13ea2cc 100644
--- a/pyphare/pyphare/pharesee/hierarchy/fromsim.py
+++ b/pyphare/pyphare/pharesee/hierarchy/fromsim.py
@@ -90,7 +90,7 @@ def hierarchy_from_sim(simulator, qty, pop=""):
# domain... while looping on the patchGhost items, we need to search in
# the already created patches which one to which add the patchGhost particles
- for ghostParticles in ["patchGhost", "levelGhost"]:
+ for ghostParticles in ["levelGhost"]:
if ghostParticles in populationdict:
for dwpatch in populationdict[ghostParticles]:
v = np.asarray(dwpatch.data.v)
diff --git a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py
index b7fbcc220..d3856873d 100644
--- a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py
+++ b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py
@@ -93,24 +93,14 @@ def merge_particles(hierarchy):
(pdname, pd) for pdname, pd in pdatas.items() if "domain" in pdname
][0]
- pghost_pdatas = [
- (pdname, pd)
- for pdname, pd in pdatas.items()
- if "patchGhost" in pdname
- ]
lghost_pdatas = [
(pdname, pd)
for pdname, pd in pdatas.items()
if "levelGhost" in pdname
]
- pghost_pdata = pghost_pdatas[0] if pghost_pdatas else None
lghost_pdata = lghost_pdatas[0] if lghost_pdatas else None
- if pghost_pdata is not None:
- domain_pdata[1].dataset.add(pghost_pdata[1].dataset)
- del pdatas[pghost_pdata[0]]
-
if lghost_pdata is not None:
domain_pdata[1].dataset.add(lghost_pdata[1].dataset)
del pdatas[lghost_pdata[0]]
diff --git a/pyphare/pyphare/pharesee/run/run.py b/pyphare/pyphare/pharesee/run/run.py
index 30f6a25f8..f1513293b 100644
--- a/pyphare/pyphare/pharesee/run/run.py
+++ b/pyphare/pyphare/pharesee/run/run.py
@@ -202,7 +202,7 @@ def GetParticleCount(self, time, **kwargs):
return c
def GetMass(self, pop_name, **kwargs):
- list_of_qty = ["density", "flux", "domain", "levelGhost", "patchGhost"]
+ list_of_qty = ["density", "flux", "domain", "levelGhost"]
list_of_mass = []
import h5py
diff --git a/pyphare/pyphare/pharesee/run/utils.py b/pyphare/pyphare/pharesee/run/utils.py
index e9cb5b57d..d6ffac24c 100644
--- a/pyphare/pyphare/pharesee/run/utils.py
+++ b/pyphare/pyphare/pharesee/run/utils.py
@@ -378,7 +378,7 @@ def _compute_pressure(patch_datas, **kwargs):
Myy = patch_datas["Myy"].dataset[:]
Myz = patch_datas["Myz"].dataset[:]
Mzz = patch_datas["Mzz"].dataset[:]
- massDensity = patch_datas["rho"].dataset[:]
+ massDensity = patch_datas["value"].dataset[:]
Vix = patch_datas["Vx"].dataset[:]
Viy = patch_datas["Vy"].dataset[:]
Viz = patch_datas["Vz"].dataset[:]
@@ -417,10 +417,10 @@ def _compute_pop_pressure(patch_datas, **kwargs):
Myy = patch_datas[popname + "_Myy"].dataset[:]
Myz = patch_datas[popname + "_Myz"].dataset[:]
Mzz = patch_datas[popname + "_Mzz"].dataset[:]
- Fx = patch_datas[popname + "_Fx"].dataset[:]
- Fy = patch_datas[popname + "_Fy"].dataset[:]
- Fz = patch_datas[popname + "_Fz"].dataset[:]
- N = patch_datas[popname + "_rho"].dataset[:]
+ Fx = patch_datas["x"].dataset[:]
+ Fy = patch_datas["y"].dataset[:]
+ Fz = patch_datas["z"].dataset[:]
+ N = patch_datas["value"].dataset[:]
mass = kwargs["mass"]
diff --git a/pyphare/pyphare/simulator/simulator.py b/pyphare/pyphare/simulator/simulator.py
index 435d7b01d..bb812bd7f 100644
--- a/pyphare/pyphare/simulator/simulator.py
+++ b/pyphare/pyphare/simulator/simulator.py
@@ -3,6 +3,7 @@
#
import os
+import sys
import datetime
import atexit
import time as timem
@@ -91,7 +92,7 @@ def __init__(self, simulation, auto_dump=True, **kwargs):
self.cpp_sim = None # BE
self.cpp_dw = None # DRAGONS, i.e. use weakrefs if you have to ref these.
self.post_advance = kwargs.get("post_advance", None)
-
+ self.initialized = False
self.print_eol = "\n"
if kwargs.get("print_one_line", True):
self.print_eol = "\r"
@@ -132,7 +133,6 @@ def setup(self):
)
return self
except Exception:
- import sys
import traceback
print('Exception caught in "Simulator.setup()": {}'.format(sys.exc_info()))
@@ -140,11 +140,9 @@ def setup(self):
raise ValueError("Error in Simulator.setup(), see previous error")
def initialize(self):
- if self.cpp_sim is not None:
- raise ValueError(
- "Simulator already initialized: requires reset to re-initialize"
- )
try:
+ if self.initialized:
+ return
if self.cpp_hier is None:
self.setup()
@@ -152,12 +150,11 @@ def initialize(self):
return self
self.cpp_sim.initialize()
+ self.initialized = True
self._auto_dump() # first dump might be before first advance
return self
except Exception:
- import sys
-
print(
'Exception caught in "Simulator.initialize()": {}'.format(
sys.exc_info()[0]
@@ -166,8 +163,6 @@ def initialize(self):
raise ValueError("Error in Simulator.initialize(), see previous error")
def _throw(self, e):
- import sys
-
print_rank0(e)
sys.exit(1)
@@ -264,6 +259,7 @@ def reset(self):
self.cpp_dw = None
self.cpp_sim = None
self.cpp_hier = None
+ self.initialized = False
if "samrai" in life_cycles:
type(life_cycles["samrai"]).reset()
return self
@@ -289,7 +285,7 @@ def interp_order(self):
return self.cpp_sim.interp_order # constexpr static value
def _check_init(self):
- if self.cpp_sim is None:
+ if not self.initialized:
self.initialize()
def _log_to_file(self):
diff --git a/src/amr/CMakeLists.txt b/src/amr/CMakeLists.txt
index 83eed7bbd..f10a223aa 100644
--- a/src/amr/CMakeLists.txt
+++ b/src/amr/CMakeLists.txt
@@ -11,7 +11,7 @@ set( SOURCES_INC
data/field/coarsening/field_coarsen_index_weight.hpp
data/field/coarsening/coarsen_weighter.hpp
data/field/coarsening/default_field_coarsener.hpp
- data/field/coarsening/magnetic_field_coarsener.hpp
+ data/field/coarsening/electric_field_coarsener.hpp
data/field/field_data.hpp
data/field/field_data_factory.hpp
data/field/field_geometry.hpp
@@ -20,6 +20,7 @@ set( SOURCES_INC
data/field/refine/field_linear_refine.hpp
data/field/refine/field_refiner.hpp
data/field/refine/magnetic_field_refiner.hpp
+ data/field/refine/magnetic_field_regrider.hpp
data/field/refine/electric_field_refiner.hpp
data/field/refine/linear_weighter.hpp
data/field/refine/field_refine_operator.hpp
diff --git a/src/amr/data/field/coarsening/default_field_coarsener.hpp b/src/amr/data/field/coarsening/default_field_coarsener.hpp
index ff1356d7f..e3ed866d1 100644
--- a/src/amr/data/field/coarsening/default_field_coarsener.hpp
+++ b/src/amr/data/field/coarsening/default_field_coarsener.hpp
@@ -2,20 +2,21 @@
#define PHARE_DEFAULT_FIELD_COARSENER_HPP
-#include "core/def/phare_mpi.hpp"
+#include "core/def/phare_mpi.hpp" // IWYU pragma: keep
#include "core/def.hpp"
-#include "core/data/grid/gridlayoutdefs.hpp"
#include "core/utilities/constants.hpp"
#include "core/utilities/point/point.hpp"
+#include "core/data/grid/gridlayoutdefs.hpp"
#include "amr/data/field/coarsening/field_coarsen_index_weight.hpp"
#include "amr/resources_manager/amr_utils.hpp"
#include
-#include
#include
+#include
+
@@ -157,4 +158,6 @@ namespace amr
} // namespace PHARE
+
+
#endif
diff --git a/src/amr/data/field/coarsening/magnetic_field_coarsener.hpp b/src/amr/data/field/coarsening/electric_field_coarsener.hpp
similarity index 56%
rename from src/amr/data/field/coarsening/magnetic_field_coarsener.hpp
rename to src/amr/data/field/coarsening/electric_field_coarsener.hpp
index 39d816413..ce48961e5 100644
--- a/src/amr/data/field/coarsening/magnetic_field_coarsener.hpp
+++ b/src/amr/data/field/coarsening/electric_field_coarsener.hpp
@@ -1,15 +1,13 @@
-#ifndef PHARE_MAGNETIC_FIELD_COARSENER
-#define PHARE_MAGNETIC_FIELD_COARSENER
-
-
-#include "core/def/phare_mpi.hpp"
+#ifndef PHARE_FLUX_SUM_COARSENER
+#define PHARE_FLUX_SUM_COARSENER
#include "core/data/grid/gridlayoutdefs.hpp"
-#include "core/hybrid/hybrid_quantities.hpp"
#include "core/utilities/constants.hpp"
+#include "amr/resources_manager/amr_utils.hpp"
#include
+#include
#include
namespace PHARE::amr
@@ -32,13 +30,13 @@ using core::dirZ;
*
*/
template
-class MagneticFieldCoarsener
+class ElectricFieldCoarsener
{
public:
- MagneticFieldCoarsener(std::array const centering,
+ ElectricFieldCoarsener(std::array const centering,
SAMRAI::hier::Box const& sourceBox,
SAMRAI::hier::Box const& destinationBox,
- SAMRAI::hier::IntVector const& ratio)
+ SAMRAI::hier::IntVector const& /*ratio*/)
: centering_{centering}
, sourceBox_{sourceBox}
, destinationBox_{destinationBox}
@@ -55,78 +53,92 @@ class MagneticFieldCoarsener
core::Point fineStartIndex;
- fineStartIndex[dirX] = coarseIndex[dirX] * this->ratio_;
-
- if constexpr (dimension > 1)
+ for (auto i = std::size_t{0}; i < dimension; ++i)
{
- fineStartIndex[dirY] = coarseIndex[dirY] * this->ratio_;
- if constexpr (dimension > 2)
- {
- fineStartIndex[dirZ] = coarseIndex[dirZ] * this->ratio_;
- }
+ fineStartIndex[i] = coarseIndex[i] * this->ratio_;
}
fineStartIndex = AMRToLocal(fineStartIndex, sourceBox_);
coarseIndex = AMRToLocal(coarseIndex, destinationBox_);
- // the following kinda assumes where B is, i.e. Yee layout centering
- // as it only does faces pirmal-dual, dual-primal and dual-dual
-
if constexpr (dimension == 1)
{
- // in 1D div(B) is automatically satisfied so using this coarsening
- // opertor is probably not better than the default one, but we do that
- // for a kind of consistency...
- // coarse flux is equal to fine flux and we're 1D so there is flux partitioned
- // only for By and Bz, Bx is equal to the fine value
-
- if (centering_[dirX] == core::QtyCentering::primal) // bx
- {
- coarseField(coarseIndex[dirX]) = fineField(fineStartIndex[dirX]);
- }
- else if (centering_[dirX] == core::QtyCentering::dual) // by and bz
+ if (centering_[dirX] == core::QtyCentering::dual) // ex
{
coarseField(coarseIndex[dirX])
= 0.5 * (fineField(fineStartIndex[dirX] + 1) + fineField(fineStartIndex[dirX]));
}
+ else if (centering_[dirX] == core::QtyCentering::primal) // ey, ez
+ {
+ coarseField(coarseIndex[dirX]) = fineField(fineStartIndex[dirX]);
+ }
}
if constexpr (dimension == 2)
{
- if (centering_[dirX] == core::QtyCentering::primal
- and centering_[dirY] == core::QtyCentering::dual)
+ if (centering_[dirX] == core::QtyCentering::dual
+ and centering_[dirY] == core::QtyCentering::primal) // ex
{
coarseField(coarseIndex[dirX], coarseIndex[dirY])
= 0.5
* (fineField(fineStartIndex[dirX], fineStartIndex[dirY])
- + fineField(fineStartIndex[dirX], fineStartIndex[dirY] + 1));
+ + fineField(fineStartIndex[dirX] + 1, fineStartIndex[dirY]));
}
- else if (centering_[dirX] == core::QtyCentering::dual
- and centering_[dirY] == core::QtyCentering::primal)
+ else if (centering_[dirX] == core::QtyCentering::primal
+ and centering_[dirY] == core::QtyCentering::dual) // ey
{
coarseField(coarseIndex[dirX], coarseIndex[dirY])
= 0.5
* (fineField(fineStartIndex[dirX], fineStartIndex[dirY])
- + fineField(fineStartIndex[dirX] + 1, fineStartIndex[dirY]));
+ + fineField(fineStartIndex[dirX], fineStartIndex[dirY] + 1));
}
- else if (centering_[dirX] == core::QtyCentering::dual
- and centering_[dirY] == core::QtyCentering::dual)
+ else if (centering_[dirX] == core::QtyCentering::primal
+ and centering_[dirY] == core::QtyCentering::primal) // ez
{
coarseField(coarseIndex[dirX], coarseIndex[dirY])
- = 0.25
- * (fineField(fineStartIndex[dirX], fineStartIndex[dirY])
- + fineField(fineStartIndex[dirX] + 1, fineStartIndex[dirY])
- + fineField(fineStartIndex[dirX], fineStartIndex[dirY] + 1)
- + fineField(fineStartIndex[dirX] + 1, fineStartIndex[dirY] + 1));
+ = fineField(fineStartIndex[dirX], fineStartIndex[dirY]);
}
else
{
- throw std::runtime_error("no magnetic field should end up here");
+ throw std::runtime_error("no electric field should end up here");
}
}
else if constexpr (dimension == 3)
{
- throw std::runtime_error("Not Implemented yet");
+ if (centering_[dirX] == core::QtyCentering::dual
+ and centering_[dirY] == core::QtyCentering::primal
+ and centering_[dirZ] == core::QtyCentering::primal) // ex
+ {
+ coarseField(coarseIndex[dirX], coarseIndex[dirY], coarseIndex[dirZ])
+ = 0.5
+ * (fineField(fineStartIndex[dirX], fineStartIndex[dirY], fineStartIndex[dirZ])
+ + fineField(fineStartIndex[dirX] + 1, fineStartIndex[dirY],
+ fineStartIndex[dirZ]));
+ }
+ else if (centering_[dirX] == core::QtyCentering::primal
+ and centering_[dirY] == core::QtyCentering::dual
+ and centering_[dirZ] == core::QtyCentering::primal) // ey
+ {
+ coarseField(coarseIndex[dirX], coarseIndex[dirY], coarseIndex[dirZ])
+ = 0.5
+ * (fineField(fineStartIndex[dirX], fineStartIndex[dirY], fineStartIndex[dirZ])
+ + fineField(fineStartIndex[dirX], fineStartIndex[dirY] + 1,
+ fineStartIndex[dirZ]));
+ }
+ else if (centering_[dirX] == core::QtyCentering::primal
+ and centering_[dirY] == core::QtyCentering::primal
+ and centering_[dirZ] == core::QtyCentering::dual) // ez
+ {
+ coarseField(coarseIndex[dirX], coarseIndex[dirY], coarseIndex[dirZ])
+ = 0.5
+ * (fineField(fineStartIndex[dirX], fineStartIndex[dirY], fineStartIndex[dirZ])
+ + fineField(fineStartIndex[dirX], fineStartIndex[dirY],
+ fineStartIndex[dirZ] + 1));
+ }
+ else
+ {
+ throw std::runtime_error("no electric field should end up here");
+ }
}
}
@@ -136,5 +148,7 @@ class MagneticFieldCoarsener
SAMRAI::hier::Box const destinationBox_;
static int constexpr ratio_ = 2;
};
+
} // namespace PHARE::amr
+
#endif
diff --git a/src/amr/data/field/coarsening/field_coarsen_operator.hpp b/src/amr/data/field/coarsening/field_coarsen_operator.hpp
index 02ff02029..dfb1bc107 100644
--- a/src/amr/data/field/coarsening/field_coarsen_operator.hpp
+++ b/src/amr/data/field/coarsening/field_coarsen_operator.hpp
@@ -1,20 +1,89 @@
#ifndef PHARE_FIELD_DATA_COARSEN_HPP
#define PHARE_FIELD_DATA_COARSEN_HPP
-
-#include "core/def/phare_mpi.hpp"
+#include "core/def/phare_mpi.hpp" // IWYU pragma: keep
+#include "core/utilities/constants.hpp"
+#include "core/utilities/point/point.hpp"
+#include "amr/data/tensorfield/tensor_field_data.hpp"
#include "amr/data/field/field_data.hpp"
#include "amr/data/field/field_geometry.hpp"
+
#include "default_field_coarsener.hpp"
-#include "core/utilities/constants.hpp"
-#include "core/utilities/point/point.hpp"
#include
#include
#include
+namespace PHARE::amr
+{
+
+
+template
+void coarsen_field(Dst& destinationField, auto& sourceField, auto& intersectionBox, auto& coarsener)
+{
+ auto constexpr static dimension = Dst::dimension;
+
+ // now we can loop over the intersection box
+
+ core::Point startIndex;
+ core::Point endIndex;
+
+ startIndex[dirX] = intersectionBox.lower(dirX);
+ endIndex[dirX] = intersectionBox.upper(dirX);
+
+ if constexpr (dimension > 1)
+ {
+ startIndex[dirY] = intersectionBox.lower(dirY);
+ endIndex[dirY] = intersectionBox.upper(dirY);
+ }
+ if constexpr (dimension > 2)
+ {
+ startIndex[dirZ] = intersectionBox.lower(dirZ);
+ endIndex[dirZ] = intersectionBox.upper(dirZ);
+ }
+
+ if constexpr (dimension == 1)
+ {
+ for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
+ {
+ coarsener(sourceField, destinationField, {{ix}});
+ }
+ }
+
+
+ else if constexpr (dimension == 2)
+ {
+ for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
+ {
+ for (int iy = startIndex[dirY]; iy <= endIndex[dirY]; ++iy)
+ {
+ coarsener(sourceField, destinationField, {{ix, iy}});
+ }
+ }
+ }
+
+
+ else if constexpr (dimension == 3)
+ {
+ for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
+ {
+ for (int iy = startIndex[dirY]; iy <= endIndex[dirY]; ++iy)
+ {
+ for (int iz = startIndex[dirZ]; iz <= endIndex[dirZ]; ++iz)
+
+ {
+ coarsener(sourceField, destinationField, {{ix, iy, iz}});
+ }
+ }
+ }
+ } // end 3D
+}
+
+} // namespace PHARE::amr
+
+
namespace PHARE
{
namespace amr
@@ -30,9 +99,6 @@ namespace amr
*/
class FieldCoarsenOperator : public SAMRAI::hier::CoarsenOperator
{
- static constexpr std::size_t n_ghosts
- = GridLayoutT::template nbrGhosts();
-
public:
static constexpr std::size_t dimension = GridLayoutT::dimension;
using FieldDataT = FieldData;
@@ -79,15 +145,15 @@ namespace amr
- /** @brief given a coarseBox, coarse data from the fine patch on the intersection of this
- * box and the box of the destination (the box of the coarse patch).
+ /** @brief given a coarseBox, coarse data from the fine patch on the intersection of
+ * this box and the box of the destination (the box of the coarse patch).
*
* This method will extract fieldData from the two patches, and then
* get the Field and GridLayout encapsulated into the fieldData.
* With the help of FieldGeometry, transform the coarseBox to the correct index.
* After that we can now create FieldCoarsen with the indexAndWeight implementation
- * selected. Finnaly loop over the indexes in the box, and apply the coarsening defined in
- * FieldCoarsen operator
+ * selected. Finally loop over the indexes in the box, and apply the coarsening defined
+ * in FieldCoarsen operator
*
*/
void coarsen(SAMRAI::hier::Patch& destinationPatch, SAMRAI::hier::Patch const& sourcePatch,
@@ -106,87 +172,135 @@ namespace amr
// in coarseIt operator
auto const& qty = destinationField.physicalQuantity();
-
-
// We get different boxes : destination , source, restrictBoxes
// and transform them in the correct indexing.
auto destPData = destinationPatch.getPatchData(destinationId);
auto srcPData = sourcePatch.getPatchData(sourceId);
+ auto destGBox = FieldGeometryT::toFieldBox(destPData->getGhostBox(), qty, destLayout);
+ auto srcGBox = FieldGeometryT::toFieldBox(srcPData->getGhostBox(), qty, sourceLayout);
+ auto coarseLayout = FieldGeometryT::layoutFromBox(coarseBox, destLayout);
+ auto coarseFieldBox = FieldGeometryT::toFieldBox(coarseBox, qty, coarseLayout);
+ auto const intersectionBox = destGBox * coarseFieldBox;
+ // We can now create the coarsening operator
+ FieldCoarsenerPolicy coarsener{destLayout.centering(qty), srcGBox, destGBox, ratio};
- auto destGBox = FieldGeometryT::toFieldBox(destPData->getGhostBox(), qty, destLayout);
- auto srcGBox = FieldGeometryT::toFieldBox(srcPData->getGhostBox(), qty, sourceLayout);
+ coarsen_field(destinationField, sourceField, intersectionBox, coarsener);
+ }
+ };
+} // namespace amr
+} // namespace PHARE
- auto coarseLayout = FieldGeometryT::layoutFromBox(coarseBox, destLayout);
- auto coarseFieldBox = FieldGeometryT::toFieldBox(coarseBox, qty, coarseLayout);
- auto const intersectionBox = destGBox * coarseFieldBox;
+namespace PHARE::amr
+{
- // We can now create the coarsening operator
- FieldCoarsenerPolicy coarsener{destLayout.centering(qty), srcGBox, destGBox, ratio};
+template
+class TensorFieldCoarsenOperator : public SAMRAI::hier::CoarsenOperator
+{
+public:
+ static constexpr std::size_t dimension = GridLayoutT::dimension;
+ using TensorFieldDataT = TensorFieldData;
+ using FieldDataT = FieldData;
- // now we can loop over the intersection box
+ static constexpr std::size_t N = TensorFieldDataT::N;
- core::Point startIndex;
- core::Point endIndex;
+ TensorFieldCoarsenOperator()
+ : SAMRAI::hier::CoarsenOperator("FieldDataCoarsenOperator")
+ {
+ }
- startIndex[dirX] = intersectionBox.lower(dirX);
- endIndex[dirX] = intersectionBox.upper(dirX);
+ TensorFieldCoarsenOperator(TensorFieldCoarsenOperator const&) = delete;
+ TensorFieldCoarsenOperator(TensorFieldCoarsenOperator&&) = delete;
+ TensorFieldCoarsenOperator& operator=(TensorFieldCoarsenOperator const&) = delete;
+ TensorFieldCoarsenOperator&& operator=(TensorFieldCoarsenOperator&&) = delete;
- if constexpr (dimension > 1)
- {
- startIndex[dirY] = intersectionBox.lower(dirY);
- endIndex[dirY] = intersectionBox.upper(dirY);
- }
- if constexpr (dimension > 2)
- {
- startIndex[dirZ] = intersectionBox.lower(dirZ);
- endIndex[dirZ] = intersectionBox.upper(dirZ);
- }
- if constexpr (dimension == 1)
- {
- for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
- {
- coarsener(sourceField, destinationField, {{ix}});
- }
- }
+ virtual ~TensorFieldCoarsenOperator() = default;
- else if constexpr (dimension == 2)
- {
- for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
- {
- for (int iy = startIndex[dirY]; iy <= endIndex[dirY]; ++iy)
- {
- coarsener(sourceField, destinationField, {{ix, iy}});
- }
- }
- }
+ /** @brief return the priority of the operator
+ * this return 0, meaning that this operator have the most priority
+ */
+ int getOperatorPriority() const override { return 0; }
- else if constexpr (dimension == 3)
- {
- for (int ix = startIndex[dirX]; ix <= endIndex[dirX]; ++ix)
- {
- for (int iy = startIndex[dirY]; iy <= endIndex[dirY]; ++iy)
- {
- for (int iz = startIndex[dirZ]; iz <= endIndex[dirZ]; ++iz)
-
- {
- coarsener(sourceField, destinationField, {{ix, iy, iz}});
- }
- }
- }
- } // end 3D
+ /** @brief Return the stencil width associated with the coarsening operator.
+ *
+ * The SAMRAI transfer routines guarantee that the source patch will contain
+ * sufficient ghostCell data surrounding the interior to satisfy the stencil
+ * width requirements for each coarsening operator.
+ *
+ * In our case a stencil width of 2 is returned below; verify this is sufficient for the supported refinement ratios
+ */
+ SAMRAI::hier::IntVector getStencilWidth(SAMRAI::tbox::Dimension const& dim) const override
+ {
+ return SAMRAI::hier::IntVector{dim, 2};
+ }
+
+
+
+
+ /** @brief given a coarseBox, coarse data from the fine patch on the intersection of
+ * this box and the box of the destination (the box of the coarse patch).
+ *
+ * This method will extract fieldData from the two patches, and then
+ * get the Field and GridLayout encapsulated into the fieldData.
+ * With the help of FieldGeometry, transform the coarseBox to the correct index.
+ * After that we can now create FieldCoarsen with the indexAndWeight implementation
+ * selected. Finally loop over the indexes in the box, and apply the coarsening defined
+ * in FieldCoarsen operator
+ *
+ */
+ void coarsen(SAMRAI::hier::Patch& destinationPatch, SAMRAI::hier::Patch const& sourcePatch,
+ int const destinationId, int const sourceId, SAMRAI::hier::Box const& coarseBox,
+ SAMRAI::hier::IntVector const& ratio) const override
+ {
+ auto& destinationFields = TensorFieldDataT::getFields(destinationPatch, destinationId);
+ auto const& sourceFields = TensorFieldDataT::getFields(sourcePatch, sourceId);
+ auto const& sourceLayout = TensorFieldDataT::getLayout(sourcePatch, sourceId);
+ auto const& destLayout = TensorFieldDataT::getLayout(destinationPatch, destinationId);
+
+
+ // we assume that the quantities are the same
+ // note that an assertion will be raised in coarseIt operator
+
+ for (std::uint16_t c = 0; c < N; ++c)
+ {
+ auto const& qty = destinationFields[c].physicalQuantity();
+ using FieldGeometryT = FieldGeometry>;
+
+
+ // We get different boxes : destination , source, restrictBoxes
+ // and transform them in the correct indexing.
+ auto const& destPData = destinationPatch.getPatchData(destinationId);
+ auto const& srcPData = sourcePatch.getPatchData(sourceId);
+ auto const& destGBox
+ = FieldGeometryT::toFieldBox(destPData->getGhostBox(), qty, destLayout);
+ auto const& srcGBox
+ = FieldGeometryT::toFieldBox(srcPData->getGhostBox(), qty, sourceLayout);
+ auto const& coarseLayout = FieldGeometryT::layoutFromBox(coarseBox, destLayout);
+ auto const& coarseFieldBox = FieldGeometryT::toFieldBox(coarseBox, qty, coarseLayout);
+ auto const intersectionBox = destGBox * coarseFieldBox;
+ // We can now create the coarsening operator
+ FieldCoarsenerPolicy coarsener{destLayout.centering(qty), srcGBox, destGBox, ratio};
+
+ coarsen_field(destinationFields[c], sourceFields[c], intersectionBox, coarsener);
}
- };
-} // namespace amr
-} // namespace PHARE
+ }
+};
+
+template
+using VecFieldCoarsenOperator
+ = TensorFieldCoarsenOperator<1, GridLayoutT, FieldT, FieldCoarsenerPolicy, PhysicalQuantity>;
+
+} // namespace PHARE::amr
#endif
diff --git a/src/amr/data/field/field_data.hpp b/src/amr/data/field/field_data.hpp
index 56801c93c..9f5bd107b 100644
--- a/src/amr/data/field/field_data.hpp
+++ b/src/amr/data/field/field_data.hpp
@@ -1,23 +1,20 @@
#ifndef PHARE_SRC_AMR_FIELD_FIELD_DATA_HPP
#define PHARE_SRC_AMR_FIELD_FIELD_DATA_HPP
+#include "core/def/phare_mpi.hpp" // IWYU pragma: keep
-
-#include "core/def/phare_mpi.hpp"
-
-#include
-#include
-#include
-
+#include "core/logger.hpp"
#include "core/data/grid/gridlayout.hpp"
#include "core/data/grid/gridlayout_impl.hpp"
#include "amr/resources_manager/amr_utils.hpp"
#include "field_geometry.hpp"
+#include "core/data/field/field_box.hpp"
-#include "core/logger.hpp"
-#include
+#include
+#include
+#include
namespace PHARE
{
@@ -40,13 +37,17 @@ namespace amr
typename PhysicalQuantity = decltype(std::declval().physicalQuantity())>
class FieldData : public SAMRAI::hier::PatchData
{
- using Super = SAMRAI::hier::PatchData;
+ using Super = SAMRAI::hier::PatchData;
+ using value_type = Grid_t::value_type;
+ using SetEqualOp = core::Equals;
public:
static constexpr std::size_t dimension = GridLayoutT::dimension;
static constexpr std::size_t interp_order = GridLayoutT::interp_order;
using Geometry = FieldGeometry;
using gridlayout_type = GridLayoutT;
+ static constexpr auto NO_ROTATE = SAMRAI::hier::Transformation::NO_ROTATE;
+
/*** \brief Construct a FieldData from information associated to a patch
*
@@ -126,24 +127,19 @@ namespace amr
// quantity_ using the source gridlayout to accomplish that we get the interior box,
// from the FieldData.
- SAMRAI::hier::Box sourceBox = Geometry::toFieldBox(fieldSource.getGhostBox(), quantity_,
- fieldSource.gridLayout);
-
+ SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox(
+ fieldSource.getGhostBox(), quantity_, fieldSource.gridLayout);
- SAMRAI::hier::Box destinationBox
+ SAMRAI::hier::Box const destinationBox
= Geometry::toFieldBox(this->getGhostBox(), quantity_, this->gridLayout);
// Given the two boxes in correct space we just have to intersect them
- SAMRAI::hier::Box intersectionBox = sourceBox * destinationBox;
+ SAMRAI::hier::Box const intersectionBox = sourceBox * destinationBox;
if (!intersectionBox.empty())
{
- auto const& sourceField = fieldSource.field;
- auto& destinationField = field;
-
// We can copy field from the source to the destination on the correct region
- copy_(intersectionBox, sourceBox, destinationBox, fieldSource, sourceField,
- destinationField);
+ copy_(intersectionBox, sourceBox, destinationBox, fieldSource, field);
}
}
@@ -209,7 +205,7 @@ namespace amr
*/
std::size_t getDataStreamSize(const SAMRAI::hier::BoxOverlap& overlap) const final
{
- return getDataStreamSize_(overlap);
+ return getDataStreamSize_(overlap);
}
@@ -223,37 +219,29 @@ namespace amr
{
PHARE_LOG_SCOPE(3, "packStream");
- // getDataStreamSize_ mean that we want to apply the transformation
- std::size_t expectedSize = getDataStreamSize_(overlap) / sizeof(double);
- std::vector buffer;
- buffer.reserve(expectedSize);
-
auto& fieldOverlap = dynamic_cast(overlap);
SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation();
- if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE)
- {
- SAMRAI::hier::BoxContainer const& boxContainer
- = fieldOverlap.getDestinationBoxContainer();
- for (auto const& box : boxContainer)
- {
- auto const& source = field;
- SAMRAI::hier::Box sourceBox
- = Geometry::toFieldBox(getGhostBox(), quantity_, gridLayout);
+ if (transformation.getRotation() != NO_ROTATE)
+ throw std::runtime_error("Rotations are not supported in PHARE");
- SAMRAI::hier::Box packBox{box};
+ std::vector buffer;
+ buffer.reserve(getDataStreamSize_(overlap) / sizeof(double));
- // Since the transformation, allow to transform the source box,
- // into the destination box space, and that the box in the boxContainer
- // are in destination space, we have to use the inverseTransform
- // to get into source space
- transformation.inverseTransform(packBox);
- packBox = packBox * sourceBox;
+ for (auto const& box : fieldOverlap.getDestinationBoxContainer())
+ {
+ SAMRAI::hier::Box packBox{box};
- internals_.packImpl(buffer, source, packBox, sourceBox);
- }
+ // Since the transformation, allow to transform the source box,
+ // into the destination box space, and that the box in the boxContainer
+ // are in destination space, we have to use the inverseTransform
+ // to get into source space
+ transformation.inverseTransform(packBox);
+
+ core::FieldBox src{field, gridLayout,
+ phare_box_from(packBox)};
+ src.append_to(buffer);
}
- // throw, we don't do rotations in phare....
// Once we have fill the buffer, we send it on the stream
stream.pack(buffer.data(), buffer.size());
@@ -261,50 +249,43 @@ namespace amr
-
- /*** \brief Unserialize data contained on the stream, that comes from a region covered by
- * the overlap, and fill the data where is needed.
+ /*** \brief Unserialize data contained on the stream, that comes from a region covered
+ * by the overlap, and fill the data where is needed.
*/
void unpackStream(SAMRAI::tbox::MessageStream& stream,
const SAMRAI::hier::BoxOverlap& overlap) final
{
- PHARE_LOG_SCOPE(3, "unpackStream");
-
- // For unpacking we need to know how much element we will need to
- // extract
- std::size_t expectedSize = getDataStreamSize(overlap) / sizeof(double);
+ unpackStream(stream, overlap, field);
+ }
- std::vector buffer;
- buffer.resize(expectedSize, 0.);
+ template
+ void unpackStream(SAMRAI::tbox::MessageStream& stream,
+ const SAMRAI::hier::BoxOverlap& overlap, Grid_t& dst_grid)
+ {
+ PHARE_LOG_SCOPE(3, "unpackStream");
auto& fieldOverlap = dynamic_cast(overlap);
- // We flush a portion of the stream on the buffer.
- stream.unpack(buffer.data(), expectedSize);
-
- SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation();
- if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE)
- {
- // Here the seek counter will be used to index buffer
- std::size_t seek = 0;
-
- SAMRAI::hier::BoxContainer const& boxContainer
- = fieldOverlap.getDestinationBoxContainer();
- for (auto const& box : boxContainer)
- {
- // For unpackStream, there is no transformation needed, since all the box
- // are on the destination space
+ if (fieldOverlap.getTransformation().getRotation() != NO_ROTATE)
+ throw std::runtime_error("Rotations are not supported in PHARE");
- auto& source = field;
- SAMRAI::hier::Box destination
- = Geometry::toFieldBox(getGhostBox(), quantity_, gridLayout);
+ // For unpacking we need to know how much element we will need to extract
+ std::vector buffer(getDataStreamSize(overlap) / sizeof(value_type), 0.);
+ // We flush a portion of the stream on the buffer.
+ stream.unpack(buffer.data(), buffer.size());
- SAMRAI::hier::Box packBox{box * destination};
-
+ // Here the seek counter will be used to index buffer
+ std::size_t seek = 0;
- internals_.unpackImpl(seek, buffer, source, packBox, destination);
- }
+ // For unpackStream, there is no transformation needed, since all the box
+ // are on the destination space
+ for (auto const& sambox : fieldOverlap.getDestinationBoxContainer())
+ {
+ auto const box = phare_box_from(sambox);
+ core::FieldBox dst{dst_grid, gridLayout, box};
+ dst.template set_from(buffer, seek);
+ seek += box.size();
}
}
@@ -337,7 +318,9 @@ namespace amr
}
-
+ void sum(SAMRAI::hier::PatchData const& src, SAMRAI::hier::BoxOverlap const& overlap);
+ void unpackStreamAndSum(SAMRAI::tbox::MessageStream& stream,
+ SAMRAI::hier::BoxOverlap const& overlap);
GridLayoutT gridLayout;
Grid_t field;
@@ -351,28 +334,34 @@ namespace amr
/*** \brief copy data from the intersection box
*
*/
+
+ template
void copy_(SAMRAI::hier::Box const& intersectBox, SAMRAI::hier::Box const& sourceBox,
- SAMRAI::hier::Box const& destinationBox,
- [[maybe_unused]] FieldData const& source, Grid_t const& fieldSource,
+ SAMRAI::hier::Box const& destinationBox, FieldData const& source,
Grid_t& fieldDestination)
{
- // First we represent the intersection that is defined in AMR space to the local space
- // of the source
-
- SAMRAI::hier::Box localSourceBox{AMRToLocal(intersectBox, sourceBox)};
-
- // Then we represent the intersection into the local space of the destination
- SAMRAI::hier::Box localDestinationBox{AMRToLocal(intersectBox, destinationBox)};
-
-
- // We can finally perform the copy of the element in the correct range
- internals_.copyImpl(localSourceBox, fieldSource, localDestinationBox, fieldDestination);
+ // First we represent the intersection that is defined in AMR space to the local
+ // space of the source Then we represent the intersection into the local space of
+ // the destination We can finally perform the copy of the element in the correct
+ // range
+
+ core::FieldBox dst{
+ fieldDestination, gridLayout,
+ as_unsigned_phare_box(AMRToLocal(intersectBox, destinationBox))};
+ core::FieldBox const src{
+ source.field, source.gridLayout,
+ as_unsigned_phare_box(AMRToLocal(intersectBox, sourceBox))};
+ operate_on_fields(dst, src);
}
-
-
void copy_(FieldData const& source, FieldOverlap const& overlap)
+ {
+ copy_(source, overlap, field);
+ }
+
+ template
+ void copy_(FieldData const& source, FieldOverlap const& overlap, Grid_t& dst)
{
// Here the first step is to get the transformation from the overlap
// we transform the box from the source, and from the destination
@@ -384,7 +373,7 @@ namespace amr
SAMRAI::hier::Transformation const& transformation = overlap.getTransformation();
- if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE)
+ if (transformation.getRotation() == NO_ROTATE)
{
SAMRAI::hier::BoxContainer const& boxList = overlap.getDestinationBoxContainer();
@@ -395,29 +384,21 @@ namespace amr
{
for (auto const& box : boxList)
{
- SAMRAI::hier::Box sourceBox = Geometry::toFieldBox(
+ SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox(
source.getGhostBox(), quantity_, source.gridLayout);
-
- SAMRAI::hier::Box destinationBox = Geometry::toFieldBox(
+ SAMRAI::hier::Box const destinationBox = Geometry::toFieldBox(
this->getGhostBox(), quantity_, this->gridLayout);
-
SAMRAI::hier::Box transformedSource{sourceBox};
transformation.transform(transformedSource);
-
- SAMRAI::hier::Box intersectionBox{box * transformedSource * destinationBox};
-
+ SAMRAI::hier::Box const intersectionBox{box * transformedSource
+ * destinationBox};
if (!intersectionBox.empty())
- {
- Grid_t const& sourceField = source.field;
- Grid_t& destinationField = field;
-
- copy_(intersectionBox, transformedSource, destinationBox, source,
- sourceField, destinationField);
- }
+ copy_(intersectionBox, transformedSource, destinationBox,
+ source, dst);
}
}
}
@@ -429,292 +410,66 @@ namespace amr
/*** \brief Compute the maximum amount of memory needed to hold FieldData information on
- * the specified overlap, this version work on the source, or the destination
- * depending on withTransform parameter
+ * the specified overlap
*/
- template
std::size_t getDataStreamSize_(SAMRAI::hier::BoxOverlap const& overlap) const
{
// The idea here is to tell SAMRAI the maximum memory will be used by our type
// on a given region.
-
// throws on failure
auto& fieldOverlap = dynamic_cast(overlap);
if (fieldOverlap.isOverlapEmpty())
- {
return 0;
- }
// TODO: see FieldDataFactory todo of the same function
SAMRAI::hier::BoxContainer const& boxContainer
= fieldOverlap.getDestinationBoxContainer();
- return boxContainer.getTotalSizeOfBoxes() * sizeof(typename Grid_t::type);
- }
-
-
- FieldDataInternals internals_;
- }; // namespace PHARE
-
-
-
-
- // 1D internals implementation
- template
- class FieldDataInternals
- {
- public:
- void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source,
- SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const
- {
- std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0));
- std::uint32_t xDestinationStart
- = static_cast(localDestinationBox.lower(0));
-
- std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0));
- std::uint32_t xDestinationEnd
- = static_cast(localDestinationBox.upper(0));
-
- for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart;
- xSource <= xSourceEnd && xDestination <= xDestinationEnd;
- ++xSource, ++xDestination)
- {
- destination(xDestination) = source(xSource);
- }
- }
-
-
-
- void packImpl(std::vector& buffer, Grid_t const& source,
- SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& sourceBox) const
- {
- int xStart = overlap.lower(0) - sourceBox.lower(0);
- int xEnd = overlap.upper(0) - sourceBox.lower(0);
-
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- buffer.push_back(source(xi));
- }
- }
-
-
-
- void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source,
- SAMRAI::hier::Box const& overlap,
- SAMRAI::hier::Box const& destination) const
- {
- int xStart = overlap.lower(0) - destination.lower(0);
- int xEnd = overlap.upper(0) - destination.lower(0);
-
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- source(xi) = buffer[seek];
- ++seek;
- }
- }
- };
-
-
-
- // 2D internals implementation
- template
- class FieldDataInternals
- {
- public:
- void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source,
- SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const
- {
- std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0));
- std::uint32_t xDestinationStart
- = static_cast(localDestinationBox.lower(0));
-
- std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0));
- std::uint32_t xDestinationEnd
- = static_cast(localDestinationBox.upper(0));
-
- std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1));
- std::uint32_t yDestinationStart
- = static_cast(localDestinationBox.lower(1));
-
- std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1));
- std::uint32_t yDestinationEnd
- = static_cast(localDestinationBox.upper(1));
-
- for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart;
- xSource <= xSourceEnd && xDestination <= xDestinationEnd;
- ++xSource, ++xDestination)
- {
- for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart;
- ySource <= ySourceEnd && yDestination <= yDestinationEnd;
- ++ySource, ++yDestination)
- {
- destination(xDestination, yDestination) = source(xSource, ySource);
- }
- }
- }
-
-
-
-
- void packImpl(std::vector& buffer, Grid_t const& source,
- SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const
-
- {
- int xStart = overlap.lower(0) - destination.lower(0);
- int xEnd = overlap.upper(0) - destination.lower(0);
-
- int yStart = overlap.lower(1) - destination.lower(1);
- int yEnd = overlap.upper(1) - destination.lower(1);
-
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- for (int yi = yStart; yi <= yEnd; ++yi)
- {
- buffer.push_back(source(xi, yi));
- }
- }
- }
-
-
-
-
- void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source,
- SAMRAI::hier::Box const& overlap,
- SAMRAI::hier::Box const& destination) const
- {
- int xStart = overlap.lower(0) - destination.lower(0);
- int xEnd = overlap.upper(0) - destination.lower(0);
-
- int yStart = overlap.lower(1) - destination.lower(1);
- int yEnd = overlap.upper(1) - destination.lower(1);
-
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- for (int yi = yStart; yi <= yEnd; ++yi)
- {
- source(xi, yi) = buffer[seek];
- ++seek;
- }
- }
+ return boxContainer.getTotalSizeOfBoxes() * sizeof(value_type);
}
};
- // 3D internals implementation
- template
- class FieldDataInternals
- {
- public:
- void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source,
- SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const
- {
- std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0));
- std::uint32_t xDestinationStart
- = static_cast(localDestinationBox.lower(0));
-
- std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0));
- std::uint32_t xDestinationEnd
- = static_cast(localDestinationBox.upper(0));
-
- std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1));
- std::uint32_t yDestinationStart
- = static_cast(localDestinationBox.lower(1));
-
- std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1));
- std::uint32_t yDestinationEnd
- = static_cast(localDestinationBox.upper(1));
-
- std::uint32_t zSourceStart = static_cast(localSourceBox.lower(2));
- std::uint32_t zDestinationStart
- = static_cast(localDestinationBox.lower(2));
-
- std::uint32_t zSourceEnd = static_cast(localSourceBox.upper(2));
- std::uint32_t zDestinationEnd
- = static_cast(localDestinationBox.upper(2));
-
- for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart;
- xSource <= xSourceEnd && xDestination <= xDestinationEnd;
- ++xSource, ++xDestination)
- {
- for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart;
- ySource <= ySourceEnd && yDestination <= yDestinationEnd;
- ++ySource, ++yDestination)
- {
- for (std::uint32_t zSource = zSourceStart, zDestination = zDestinationStart;
- zSource <= zSourceEnd && zDestination <= zDestinationEnd;
- ++zSource, ++zDestination)
- {
- destination(xDestination, yDestination, zDestination)
- = source(xSource, ySource, zSource);
- }
- }
- }
- }
-
-
+} // namespace amr
+} // namespace PHARE
- void packImpl(std::vector& buffer, Grid_t const& source,
- SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const
- {
- int xStart = overlap.lower(0) - destination.lower(0);
- int xEnd = overlap.upper(0) - destination.lower(0);
- int yStart = overlap.lower(1) - destination.lower(1);
- int yEnd = overlap.upper(1) - destination.lower(1);
+namespace PHARE::amr
+{
- int zStart = overlap.lower(2) - destination.lower(2);
- int zEnd = overlap.upper(2) - destination.lower(2);
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- for (int yi = yStart; yi <= yEnd; ++yi)
- {
- for (int zi = zStart; zi <= zEnd; ++zi)
- {
- buffer.push_back(source(xi, yi, zi));
- }
- }
- }
- }
+template
+void FieldData::unpackStreamAndSum(
+ SAMRAI::tbox::MessageStream& stream, SAMRAI::hier::BoxOverlap const& overlap)
+{
+ using PlusEqualOp = core::PlusEquals;
+ unpackStream(stream, overlap, field);
+}
- void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source,
- SAMRAI::hier::Box const& overlap,
- SAMRAI::hier::Box const& destination) const
- {
- int xStart = overlap.lower(0) - destination.lower(0);
- int xEnd = overlap.upper(0) - destination.lower(0);
+template
+void FieldData::sum(SAMRAI::hier::PatchData const& src,
+ SAMRAI::hier::BoxOverlap const& overlap)
+{
+ using PlusEqualOp = core::PlusEquals;
- int yStart = overlap.lower(1) - destination.lower(1);
- int yEnd = overlap.upper(1) - destination.lower(1);
+ TBOX_ASSERT_OBJDIM_EQUALITY2(*this, src);
- int zStart = overlap.lower(2) - destination.lower(2);
- int zEnd = overlap.upper(2) - destination.lower(2);
+ auto& fieldOverlap = dynamic_cast(overlap);
+ auto& fieldSource = dynamic_cast(src);
- for (int xi = xStart; xi <= xEnd; ++xi)
- {
- for (int yi = yStart; yi <= yEnd; ++yi)
- {
- for (int zi = zStart; zi <= zEnd; ++zi)
- {
- source(xi, yi, zi) = buffer[seek];
- ++seek;
- }
- }
- }
- }
- };
+ copy_(fieldSource, fieldOverlap, field);
+}
-} // namespace amr
-} // namespace PHARE
+} // namespace PHARE::amr
#endif
diff --git a/src/amr/data/field/field_geometry.hpp b/src/amr/data/field/field_geometry.hpp
index fc424915c..44cee95b1 100644
--- a/src/amr/data/field/field_geometry.hpp
+++ b/src/amr/data/field/field_geometry.hpp
@@ -1,8 +1,6 @@
#ifndef PHARE_SRC_AMR_FIELD_FIELD_GEOMETRY_HPP
#define PHARE_SRC_AMR_FIELD_FIELD_GEOMETRY_HPP
-#include
-#include
#include "core/def/phare_mpi.hpp"
@@ -17,6 +15,7 @@
#include
#include
+#include
namespace PHARE
{
@@ -28,8 +27,6 @@ namespace amr
// generic BoxGeometry into the specific geometry but cannot cast into
// the FieldGeometry below because it does not have the GridLayoutT and
// PhysicalQuantity for template arguments.
- // this class is thus used instead and provide the method pureInteriorFieldBox()
- // used in FieldFillPattern::calculateOverlap()
template
class FieldGeometryBase : public SAMRAI::hier::BoxGeometry
{
@@ -43,11 +40,10 @@ namespace amr
, ghostFieldBox_{ghostFieldBox}
, interiorFieldBox_{interiorFieldBox}
, centerings_{centerings}
- , pureInteriorFieldBox_{pureInteriorBox_(interiorFieldBox, centerings)}
{
}
- auto const& pureInteriorFieldBox() const { return pureInteriorFieldBox_; }
+ auto const& interiorFieldBox() const { return interiorFieldBox_; }
SAMRAI::hier::Box const patchBox;
@@ -55,22 +51,6 @@ namespace amr
SAMRAI::hier::Box const ghostFieldBox_;
SAMRAI::hier::Box const interiorFieldBox_;
std::array const centerings_;
- SAMRAI::hier::Box const pureInteriorFieldBox_;
-
- private:
- static SAMRAI::hier::Box
- pureInteriorBox_(SAMRAI::hier::Box const& interiorFieldBox,
- std::array const& centerings)
- {
- auto noSharedNodeBox{interiorFieldBox};
- SAMRAI::hier::IntVector growth(SAMRAI::tbox::Dimension{dimension});
- for (auto dir = 0u; dir < dimension; ++dir)
- {
- growth[dir] = (centerings[dir] == core::QtyCentering::primal) ? -1 : 0;
- }
- noSharedNodeBox.grow(growth);
- return noSharedNodeBox;
- }
};
template
diff --git a/src/amr/data/field/field_variable.hpp b/src/amr/data/field/field_variable.hpp
index 9d9e82c04..ea85b011f 100644
--- a/src/amr/data/field/field_variable.hpp
+++ b/src/amr/data/field/field_variable.hpp
@@ -29,13 +29,18 @@ namespace amr
*
* FieldVariable represent a data on a patch, it does not contain the data itself,
* after creation, one need to register it with a context : see registerVariableAndContext.
+ *
+ *
+ * Note that `fineBoundaryRepresentsVariable` is set to false so that
+ * coarse-fine interfaces are handled such that copy happens **before**
+ * refining. See https://github.com/LLNL/SAMRAI/issues/292
*/
FieldVariable(std::string const& name, PhysicalQuantity qty,
- bool fineBoundaryRepresentsVariable = true)
- : SAMRAI::hier::Variable(
- name,
- std::make_shared>(
- fineBoundaryRepresentsVariable, computeDataLivesOnPatchBorder_(qty), name, qty))
+ bool fineBoundaryRepresentsVariable = false)
+ : SAMRAI::hier::Variable(name,
+ std::make_shared>(
+ fineBoundaryRepresentsVariable,
+ computeDataLivesOnPatchBorder_(qty), name, qty))
, fineBoundaryRepresentsVariable_{fineBoundaryRepresentsVariable}
, dataLivesOnPatchBorder_{computeDataLivesOnPatchBorder_(qty)}
{
diff --git a/src/amr/data/field/field_variable_fill_pattern.hpp b/src/amr/data/field/field_variable_fill_pattern.hpp
index c890a2944..9f0ee56e9 100644
--- a/src/amr/data/field/field_variable_fill_pattern.hpp
+++ b/src/amr/data/field/field_variable_fill_pattern.hpp
@@ -1,43 +1,32 @@
#ifndef PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP
#define PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP
-#include
-
+#include "amr/data/tensorfield/tensor_field_overlap.hpp"
+#include "core/logger.hpp"
#include "core/def/phare_mpi.hpp"
-#include "SAMRAI/xfer/VariableFillPattern.h"
+#include
+#include "core/data/tensorfield/tensorfield.hpp"
+#include
+#include "amr/data/field/field_geometry.hpp"
+#include "amr/data/tensorfield/tensor_field_geometry.hpp"
+
+#include
+#include "SAMRAI/xfer/VariableFillPattern.h"
#include "core/utilities/types.hpp"
-#include "core/utilities/mpi_utils.hpp"
-#include "amr/data/field/refine/field_refine_operator.hpp"
+
+#include
+#include
namespace PHARE::amr
{
/*
- This class is used from multiple schedules
- To know which schedule we are coming from, we have `std::optional opt_overwrite_interior_`
-
- the modes are :
-
- 1. To synchronize primal nodes on shared patch borders
- e.g. hybrid_hybrid_messenger_strategy.hpp
- HybridHybridMessengerStrategy::magneticSharedNodes_
-
- in this case, the fillPattern is constructed
- with "std::optional opt_overwrite_interior_ == std::nullopt",
- we set the forwarding flag of "bool overwrite_interior" to true by default
- and it is only set to false for one of the 2 patches involved in the overlap
- so that only one process assigns its value to the shared border node
- We also remove the exclusive interior of the src patch to isolate only shared primal
- nodes.
-
- 2. To synchronize pure ghost values from src domain values
- in that case, the optional is set to "false" and overwrite_interior takes this value
- none of the two patches overwrites the shared border nodes and only pure ghost nodes are
- filled.
+ This class is used to synchronize pure ghost values from src domain values
+ in that case, we default overwrite_interior to "false" so none of the two patches overwrites
+ the shared border nodes and only pure ghost nodes are filled.
Notes on shared-node overwrite interior: https://github.com/LLNL/SAMRAI/issues/170
-
*/
// This class is mostly a copy of BoxGeometryVariableFillPattern
template
@@ -46,19 +35,11 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern
constexpr static std::size_t dim = dimension;
public:
- FieldFillPattern(std::optional overwrite_interior)
- : opt_overwrite_interior_{overwrite_interior}
- {
- }
+ // defaulted param makes this the default constructor also
+ FieldFillPattern(bool overwrite_interior = false)
+ : overwrite_interior_{overwrite_interior}
- static auto make_shared(std::shared_ptr const& samrai_op)
{
- auto const& op = dynamic_cast(*samrai_op);
-
- if (op.node_only)
- return std::make_shared>(std::nullopt);
-
- return std::make_shared>(false);
}
@@ -69,51 +50,64 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern
SAMRAI::hier::BoxGeometry const& src_geometry,
SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask,
SAMRAI::hier::Box const& fill_box, bool const fn_overwrite_interior,
- SAMRAI::hier::Transformation const& transformation) const
+ SAMRAI::hier::Transformation const& transformation) const override
{
#ifndef DEBUG_CHECK_DIM_ASSERTIONS
NULL_USE(dst_patch_box);
#endif
TBOX_ASSERT_OBJDIM_EQUALITY2(dst_patch_box, src_mask);
- bool overwrite_interior = true; // replace func param
- assert(fn_overwrite_interior == overwrite_interior);
+ assert(fn_overwrite_interior == true); // expect default as true
- if (opt_overwrite_interior_) // not node only
- {
- // this sets overwrite_interior to false
- overwrite_interior = *opt_overwrite_interior_;
- }
+ return dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, overwrite_interior_,
- // opt_overwrite_interior_ is nullopt : assume primal node shared border schedule
- else
- {
- // cast into the Base class to get the pureInteriorFieldBox method
- // see field_geometry.hpp for more explanations about why this base class exists
- auto& dst_cast = dynamic_cast const&>(dst_geometry);
- auto& src_cast = dynamic_cast const&>(src_geometry);
+ transformation);
+ }
- if (src_cast.patchBox.getGlobalId().getOwnerRank()
- != dst_cast.patchBox.getGlobalId().getOwnerRank())
- overwrite_interior
- = src_cast.patchBox.getGlobalId() > dst_cast.patchBox.getGlobalId();
+ /*
+ *************************************************************************
+ *
+ * Compute BoxOverlap that specifies data to be filled by refinement
+ * operator.
+ *
+ *************************************************************************
+ */
+ std::shared_ptr
+ computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes,
+ SAMRAI::hier::BoxContainer const& node_fill_boxes,
+ SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box,
+ SAMRAI::hier::PatchDataFactory const& pdf) const override
+ {
+ NULL_USE(node_fill_boxes);
- auto basic_overlap = dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box,
- overwrite_interior, transformation);
- auto& overlap = dynamic_cast(*basic_overlap);
- auto destinationBoxes = overlap.getDestinationBoxContainer();
- destinationBoxes.removeIntersections(src_cast.pureInteriorFieldBox());
+ /*
+ * For this (default) case, the overlap is simply the intersection of
+ * fill_boxes and data_box.
+ */
+ SAMRAI::hier::Transformation transformation(
+ SAMRAI::hier::IntVector::getZero(patch_box.getDim()));
- return std::make_shared(destinationBoxes, overlap.getTransformation());
- }
+ SAMRAI::hier::BoxContainer overlap_boxes(fill_boxes);
+ overlap_boxes.intersectBoxes(data_box);
- // overwrite_interior is always false here
- return dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, overwrite_interior,
- transformation);
+ auto geom = pdf.getBoxGeometry(patch_box);
+ auto basic_overlap
+ = pdf.getBoxGeometry(patch_box)->setUpOverlap(overlap_boxes, transformation);
+
+ if (overwrite_interior_)
+ // if (true)
+ return basic_overlap;
+
+ auto& overlap = dynamic_cast(*basic_overlap);
+ auto destinationBoxes = overlap.getDestinationBoxContainer();
+ auto& casted = dynamic_cast const&>(*geom);
+ destinationBoxes.removeIntersections(casted.interiorFieldBox());
+
+ return std::make_shared(destinationBoxes, overlap.getTransformation());
}
- std::string const& getPatternName() const { return s_name_id; }
+ std::string const& getPatternName() const override { return s_name_id; }
private:
FieldFillPattern(FieldFillPattern const&) = delete;
@@ -121,7 +115,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern
static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN";
- SAMRAI::hier::IntVector const& getStencilWidth()
+ SAMRAI::hier::IntVector const& getStencilWidth() override
{
TBOX_ERROR("getStencilWidth() should not be\n"
<< "called. This pattern creates overlaps based on\n"
@@ -134,37 +128,276 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern
return SAMRAI::hier::IntVector::getZero(SAMRAI::tbox::Dimension(1));
}
- /*
- *************************************************************************
- *
- * Compute BoxOverlap that specifies data to be filled by refinement
- * operator.
- *
- *************************************************************************
- */
+ bool overwrite_interior_;
+};
+
+
+template
+class TensorFieldFillPattern : public SAMRAI::xfer::VariableFillPattern
+{
+ static constexpr std::size_t N = core::detail::tensor_field_dim_from_rank();
+
+public:
+ TensorFieldFillPattern(bool overwrite_interior = false)
+ : scalar_fill_pattern_{overwrite_interior}
+ , overwrite_interior_{overwrite_interior}
+ {
+ }
+
+ ~TensorFieldFillPattern() override = default;
+
+ std::shared_ptr
+ calculateOverlap(const SAMRAI::hier::BoxGeometry& dst_geometry,
+ const SAMRAI::hier::BoxGeometry& src_geometry,
+ const SAMRAI::hier::Box& dst_patch_box, const SAMRAI::hier::Box& src_mask,
+ const SAMRAI::hier::Box& fill_box, bool const fn_overwrite_interior,
+ const SAMRAI::hier::Transformation& transformation) const override
+ {
+ return dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, overwrite_interior_,
+ transformation);
+ }
+
std::shared_ptr
computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes,
SAMRAI::hier::BoxContainer const& node_fill_boxes,
SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box,
- SAMRAI::hier::PatchDataFactory const& pdf) const
+ SAMRAI::hier::PatchDataFactory const& pdf) const override
{
- NULL_USE(node_fill_boxes);
-
- /*
- * For this (default) case, the overlap is simply the intersection of
- * fill_boxes and data_box.
- */
SAMRAI::hier::Transformation transformation(
SAMRAI::hier::IntVector::getZero(patch_box.getDim()));
SAMRAI::hier::BoxContainer overlap_boxes(fill_boxes);
overlap_boxes.intersectBoxes(data_box);
- return pdf.getBoxGeometry(patch_box)->setUpOverlap(overlap_boxes, transformation);
+
+ auto basic_overlap
+ = pdf.getBoxGeometry(patch_box)->setUpOverlap(overlap_boxes, transformation);
+
+ if (overwrite_interior_)
+ return basic_overlap;
+
+ auto geom = pdf.getBoxGeometry(patch_box);
+ auto& casted = dynamic_cast const&>(*geom);
+ auto& toverlap = dynamic_cast const&>(*basic_overlap);
+ auto&& interiorTensorFieldBox = casted.interiorTensorFieldBox();
+
+ auto overlaps = core::for_N([&](auto i) {
+ auto& overlap = toverlap[i];
+ auto& interiorFieldBox = interiorTensorFieldBox[i];
+ auto destinationBoxes = overlap->getDestinationBoxContainer();
+ destinationBoxes.removeIntersections(interiorFieldBox);
+
+ return std::make_shared(destinationBoxes, overlap->getTransformation());
+ });
+
+ return std::make_shared>(std::move(overlaps));
}
- std::optional opt_overwrite_interior_{nullptr};
+ std::string const& getPatternName() const override { return s_name_id; }
+
+private:
+ TensorFieldFillPattern(TensorFieldFillPattern const&) = delete;
+ TensorFieldFillPattern& operator=(TensorFieldFillPattern const&) = delete;
+
+ static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN";
+
+ SAMRAI::hier::IntVector const& getStencilWidth() override
+ {
+ TBOX_ERROR("getStencilWidth() should not be called for TensorFieldFillPattern.");
+ return SAMRAI::hier::IntVector::getZero(SAMRAI::tbox::Dimension(1));
+ }
+
+ FieldFillPattern scalar_fill_pattern_;
+ bool overwrite_interior_;
};
+
+// We use this fill pattern to sum the contributions of border fields like rho and flux
+/** \brief VariableFillPattern that is used to fill incomplete ghost domain moment nodes
+ *
+ * After deposition of domain particles, some domain and ghost nodes lack contributions
+ * from particle that exist on a neighboring patch.
+ * The extent of incomplete nodes in the ghost layer and in domain depends on interp order.
+ *
+ * Example, at interpolation order 1, only the border node will be incomplete after
+ * depositing domain particles since these hit only the two primal nodes surrounding its position.
+ * However, we deposit also leaving domain particles before they are sent to patchGhost particles
+ * and shipped to neighboring patches.
+ * Leaving particles can be found in the first ghost cell from domain, so the first primal
+ * ghost node from domain will also have some incomplete contribution.
+ *
+ * At order 1, thus, there is an overlap of 3 primal nodes that are incomplete:
+ * the shared border node and the first domain and first ghost.
+ *
+ * ghost cells <-|-> patch
+ * + + +
+ * | leaving | domain particles
+ * | particles |
+ *
+ *
+ * As a first try and to keep it simple, this fill pattern simply creates the overlap
+ * that is the intersection of the field ghost boxes of the source and destination patch datas.
+ * That is, at interpolation 1 we have 2 ghost cells thus it is 5 nodes that overlap
+ * even though the outermost ghost should have 0 contribution.
+ *
+ * ghost cells <-|-> patch
+ * + + + + +
+ * ^ | leaving | domain particles
+ * | | particles |
+ * 0
+ * */
+template // ASSUMED ALL PRIMAL!
+class FieldGhostInterpOverlapFillPattern : public SAMRAI::xfer::VariableFillPattern
+{
+ std::size_t constexpr static dim = Gridlayout_t::dimension;
+ using FieldGeometry_t = FieldGeometryBase;
+
+public:
+ FieldGhostInterpOverlapFillPattern() {}
+ ~FieldGhostInterpOverlapFillPattern() override {}
+
+ std::shared_ptr
+ calculateOverlap(SAMRAI::hier::BoxGeometry const& _dst_geometry,
+ SAMRAI::hier::BoxGeometry const& _src_geometry,
+ SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask,
+ SAMRAI::hier::Box const& fill_box, bool const overwrite_interior,
+ SAMRAI::hier::Transformation const& transformation) const override
+ {
+ PHARE_LOG_SCOPE(3, "FieldGhostInterpOverlapFillPattern::calculateOverlap");
+
+ // Skip if src and dst are the same
+ if (phare_box_from(dst_patch_box) == phare_box_from(src_mask))
+ return std::make_shared(SAMRAI::hier::BoxContainer{}, transformation);
+
+ if (dynamic_cast(&_dst_geometry))
+ return calculateOverlap(dynamic_cast(_dst_geometry),
+ dynamic_cast(_src_geometry),
+ dst_patch_box, src_mask, fill_box, overwrite_interior,
+ transformation);
+ else
+ throw std::runtime_error("bad cast");
+ }
+
+
+ std::shared_ptr static calculateOverlap(
+ auto const& dst_geometry, auto const& src_geometry, SAMRAI::hier::Box const& dst_patch_box,
+ SAMRAI::hier::Box const& src_mask, SAMRAI::hier::Box const& fill_box,
+ bool const overwrite_interior, SAMRAI::hier::Transformation const& transformation)
+ {
+ auto const _primal_ghost_box = [](auto const& box) {
+ auto gb = grow(box, Gridlayout_t::nbrGhosts());
+ gb.upper += 1;
+ return gb;
+ };
+
+ auto const src_ghost_box = [&]() {
+ auto const box = phare_box_from(src_geometry.patchBox);
+ auto const primal_ghost_box = _primal_ghost_box(box);
+ return amr::shift(primal_ghost_box, transformation);
+ }();
+
+ auto const dst_ghost_box = [&]() {
+ auto const box = phare_box_from(dst_geometry.patchBox);
+ return _primal_ghost_box(box);
+ }();
+
+
+ SAMRAI::hier::BoxContainer dest;
+ if (auto overlap = dst_ghost_box * src_ghost_box)
+ dest.push_back(samrai_box_from(*overlap));
+
+ return std::make_shared(dest, transformation);
+ }
+
+ std::string const& getPatternName() const override { return s_name_id; }
+
+private:
+ static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN";
+
+ SAMRAI::hier::IntVector const& getStencilWidth() override
+ {
+ throw std::runtime_error("never called");
+ }
+
+
+ std::shared_ptr
+ computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes,
+ SAMRAI::hier::BoxContainer const& node_fill_boxes,
+ SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box,
+ SAMRAI::hier::PatchDataFactory const& pdf) const override
+ {
+ throw std::runtime_error("no refinement supported or expected");
+ }
+};
+
+template // ASSUMED ALL PRIMAL!
+class TensorFieldGhostInterpOverlapFillPattern : public SAMRAI::xfer::VariableFillPattern
+{
+ std::size_t constexpr static dim = Gridlayout_t::dimension;
+ static constexpr auto N = core::detail::tensor_field_dim_from_rank();
+
+ using TensorFieldGeometry_t = TensorFieldGeometryBase;
+
+public:
+ TensorFieldGhostInterpOverlapFillPattern() {}
+ ~TensorFieldGhostInterpOverlapFillPattern() override {}
+
+ std::shared_ptr
+ calculateOverlap(SAMRAI::hier::BoxGeometry const& _dst_geometry,
+ SAMRAI::hier::BoxGeometry const& _src_geometry,
+ SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask,
+ SAMRAI::hier::Box const& fill_box, bool const overwrite_interior,
+ SAMRAI::hier::Transformation const& transformation) const override
+ {
+ PHARE_LOG_SCOPE(3, "TensorFieldGhostInterpOverlapFillPattern::calculateOverlap");
+
+ // Skip if src and dst are the same
+ if (phare_box_from(dst_patch_box) == phare_box_from(src_mask))
+ {
+ auto overlaps = core::for_N([&](auto /*i*/) {
+ return std::make_shared(SAMRAI::hier::BoxContainer{}, transformation);
+ });
+ return std::make_shared>(std::move(overlaps));
+ }
+
+ if (dynamic_cast(&_dst_geometry))
+ {
+ auto overlaps = core::for_N([&](auto /*i*/) {
+ auto overlap = FieldGhostInterpOverlapFillPattern::calculateOverlap(
+ dynamic_cast(_dst_geometry),
+ dynamic_cast(_src_geometry), dst_patch_box,
+ src_mask, fill_box, overwrite_interior, transformation);
+
+ return std::dynamic_pointer_cast(overlap);
+ });
+ return std::make_shared>(std::move(overlaps));
+ }
+
+ else
+ throw std::runtime_error("bad cast");
+ }
+
+ std::string const& getPatternName() const override { return s_name_id; }
+
+private:
+ static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN";
+
+ SAMRAI::hier::IntVector const& getStencilWidth() override
+ {
+ throw std::runtime_error("never called");
+ }
+
+ std::shared_ptr
+ computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes,
+ SAMRAI::hier::BoxContainer const& node_fill_boxes,
+ SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box,
+ SAMRAI::hier::PatchDataFactory const& pdf) const override
+ {
+ throw std::runtime_error("no refinement supported or expected");
+ }
+};
+
+
+
} // namespace PHARE::amr
#endif /* PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_H */
diff --git a/src/amr/data/field/refine/electric_field_refiner.hpp b/src/amr/data/field/refine/electric_field_refiner.hpp
index aef026e62..4ed495e9e 100644
--- a/src/amr/data/field/refine/electric_field_refiner.hpp
+++ b/src/amr/data/field/refine/electric_field_refiner.hpp
@@ -94,7 +94,8 @@ class ElectricFieldRefiner
//
// therefore in all cases in 1D we just copy the coarse value
//
- fineField(locFineIdx[dirX]) = coarseField(locCoarseIdx[dirX]);
+ if (std::isnan(fineField(locFineIdx[dirX])))
+ fineField(locFineIdx[dirX]) = coarseField(locCoarseIdx[dirX]);
}
template
@@ -119,14 +120,16 @@ class ElectricFieldRefiner
{
// we're on a fine edge shared with coarse mesh
// take the coarse face value
- fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
}
else
{
// we're on a fine edge in between two coarse edges
// we take the average
- fineField(ilfx, ilfy)
- = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx, ilcy + 1));
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy)
+ = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx, ilcy + 1));
}
}
// Ey
@@ -140,14 +143,16 @@ class ElectricFieldRefiner
// both fine Ey e.g. at j=100 and 101 will take j=50 on coarse
// so no need to look at whether jfine is even or odd
// just take the value at the local coarse index
- fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
}
else
{
// we're on a fine edge in between two coarse ones
// we take the average
- fineField(ilfx, ilfy)
- = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy));
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy)
+ = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy));
}
}
// and this is now Ez
@@ -156,19 +161,29 @@ class ElectricFieldRefiner
{
if (onCoarseXFace_(fineIndex) and onCoarseYFace_(fineIndex))
{
- fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy) = coarseField(ilcx, ilcy);
}
else if (onCoarseXFace_(fineIndex))
- fineField(ilfx, ilfy)
- = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx, ilcy + 1));
+ {
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy)
+ = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx, ilcy + 1));
+ }
else if (onCoarseYFace_(fineIndex))
- fineField(ilfx, ilfy)
- = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy));
+ {
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy)
+ = 0.5 * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy));
+ }
else
- fineField(ilfx, ilfy)
- = 0.25
- * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy)
- + coarseField(ilcx, ilcy + 1) + coarseField(ilcx + 1, ilcy + 1));
+ {
+ if (std::isnan(fineField(ilfx, ilfy)))
+ fineField(ilfx, ilfy)
+ = 0.25
+ * (coarseField(ilcx, ilcy) + coarseField(ilcx + 1, ilcy)
+ + coarseField(ilcx, ilcy + 1) + coarseField(ilcx + 1, ilcy + 1));
+ }
}
}
@@ -197,33 +212,37 @@ class ElectricFieldRefiner
// just copy the coarse value
if (onCoarseYFace_(fineIndex) and onCoarseZFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
}
// we share the Y face but not the Z face
// we must be one of the 2 X fine edges on a Y face
// thus we take the average of the two surrounding edges at Z and Z+DZ
else if (onCoarseYFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz)
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy, ilcz + 1));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy, ilcz + 1));
}
// we share a Z face but not the Y face
// we must be one of the 2 X fine edges on a Z face
// we thus take the average of the two X edges at y and y+dy
else if (onCoarseZFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz)
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz));
}
else
{
// we don't share any face thus we're on one of the 2 middle X edges
// we take the average of the 4 surrounding X averages
- fineField(ilfx, ilfy, ilfz)
- = 0.25 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz))
- + 0.25
- * (coarseField(ilcx, ilcy, ilcz + 1)
- + coarseField(ilcx, ilcy + 1, ilcz + 1));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.25 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz))
+ + 0.25
+ * (coarseField(ilcx, ilcy, ilcz + 1)
+ + coarseField(ilcx, ilcy + 1, ilcz + 1));
}
}
// now this is Ey
@@ -235,7 +254,8 @@ class ElectricFieldRefiner
if (onCoarseXFace_(fineIndex) and onCoarseZFace_(fineIndex))
{
// we thus just copy the coarse value
- fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
}
// now we only have same X face, but not (else) the Z face
// so we're a new fine Y edge in between two coarse Y edges
@@ -247,27 +267,30 @@ class ElectricFieldRefiner
// this means we are on a Y edge that lies in between 2 coarse edges
// at z and z+dz
// take the average of these 2 coarse value
- fineField(ilfx, ilfy, ilfz)
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy, ilcz + 1));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy, ilcz + 1));
}
// we're on a Z coarse face, but not on a X coarse face
// we thus must be one of the 2 Y edges on a Z face
// and thus we take the average of the 2 Y edges at X and X+dX
else if (onCoarseZFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz)
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz));
}
// now we're not on any of the coarse faces
// so we must be one of the two Y edge in the middle of the cell
// we thus average over the 4 Y edges of the coarse cell
else
{
- fineField(ilfx, ilfy, ilfz)
- = 0.25
- * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz)
- + coarseField(ilcx, ilcy, ilcz + 1)
- + coarseField(ilcx + 1, ilcy, ilcz + 1));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.25
+ * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz)
+ + coarseField(ilcx, ilcy, ilcz + 1)
+ + coarseField(ilcx + 1, ilcy, ilcz + 1));
}
}
// now let's do Ez
@@ -279,34 +302,38 @@ class ElectricFieldRefiner
// we thus copy the coarse value
if (onCoarseXFace_(fineIndex) and onCoarseYFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz) = coarseField(ilcx, ilcy, ilcz);
}
// here we're on a coarse X face, but not a Y face
// we must be 1 of the 2 Z edges on a X face
// thus we average the 2 surrounding Z coarse edges at Y and Y+dY
else if (onCoarseXFace_(fineIndex))
{
- fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx, ilcy + 1, ilcz));
}
// here we're on a coarse Y face, but not a X face
// we must be 1 of the 2 Z edges on a Y face
// thus we average the 2 surrounding Z coarse edges at X and X+dX
else if (onCoarseYFace_(fineIndex))
{
- fineField(ilfx, ilfy, ilfz)
- = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.5 * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz));
}
// we're not on any coarse face thus must be one of the 2 Z edges
// in the middle of the coarse cell
// we therefore take the average of the 4 surrounding Z edges
else
{
- fineField(ilfx, ilfy, ilfz)
- = 0.25
- * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz)
- + coarseField(ilcx, ilcy + 1, ilcz + 1)
- + coarseField(ilcx + 1, ilcy + 1, ilcz));
+ if (std::isnan(fineField(ilfx, ilfy, ilfz)))
+ fineField(ilfx, ilfy, ilfz)
+ = 0.25
+ * (coarseField(ilcx, ilcy, ilcz) + coarseField(ilcx + 1, ilcy, ilcz)
+ + coarseField(ilcx, ilcy + 1, ilcz + 1)
+ + coarseField(ilcx + 1, ilcy + 1, ilcz));
}
}
}
diff --git a/src/amr/data/field/refine/field_refine_operator.hpp b/src/amr/data/field/refine/field_refine_operator.hpp
index 7ba73cf41..e3ad9db88 100644
--- a/src/amr/data/field/refine/field_refine_operator.hpp
+++ b/src/amr/data/field/refine/field_refine_operator.hpp
@@ -2,43 +2,96 @@
#define PHARE_FIELD_REFINE_OPERATOR_HPP
+#include "amr/data/tensorfield/tensor_field_data.hpp"
#include "core/def/phare_mpi.hpp"
#include "core/def.hpp"
#include "amr/data/field/field_data.hpp"
-#include "amr/data/field/field_geometry.hpp"
-#include "core/data/grid/gridlayout.hpp"
+
+#include "core/hybrid/hybrid_quantities.hpp"
#include "field_linear_refine.hpp"
-#include "field_refiner.hpp"
-#include
#include
+#include
+
#include
-#include
namespace PHARE::amr
{
-class AFieldRefineOperator
+
+using core::dirX;
+using core::dirY;
+using core::dirZ;
+
+
+
+template
+void refine_field(Dst& destinationField, auto& sourceField, auto& intersectionBox, auto& refiner)
{
-public:
- AFieldRefineOperator(bool b)
- : node_only{b}
+ auto constexpr static dimension = Dst::dimension;
+
+ if constexpr (dimension == 1)
{
+ int iStartX = intersectionBox.lower(dirX);
+ int iEndX = intersectionBox.upper(dirX);
+
+ for (int ix = iStartX; ix <= iEndX; ++ix)
+ {
+ refiner(sourceField, destinationField, {{ix}});
+ }
}
- virtual ~AFieldRefineOperator() {}
- bool const node_only = false;
-};
-using core::dirX;
-using core::dirY;
-using core::dirZ;
+
+ else if constexpr (dimension == 2)
+ {
+ int iStartX = intersectionBox.lower(dirX);
+ int iStartY = intersectionBox.lower(dirY);
+
+ int iEndX = intersectionBox.upper(dirX);
+ int iEndY = intersectionBox.upper(dirY);
+
+ for (int ix = iStartX; ix <= iEndX; ++ix)
+ {
+ for (int iy = iStartY; iy <= iEndY; ++iy)
+ {
+ refiner(sourceField, destinationField, {{ix, iy}});
+ }
+ }
+ }
+
+
+
+
+ else if constexpr (dimension == 3)
+ {
+ int iStartX = intersectionBox.lower(dirX);
+ int iStartY = intersectionBox.lower(dirY);
+ int iStartZ = intersectionBox.lower(dirZ);
+
+ int iEndX = intersectionBox.upper(dirX);
+ int iEndY = intersectionBox.upper(dirY);
+ int iEndZ = intersectionBox.upper(dirZ);
+
+ for (int ix = iStartX; ix <= iEndX; ++ix)
+ {
+ for (int iy = iStartY; iy <= iEndY; ++iy)
+ {
+ for (int iz = iStartZ; iz <= iEndZ; ++iz)
+ {
+ refiner(sourceField, destinationField, {{ix, iy, iz}});
+ }
+ }
+ }
+ }
+}
+
template
-class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRefineOperator
+class FieldRefineOperator : public SAMRAI::hier::RefineOperator
{
public:
static constexpr std::size_t dimension = GridLayoutT::dimension;
@@ -48,7 +101,7 @@ class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRe
FieldRefineOperator(bool node_only = false)
: SAMRAI::hier::RefineOperator{"FieldRefineOperator"}
- , AFieldRefineOperator{node_only}
+
{
}
@@ -95,13 +148,9 @@ class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRe
auto const& sourceField = FieldDataT::getField(source, sourceId);
auto const& srcLayout = FieldDataT::getLayout(source, sourceId);
-
// We assume that quantity are all the same.
- // Note that an assertion will be raised
- // in refineIt operator
- auto const& qty = destinationField.physicalQuantity();
-
-
+ // Note that an assertion will be raised in refineIt operator
+ auto const& qty = destinationField.physicalQuantity();
auto const destData = destination.getPatchData(destinationId);
auto const srcData = source.getPatchData(sourceId);
@@ -110,78 +159,111 @@ class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRe
auto const sourceFieldBox
= FieldGeometry::toFieldBox(srcData->getGhostBox(), qty, srcLayout);
-
FieldRefinerPolicy refiner{destLayout.centering(qty), destFieldBox, sourceFieldBox, ratio};
-
for (auto const& box : overlapBoxes)
{
// we compute the intersection with the destination,
- // and then we apply the refine operation on each fine
- // index.
+ // and then we apply the refine operation on each fine index.
auto intersectionBox = destFieldBox * box;
+ refine_field(destinationField, sourceField, intersectionBox, refiner);
+ }
+ }
+};
+template
+class TensorFieldRefineOperator : public SAMRAI::hier::RefineOperator
+{
+public:
+ static constexpr std::size_t dimension = GridLayoutT::dimension;
+ using GridLayoutImpl = GridLayoutT::implT;
+ using TensorFieldDataT = TensorFieldData;
+ using TensorFieldOverlap_t = TensorFieldOverlap;
- if constexpr (dimension == 1)
- {
- int iStartX = intersectionBox.lower(dirX);
- int iEndX = intersectionBox.upper(dirX);
+ static constexpr std::size_t N = TensorFieldDataT::N;
- for (int ix = iStartX; ix <= iEndX; ++ix)
- {
- refiner(sourceField, destinationField, {{ix}});
- }
- }
+ TensorFieldRefineOperator(bool node_only = false)
+ : SAMRAI::hier::RefineOperator{"FieldRefineOperator"}
+ {
+ }
+ virtual ~TensorFieldRefineOperator() = default;
+ /** This implementation has the top priority for refine operation
+ *
+ */
+ NO_DISCARD int getOperatorPriority() const override { return 0; }
- else if constexpr (dimension == 2)
- {
- int iStartX = intersectionBox.lower(dirX);
- int iStartY = intersectionBox.lower(dirY);
+ /**
+ * @brief This operator needs to have at least 1 ghost cell to work properly
+ *
+ */
+ NO_DISCARD SAMRAI::hier::IntVector
+ getStencilWidth(SAMRAI::tbox::Dimension const& dim) const override
+ {
+ return SAMRAI::hier::IntVector::getOne(dim);
+ }
- int iEndX = intersectionBox.upper(dirX);
- int iEndY = intersectionBox.upper(dirY);
- for (int ix = iStartX; ix <= iEndX; ++ix)
- {
- for (int iy = iStartY; iy <= iEndY; ++iy)
- {
- refiner(sourceField, destinationField, {{ix, iy}});
- }
- }
- }
+ /**
+ * @brief Given a set of box on a fine patch, compute the interpolation from
+ * a coarser patch that is underneath the fine box.
+ * Since we get our boxes from a FieldOverlap, we know that they are in correct
+ * Field Indexes
+ *
+ */
+ void refine(SAMRAI::hier::Patch& destination, SAMRAI::hier::Patch const& source,
+ int const destinationId, int const sourceId,
+ SAMRAI::hier::BoxOverlap const& destinationOverlap,
+ SAMRAI::hier::IntVector const& ratio) const override
+ {
+ auto const& destinationTensorFieldOverlap
+ = dynamic_cast(destinationOverlap);
+ auto const& srcData = source.getPatchData(sourceId);
+ auto const& destData = destination.getPatchData(destinationId);
+ auto& destinationFields = TensorFieldDataT::getFields(destination, destinationId);
+ auto const& destLayout = TensorFieldDataT::getLayout(destination, destinationId);
+ auto const& sourceFields = TensorFieldDataT::getFields(source, sourceId);
+ auto const& srcLayout = TensorFieldDataT::getLayout(source, sourceId);
+ // We assume that quantity are all the same.
+ // Note that an assertion will be raised in refineIt operator
+ for (std::uint16_t c = 0; c < N; ++c)
+ {
+ auto const& overlapBoxes
+ = destinationTensorFieldOverlap[c]->getDestinationBoxContainer();
+ auto const& qty = destinationFields[c].physicalQuantity();
+ using FieldGeometry = FieldGeometry>;
- else if constexpr (dimension == 3)
- {
- int iStartX = intersectionBox.lower(dirX);
- int iStartY = intersectionBox.lower(dirY);
- int iStartZ = intersectionBox.lower(dirZ);
+ auto const destFieldBox
+ = FieldGeometry::toFieldBox(destData->getGhostBox(), qty, destLayout);
+ auto const sourceFieldBox
+ = FieldGeometry::toFieldBox(srcData->getGhostBox(), qty, srcLayout);
- int iEndX = intersectionBox.upper(dirX);
- int iEndY = intersectionBox.upper(dirY);
- int iEndZ = intersectionBox.upper(dirZ);
+ FieldRefinerPolicy refiner{destLayout.centering(qty), destFieldBox, sourceFieldBox,
+ ratio};
- for (int ix = iStartX; ix <= iEndX; ++ix)
- {
- for (int iy = iStartY; iy <= iEndY; ++iy)
- {
- for (int iz = iStartZ; iz <= iEndZ; ++iz)
- {
- refiner(sourceField, destinationField, {{ix, iy, iz}});
- }
- }
- }
+ for (auto const& box : overlapBoxes)
+ {
+ // we compute the intersection with the destination,
+ // and then we apply the refine operation on each fine index.
+ auto intersectionBox = destFieldBox * box;
+ refine_field(destinationFields[c], sourceFields[c], intersectionBox, refiner);
}
}
}
};
+
+template
+using VecFieldRefineOperator
+ = TensorFieldRefineOperator<1, GridLayoutT, FieldT, FieldRefinerPolicy>;
+
+
} // namespace PHARE::amr
diff --git a/src/amr/data/field/refine/field_refiner.hpp b/src/amr/data/field/refine/field_refiner.hpp
index 89661c08f..7703d279f 100644
--- a/src/amr/data/field/refine/field_refiner.hpp
+++ b/src/amr/data/field/refine/field_refiner.hpp
@@ -91,7 +91,8 @@ namespace amr
{
fieldValue += sourceField(xStartIndex + iShiftX) * leftRightWeights[iShiftX];
}
- destinationField(fineIndex[dirX]) = fieldValue;
+ if (std::isnan(destinationField(fineIndex[dirX])))
+ destinationField(fineIndex[dirX]) = fieldValue;
}
@@ -119,7 +120,8 @@ namespace amr
fieldValue += Yinterp * xLeftRightWeights[iShiftX];
}
- destinationField(fineIndex[dirX], fineIndex[dirY]) = fieldValue;
+ if (std::isnan(destinationField(fineIndex[dirX], fineIndex[dirY])))
+ destinationField(fineIndex[dirX], fineIndex[dirY]) = fieldValue;
}
@@ -157,7 +159,9 @@ namespace amr
fieldValue += Yinterp * xLeftRightWeights[iShiftX];
}
- destinationField(fineIndex[dirX], fineIndex[dirY], fineIndex[dirZ]) = fieldValue;
+ if (std::isnan(destinationField(fineIndex[dirX], fineIndex[dirY], fineIndex[dirZ])))
+ destinationField(fineIndex[dirX], fineIndex[dirY], fineIndex[dirZ])
+ = fieldValue;
}
}
diff --git a/src/amr/data/field/refine/magnetic_field_regrider.hpp b/src/amr/data/field/refine/magnetic_field_regrider.hpp
new file mode 100644
index 000000000..263e9cd41
--- /dev/null
+++ b/src/amr/data/field/refine/magnetic_field_regrider.hpp
@@ -0,0 +1,202 @@
+#ifndef PHARE_MAGNETIC_FIELD_REGRIDER_HPP
+#define PHARE_MAGNETIC_FIELD_REGRIDER_HPP
+
+
+#include "core/def/phare_mpi.hpp"
+
+#include
+
+#include "amr/resources_manager/amr_utils.hpp"
+#include "core/utilities/constants.hpp"
+#include "core/data/grid/gridlayoutdefs.hpp"
+#include "core/utilities/point/point.hpp"
+
+#include
+#include
+
+namespace PHARE::amr
+{
+
+using core::dirX;
+using core::dirY;
+using core::dirZ;
+
+/** \brief Refines the magnetic components from a coarse mesh to fine faces shared with the coarse
+ * ones.
+ *
+ * This refinement operator works for magnetic field components dispatched following the Yee layout.
+ * It sets the values of fine components only on faces shared with coarse faces.
+ * The fine faces values are set equal to that of the coarse shared one (order 0 interpolation).
+ * inner fine faces are set by the MagneticRefinePatchStrategy
+ */
+template
+class MagneticFieldRegrider
+{
+public:
+ MagneticFieldRegrider(std::array const& centering,
+ SAMRAI::hier::Box const& destinationGhostBox,
+ SAMRAI::hier::Box const& sourceGhostBox,
+ SAMRAI::hier::IntVector const& ratio)
+ : fineBox_{destinationGhostBox}
+ , coarseBox_{sourceGhostBox}
+ , centerings_{centering}
+ {
+ }
+
+
+ // magnetic field refinement is made so to conserve the divergence of B
+ // it simply copies the value of the magnetic field existing on a coarse face
+ // onto the 2 (1D), 4 (2/3D) colocated fine faces. This way the total flux on
+ // these fine faces equals that on the overlaped coarse face.
+ // see fujimoto et al. 2011 : doi:10.1016/j.jcp.2011.08.002
+ template
+ void operator()(FieldT const& coarseField, FieldT& fineField,
+ core::Point fineIndex)
+ {
+ TBOX_ASSERT(coarseField.physicalQuantity() == fineField.physicalQuantity());
+
+ auto locFineIdx = AMRToLocal(fineIndex, fineBox_);
+ auto coarseIdx = toCoarseIndex(fineIndex);
+ auto locCoarseIdx = AMRToLocal(coarseIdx, coarseBox_);
+
+
+ if constexpr (dimension == 1)
+ {
+ // if primal, i.e. Bx :
+ // if even fine index, we're on top of coarse, we take 100% coarse overlaped fieldValue
+ // e.g. fineIndex==100, we take coarse[100/2]
+ // if odd fine index, we take 50% of surrounding coarse nodes
+ // e.g. fineIndex == 101, we take 0.5(coarse(101/2)+coarse(101/2+1))
+ //
+ // 49 50 51 52
+ // o o o o Bx on coarse
+ // x x x x o x x Bx on fine
+ // 98 99 100 101 102 103 104
+ //
+ //
+ if (centerings_[0] == core::QtyCentering::primal)
+ {
+ if (fineIndex[0] % 2 == 0 && std::isnan(fineField(locFineIdx[dirX])))
+ {
+ fineField(locFineIdx[dirX]) = coarseField(locCoarseIdx[dirX]);
+ }
+ }
+ // dual case, By, Bz
+ // 49 50 51
+ // o + o + o + o Byz on coarse : +
+ // o + o + o + o + o + o + o Byz on fine : +
+ // 98 99 100 101 102 103
+ //
+ // 100 takes 50 = 100/2
+ // 101 takes 50 = 101/2
+ else
+ {
+ if (std::isnan(fineField(locFineIdx[dirX])))
+ fineField(locFineIdx[dirX]) = coarseField(locCoarseIdx[dirX]);
+ }
+ }
+
+
+
+
+ else if constexpr (dimension == 2)
+ {
+ if (centerings_[dirX] == core::QtyCentering::primal
+ and centerings_[dirY] == core::QtyCentering::dual)
+ {
+ // Bx
+ if (fineIndex[dirX] % 2 == 0
+ && std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY])))
+ {
+ // we're on a coarse X face
+ // take the coarse face value
+ fineField(locFineIdx[dirX], locFineIdx[dirY])
+ = coarseField(locCoarseIdx[dirX], locCoarseIdx[dirY]);
+ }
+ }
+ else if (centerings_[dirX] == core::QtyCentering::dual
+ and centerings_[dirY] == core::QtyCentering::primal)
+ {
+ // By
+ if (fineIndex[dirY] % 2 == 0
+ && std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY])))
+ {
+ // we're on a coarse Y face
+ // take the coarse face value
+ fineField(locFineIdx[dirX], locFineIdx[dirY])
+ = coarseField(locCoarseIdx[dirX], locCoarseIdx[dirY]);
+ }
+ }
+ else if (centerings_[dirX] == core::QtyCentering::dual
+ and centerings_[dirY] == core::QtyCentering::dual)
+ {
+ // Bz
+ // we're always on a coarse Z face since there is no dual in z
+ // all 4 fine Bz take the coarse Z value
+ if (std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY])))
+ fineField(locFineIdx[dirX], locFineIdx[dirY])
+ = coarseField(locCoarseIdx[dirX], locCoarseIdx[dirY]);
+ }
+ }
+
+
+ else if constexpr (dimension == 3)
+ {
+ auto ix = locCoarseIdx[dirX];
+ auto iy = locCoarseIdx[dirY];
+ auto iz = locCoarseIdx[dirZ];
+
+ if (centerings_[dirX] == core::QtyCentering::primal
+ and centerings_[dirY] == core::QtyCentering::dual
+ and centerings_[dirZ] == core::QtyCentering::dual)
+ {
+ // Bx
+ if (fineIndex[dirX] % 2 == 0
+ && std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])))
+ {
+ // we're on a coarse X face
+ // take the coarse face value
+ fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])
+ = coarseField(ix, iy, iz);
+ }
+ }
+ else if (centerings_[dirX] == core::QtyCentering::dual
+ and centerings_[dirY] == core::QtyCentering::primal
+ and centerings_[dirZ] == core::QtyCentering::dual)
+ {
+ // By
+ if (fineIndex[dirY] % 2 == 0
+ && std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])))
+ {
+ // we're on a coarse Y face
+ // take the coarse face value
+ fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])
+ = coarseField(ix, iy, iz);
+ }
+ }
+ else if (centerings_[dirX] == core::QtyCentering::dual
+ and centerings_[dirY] == core::QtyCentering::dual
+ and centerings_[dirZ] == core::QtyCentering::primal)
+ {
+ // Bz
+ if (fineIndex[dirZ] % 2 == 0
+ && std::isnan(fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])))
+ {
+ // we're on a coarse Z face
+ // take the coarse face value
+ fineField(locFineIdx[dirX], locFineIdx[dirY], locFineIdx[dirZ])
+ = coarseField(ix, iy, iz);
+ }
+ }
+ }
+ }
+
+private:
+ SAMRAI::hier::Box const fineBox_;
+ SAMRAI::hier::Box const coarseBox_;
+ std::array const centerings_;
+};
+} // namespace PHARE::amr
+
+
+#endif // PHARE_MAGNETIC_FIELD_REGRIDER_HPP
diff --git a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp
index 4028f1a32..d8771016f 100644
--- a/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp
+++ b/src/amr/data/field/refine/magnetic_refine_patch_strategy.hpp
@@ -1,6 +1,7 @@
#ifndef PHARE_AMR_MAGNETIC_REFINE_PATCH_STRATEGY_HPP
#define PHARE_AMR_MAGNETIC_REFINE_PATCH_STRATEGY_HPP
+#include "amr/data/field/field_geometry.hpp"
#include "core/utilities/constants.hpp"
#include "core/utilities/index/index.hpp"
@@ -9,6 +10,7 @@
#include "SAMRAI/hier/PatchLevel.h"
#include "SAMRAI/xfer/RefinePatchStrategy.h"
+#include "core/utilities/types.hpp"
#include
#include
@@ -19,35 +21,28 @@ using core::dirX;
using core::dirY;
using core::dirZ;
-template
+template
class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
{
public:
- using Geometry = typename FieldDataT::Geometry;
- using gridlayout_type = typename FieldDataT::gridlayout_type;
+ using Geometry = typename TensorFieldDataT::Geometry;
+ using gridlayout_type = typename TensorFieldDataT::gridlayout_type;
- static constexpr std::size_t dimension = FieldDataT::dimension;
+ static constexpr std::size_t N = TensorFieldDataT::N;
+ static constexpr std::size_t dimension = TensorFieldDataT::dimension;
MagneticRefinePatchStrategy(ResMan& resourcesManager)
: rm_{resourcesManager}
- , bx_id_{-1}
- , by_id_{-1}
- , bz_id_{-1}
+ , b_id_{-1}
{
}
void assertIDsSet() const
{
- assert(bx_id_ >= 0 && by_id_ >= 0 && bz_id_ >= 0
- && "MagneticRefinePatchStrategy: IDs must be registered before use");
+ assert(b_id_ >= 0 && "MagneticRefinePatchStrategy: IDs must be registered before use");
}
- void registerIDs(int bx_id, int by_id, int bz_id)
- {
- bx_id_ = bx_id;
- by_id_ = by_id;
- bz_id_ = bz_id;
- }
+ void registerIDs(int const b_id) { b_id_ = b_id; }
void setPhysicalBoundaryConditions(SAMRAI::hier::Patch& patch, double const fill_time,
const SAMRAI::hier::IntVector& ghost_width_to_fill) override
@@ -67,46 +62,48 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
{
}
- // We compute the values of the new fine magnetic faces using what was already refined, ie the
- // values on the old coarse faces.
+ // We compute the values of the new fine magnetic faces using what was already refined, ie
+ // the values on the old coarse faces.
void postprocessRefine(SAMRAI::hier::Patch& fine, SAMRAI::hier::Patch const& coarse,
SAMRAI::hier::Box const& fine_box,
SAMRAI::hier::IntVector const& ratio) override
{
assertIDsSet();
- auto& bx = FieldDataT::getField(fine, bx_id_);
- auto& by = FieldDataT::getField(fine, by_id_);
- auto& bz = FieldDataT::getField(fine, bz_id_);
+ auto& fields = TensorFieldDataT::getFields(fine, b_id_);
+ auto& [bx, by, bz] = fields;
auto layout = PHARE::amr::layoutFromPatch(fine);
auto fineBoxLayout = Geometry::layoutFromBox(fine_box, layout);
- SAMRAI::hier::Box fine_box_x
- = Geometry::toFieldBox(fine_box, bx.physicalQuantity(), fineBoxLayout);
- SAMRAI::hier::Box fine_box_y
- = Geometry::toFieldBox(fine_box, by.physicalQuantity(), fineBoxLayout);
- SAMRAI::hier::Box fine_box_z
- = Geometry::toFieldBox(fine_box, bz.physicalQuantity(), fineBoxLayout);
+ auto fine_field_box = core::for_N([&](auto i) {
+ using PhysicalQuantity = std::decay_t;
+
+ return FieldGeometry::toFieldBox(
+ fine_box, fields[i].physicalQuantity(), fineBoxLayout);
+ });
if constexpr (dimension == 1)
{
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_x)))
+ // if we ever go to c++23 we could use std::views::zip to iterate both on the local and
+ // global indices instead of passing the box to do an amr to local inside the function,
+ // which is not obvious at call site
+ for (auto const& i : phare_box_from(fine_field_box[dirX]))
{
- postprocessBx1d(bx, i);
+ postprocessBx1d(bx, layout, i);
}
}
else if constexpr (dimension == 2)
{
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_x)))
+ for (auto const& i : phare_box_from(fine_field_box[dirX]))
{
- postprocessBx2d(bx, by, i);
+ postprocessBx2d(bx, by, layout, i);
}
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_y)))
+ for (auto const& i : phare_box_from(fine_field_box[dirY]))
{
- postprocessBy2d(bx, by, i);
+ postprocessBy2d(bx, by, layout, i);
}
}
@@ -114,46 +111,49 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
{
auto meshSize = layout.meshSize();
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_x)))
+ for (auto const& i : phare_box_from(fine_field_box[dirX]))
{
- postprocessBx3d(bx, by, bz, meshSize, i);
+ postprocessBx3d(bx, by, bz, meshSize, layout, i);
}
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_y)))
+ for (auto const& i : phare_box_from(fine_field_box[dirY]))
{
- postprocessBy3d(bx, by, bz, meshSize, i);
+ postprocessBy3d(bx, by, bz, meshSize, layout, i);
}
- for (auto const& i : layout.AMRToLocal(phare_box_from(fine_box_z)))
+ for (auto const& i : phare_box_from(fine_field_box[dirZ]))
{
- postprocessBz3d(bx, by, bz, meshSize, i);
+ postprocessBz3d(bx, by, bz, meshSize, layout, i);
}
}
}
- static void postprocessBx1d(auto& bx, core::MeshIndex idx)
+ static void postprocessBx1d(auto& bx, auto const& layout, core::Point idx)
{
- auto ix = idx[dirX];
- if (ix % 2 == 1)
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ if (idx[dirX] % 2 != 0)
bx(ix) = 0.5 * (bx(ix - 1) + bx(ix + 1));
}
- static void postprocessBx2d(auto& bx, auto& by, core::MeshIndex idx)
+ static void postprocessBx2d(auto& bx, auto& by, auto const& layout,
+ core::Point idx)
{
- auto ix = idx[dirX];
- auto iy = idx[dirY];
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ auto iy = locIdx[dirY];
// | <- here with offset = 1
// -- --
// | <- or here with offset = 0
- if (ix % 2 == 1)
+ if (idx[dirX] % 2 != 0)
{
// If dual no offset, ie primal for the field we are actually
// modifying, but dual for the field we are indexing to compute
// second and third order terms, then the formula reduces to offset
// = 1
int xoffset = 1;
- int yoffset = (iy % 2 == 0) ? 0 : 1;
+ int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1;
bx(ix, iy) = 0.5 * (bx(ix - 1, iy) + bx(ix + 1, iy))
+ 0.25
@@ -164,16 +164,18 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
}
}
- static void postprocessBy2d(auto& bx, auto& by, core::MeshIndex idx)
+ static void postprocessBy2d(auto& bx, auto& by, auto const& layout,
+ core::Point idx)
{
- auto ix = idx[dirX];
- auto iy = idx[dirY];
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ auto iy = locIdx[dirY];
// |
// here with offset = 0 -> -- -- <- or here with offset = 1
// |
- if (iy % 2 == 1)
+ if (idx[dirY] % 2 != 0)
{
- int xoffset = (ix % 2 == 0) ? 0 : 1;
+ int xoffset = (idx[dirX] % 2 == 0) ? 0 : 1;
int yoffset = 1;
by(ix, iy) = 0.5 * (by(ix, iy - 1) + by(ix, iy + 1))
@@ -186,21 +188,22 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
}
static void postprocessBx3d(auto& bx, auto& by, auto& bz, auto const& meshSize,
- core::MeshIndex idx)
+ auto const& layout, core::Point idx)
{
auto Dx = meshSize[dirX];
auto Dy = meshSize[dirY];
auto Dz = meshSize[dirZ];
- auto ix = idx[dirX];
- auto iy = idx[dirY];
- auto iz = idx[dirZ];
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ auto iy = locIdx[dirY];
+ auto iz = locIdx[dirZ];
- if (ix % 2 == 1)
+ if (idx[dirX] % 2 != 0)
{
int xoffset = 1;
- int yoffset = (iy % 2 == 0) ? 0 : 1;
- int zoffset = (iz % 2 == 0) ? 0 : 1;
+ int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1;
+ int zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1;
bx(ix, iy, iz)
= 0.5 * (bx(ix - 1, iy, iz) + bx(ix + 1, iy, iz))
@@ -244,21 +247,22 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
};
static void postprocessBy3d(auto& bx, auto& by, auto& bz, auto const& meshSize,
- core::MeshIndex idx)
+ auto const& layout, core::Point idx)
{
auto Dx = meshSize[dirX];
auto Dy = meshSize[dirY];
auto Dz = meshSize[dirZ];
- auto ix = idx[dirX];
- auto iy = idx[dirY];
- auto iz = idx[dirZ];
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ auto iy = locIdx[dirY];
+ auto iz = locIdx[dirZ];
- if (iy % 2 == 1)
+ if (idx[dirY] % 2 != 0)
{
- int xoffset = (ix % 2 == 0) ? 0 : 1;
+ int xoffset = (idx[dirX] % 2 == 0) ? 0 : 1;
int yoffset = 1;
- int zoffset = (iz % 2 == 0) ? 0 : 1;
+ int zoffset = (idx[dirZ] % 2 == 0) ? 0 : 1;
by(ix, iy, iz)
= 0.5 * (by(ix, iy - 1, iz) + by(ix, iy + 1, iz))
@@ -302,20 +306,21 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
};
static void postprocessBz3d(auto& bx, auto& by, auto& bz, auto const& meshSize,
- core::MeshIndex idx)
+ auto const& layout, core::Point idx)
{
auto Dx = meshSize[dirX];
auto Dy = meshSize[dirY];
auto Dz = meshSize[dirZ];
- auto ix = idx[dirX];
- auto iy = idx[dirY];
- auto iz = idx[dirZ];
+ auto locIdx = layout.AMRToLocal(idx);
+ auto ix = locIdx[dirX];
+ auto iy = locIdx[dirY];
+ auto iz = locIdx[dirZ];
- if (iz % 2 == 1)
+ if (idx[dirZ] % 2 != 0)
{
- int xoffset = (ix % 2 == 0) ? 0 : 1;
- int yoffset = (iy % 2 == 0) ? 0 : 1;
+ int xoffset = (idx[dirX] % 2 == 0) ? 0 : 1;
+ int yoffset = (idx[dirY] % 2 == 0) ? 0 : 1;
int zoffset = 1;
bz(ix, iy, iz)
@@ -375,9 +380,7 @@ class MagneticRefinePatchStrategy : public SAMRAI::xfer::RefinePatchStrategy
static constexpr std::array ijk_factor_{-1, 1};
ResMan& rm_;
- int bx_id_;
- int by_id_;
- int bz_id_;
+ int b_id_;
};
} // namespace PHARE::amr
diff --git a/src/amr/data/field/time_interpolate/field_linear_time_interpolate.hpp b/src/amr/data/field/time_interpolate/field_linear_time_interpolate.hpp
index ab857fa62..6fbb416b8 100644
--- a/src/amr/data/field/time_interpolate/field_linear_time_interpolate.hpp
+++ b/src/amr/data/field/time_interpolate/field_linear_time_interpolate.hpp
@@ -9,18 +9,94 @@
#include "amr/data/field/field_data.hpp"
#include "amr/data/field/field_geometry.hpp"
+#include "amr/data/tensorfield/tensor_field_data.hpp"
#include "core/def/phare_mpi.hpp"
#include
+#include
namespace PHARE::amr
{
+
using core::dirX;
using core::dirY;
using core::dirZ;
+template
+void linear_time_interpolate(Dst& fieldDest, auto& fieldSrcOld, auto& fieldSrcNew, auto&&... args)
+{
+ auto static constexpr dim = Dst::dimension;
+
+ auto const& [localDestBox, localSrcBox, alpha] = std::forward_as_tuple(args...);
+
+ if constexpr (dim == 1)
+ {
+ auto const iDestStartX = localDestBox.lower(dirX);
+ auto const iDestEndX = localDestBox.upper(dirX);
+
+ auto const iSrcStartX = localSrcBox.lower(dirX);
+
+ for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
+ {
+ fieldDest(ix) = (1. - alpha) * fieldSrcOld(ixSrc) + alpha * fieldSrcNew(ixSrc);
+ }
+ }
+ else if constexpr (dim == 2)
+ {
+ auto const iDestStartX = localDestBox.lower(dirX);
+ auto const iDestEndX = localDestBox.upper(dirX);
+ auto const iDestStartY = localDestBox.lower(dirY);
+ auto const iDestEndY = localDestBox.upper(dirY);
+
+ auto const iSrcStartX = localSrcBox.lower(dirX);
+ auto const iSrcStartY = localSrcBox.lower(dirY);
+
+ for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
+ {
+ for (auto iy = iDestStartY, iySrc = iSrcStartY; iy <= iDestEndY; ++iy, ++iySrc)
+ {
+ fieldDest(ix, iy)
+ = (1. - alpha) * fieldSrcOld(ixSrc, iySrc) + alpha * fieldSrcNew(ixSrc, iySrc);
+ }
+ }
+ }
+ else if constexpr (dim == 3)
+ {
+ auto const iDestStartX = localDestBox.lower(dirX);
+ auto const iDestEndX = localDestBox.upper(dirX);
+ auto const iDestStartY = localDestBox.lower(dirY);
+ auto const iDestEndY = localDestBox.upper(dirY);
+ auto const iDestStartZ = localDestBox.lower(dirZ);
+ auto const iDestEndZ = localDestBox.upper(dirZ);
+
+ auto const iSrcStartX = localSrcBox.lower(dirX);
+ auto const iSrcStartY = localSrcBox.lower(dirY);
+ auto const iSrcStartZ = localSrcBox.lower(dirZ);
+
+ for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
+ {
+ for (auto iy = iDestStartY, iySrc = iSrcStartY; iy <= iDestEndY; ++iy, ++iySrc)
+ {
+ for (auto iz = iDestStartZ, izSrc = iSrcStartZ; iz <= iDestEndZ; ++iz, ++izSrc)
+ {
+ fieldDest(ix, iy, iz) = (1. - alpha) * fieldSrcOld(ixSrc, iySrc, izSrc)
+ + alpha * fieldSrcNew(ixSrc, iySrc, izSrc);
+ }
+ }
+ }
+ }
+
+ //
+}
+
+
+} // namespace PHARE::amr
+
+namespace PHARE::amr
+{
+
template
class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator
{
@@ -52,10 +128,10 @@ class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator
auto const& fieldDataSrcOld = dynamic_cast(srcDataOld);
auto const& fieldDataSrcNew = dynamic_cast(srcDataNew);
- double const interpTime = fieldDataDest.getTime();
- double const oldTime = fieldDataSrcOld.getTime();
- double const newTime = fieldDataSrcNew.getTime();
- double const alpha = (interpTime - oldTime) / (newTime - oldTime);
+ auto const& interpTime = fieldDataDest.getTime();
+ auto const& oldTime = fieldDataSrcOld.getTime();
+ auto const& newTime = fieldDataSrcNew.getTime();
+ auto const& alpha = (interpTime - oldTime) / (newTime - oldTime);
auto const& fieldSrcOld = fieldDataSrcOld.field;
auto const& fieldSrcNew = fieldDataSrcNew.field;
@@ -80,65 +156,78 @@ class FieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator
auto const localDestBox = AMRToLocal(finalBox, ghostBox);
auto const localSrcBox = AMRToLocal(finalBox, srcGhostBox);
- if constexpr (dim == 1)
- {
- auto const iDestStartX = localDestBox.lower(dirX);
- auto const iDestEndX = localDestBox.upper(dirX);
+ linear_time_interpolate( //
+ fieldDest, fieldSrcOld, fieldSrcNew, localDestBox, localSrcBox, alpha);
+ }
+};
- auto const iSrcStartX = localSrcBox.lower(dirX);
- for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
- {
- fieldDest(ix) = (1. - alpha) * fieldSrcOld(ixSrc) + alpha * fieldSrcNew(ixSrc);
- }
- }
- else if constexpr (dim == 2)
- {
- auto const iDestStartX = localDestBox.lower(dirX);
- auto const iDestEndX = localDestBox.upper(dirX);
- auto const iDestStartY = localDestBox.lower(dirY);
- auto const iDestEndY = localDestBox.upper(dirY);
+template
+class TensorFieldLinearTimeInterpolate : public SAMRAI::hier::TimeInterpolateOperator
+{
+ static std::size_t constexpr dim = GridLayoutT::dimension;
+ static_assert(dim > 0 && dim <= 3);
- auto const iSrcStartX = localSrcBox.lower(dirX);
- auto const iSrcStartY = localSrcBox.lower(dirY);
+ using TensorFieldDataT = TensorFieldData;
+ static constexpr std::size_t N = TensorFieldDataT::N;
- for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
- {
- for (auto iy = iDestStartY, iySrc = iSrcStartY; iy <= iDestEndY; ++iy, ++iySrc)
- {
- fieldDest(ix, iy) = (1. - alpha) * fieldSrcOld(ixSrc, iySrc)
- + alpha * fieldSrcNew(ixSrc, iySrc);
- }
- }
- }
- else if constexpr (dim == 3)
+public:
+ using GridLayoutImpl = typename GridLayoutT::implT;
+
+ TensorFieldLinearTimeInterpolate()
+ : SAMRAI::hier::TimeInterpolateOperator{"FieldLinearTimeInterpolate"}
+ {
+ }
+
+
+ virtual ~TensorFieldLinearTimeInterpolate() = default;
+
+
+ void timeInterpolate(SAMRAI::hier::PatchData& destData, SAMRAI::hier::Box const& where,
+ SAMRAI::hier::BoxOverlap const& /*overlap*/,
+ SAMRAI::hier::PatchData const& srcDataOld,
+ SAMRAI::hier::PatchData const& srcDataNew) const override
+ {
+ auto& fieldDataDest = dynamic_cast(destData);
+
+ auto const& fieldDataSrcOld = dynamic_cast(srcDataOld);
+ auto const& fieldDataSrcNew = dynamic_cast(srcDataNew);
+
+ auto const& interpTime = fieldDataDest.getTime();
+ auto const& oldTime = fieldDataSrcOld.getTime();
+ auto const& newTime = fieldDataSrcNew.getTime();
+ auto const& alpha = (interpTime - oldTime) / (newTime - oldTime);
+ auto const& fieldSrcOlds = fieldDataSrcOld.grids;
+ auto const& fieldSrcNews = fieldDataSrcNew.grids;
+ auto& fieldDests = fieldDataDest.grids;
+ auto const& layout = fieldDataDest.gridLayout;
+
+ for (std::uint16_t c = 0; c < N; ++c)
{
- auto const iDestStartX = localDestBox.lower(dirX);
- auto const iDestEndX = localDestBox.upper(dirX);
- auto const iDestStartY = localDestBox.lower(dirY);
- auto const iDestEndY = localDestBox.upper(dirY);
- auto const iDestStartZ = localDestBox.lower(dirZ);
- auto const iDestEndZ = localDestBox.upper(dirZ);
-
- auto const iSrcStartX = localSrcBox.lower(dirX);
- auto const iSrcStartY = localSrcBox.lower(dirY);
- auto const iSrcStartZ = localSrcBox.lower(dirZ);
-
- for (auto ix = iDestStartX, ixSrc = iSrcStartX; ix <= iDestEndX; ++ix, ++ixSrc)
- {
- for (auto iy = iDestStartY, iySrc = iSrcStartY; iy <= iDestEndY; ++iy, ++iySrc)
- {
- for (auto iz = iDestStartZ, izSrc = iSrcStartZ; iz <= iDestEndZ; ++iz, ++izSrc)
- {
- fieldDest(ix, iy, iz) = (1. - alpha) * fieldSrcOld(ixSrc, iySrc, izSrc)
- + alpha * fieldSrcNew(ixSrc, iySrc, izSrc);
- }
- }
- }
+ auto const& qty = fieldDests[c].physicalQuantity();
+ using FieldGeometry_t = FieldGeometry>;
+
+ auto const& whereLayout = FieldGeometry_t::layoutFromBox(where, layout);
+ auto const& interpolateBox = FieldGeometry_t::toFieldBox(where, qty, whereLayout);
+ auto const& ghostBox
+ = FieldGeometry_t::toFieldBox(fieldDataDest.getGhostBox(), qty, layout);
+ auto const& finalBox = interpolateBox * ghostBox;
+ auto const& srcGhostBox = FieldGeometry_t::toFieldBox(fieldDataSrcNew.getGhostBox(),
+ qty, fieldDataSrcNew.gridLayout);
+ auto const& localDestBox = AMRToLocal(finalBox, ghostBox);
+ auto const& localSrcBox = AMRToLocal(finalBox, srcGhostBox);
+
+ linear_time_interpolate( //
+ fieldDests[c], fieldSrcOlds[c], fieldSrcNews[c], localDestBox, localSrcBox, alpha);
}
}
};
+template
+using VecFieldLinearTimeInterpolate
+ = TensorFieldLinearTimeInterpolate<1, GridLayoutT, FieldT, PhysicalQuantity>;
+
+
} // namespace PHARE::amr
#endif
diff --git a/src/amr/data/particles/particles_data.hpp b/src/amr/data/particles/particles_data.hpp
index 803247447..259125a46 100644
--- a/src/amr/data/particles/particles_data.hpp
+++ b/src/amr/data/particles/particles_data.hpp
@@ -1,34 +1,32 @@
#ifndef PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP
#define PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP
-#include
-#include
-#include
-#include