diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 76318cbed..0f59e6726 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -16,7 +16,7 @@ jobs: analyze: name: Analyze runs-on: ubuntu-latest - container: ghcr.io/pharchive/phare_dep/ubuntu:22.04 + container: ghcr.io/pharchive/phare_dep/ubuntu:24.04 permissions: actions: read contents: read @@ -35,7 +35,7 @@ jobs: - name: Configure (cpp) if: ${{ matrix.language == 'cpp' }} - run: mkdir build; cd build; cmake .. -Dphare_configurator=ON + run: . /.venv/bin/activate; mkdir build; cd build; cmake .. -Dphare_configurator=ON - name: Initialize CodeQL uses: github/codeql-action/init@v3 @@ -50,7 +50,7 @@ jobs: - name: Build cpp if: ${{ matrix.language == 'cpp' }} - run: cd build; make + run: . /.venv/bin/activate; cd build; make - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 diff --git a/pyphare/pyphare/cpp/__init__.py b/pyphare/pyphare/cpp/__init__.py index c66b11d38..1ce162b09 100644 --- a/pyphare/pyphare/cpp/__init__.py +++ b/pyphare/pyphare/cpp/__init__.py @@ -22,7 +22,7 @@ def cpp_lib(override=None): return importlib.import_module("pybindlibs.cpp") try: return importlib.import_module("pybindlibs.cpp_dbg") - except ImportError: + except (ImportError, ModuleNotFoundError): return importlib.import_module("pybindlibs.cpp") diff --git a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py index 487dc8605..5ffa141c3 100644 --- a/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py +++ b/pyphare/pyphare/pharesee/hierarchy/hierarchy_utils.py @@ -592,6 +592,7 @@ def _compute_scalardiv(patch_datas, **kwargs): @dataclass class EqualityReport: + atol: float failed: List[Tuple[str, Any, Any]] = field(default_factory=lambda: []) def __bool__(self): @@ -602,10 +603,13 @@ def __repr__(self): print(msg) try: if type(ref) is FieldData: - phut.assert_fp_any_all_close(ref[:], cmp[:], atol=1e-16) + 
phut.assert_fp_any_all_close( + ref[ref.box], cmp[cmp.box], atol=self.atol + ) except AssertionError as e: print(e) - return self.failed[0][0] + + return self.failed[0][0] if self.failed else "==" def __call__(self, reason, ref=None, cmp=None): self.failed.append((reason, ref, cmp)) @@ -622,7 +626,7 @@ def __reversed__(self): def hierarchy_compare(this, that, atol=1e-16): - eqr = EqualityReport() + eqr = EqualityReport(atol) if not isinstance(this, PatchHierarchy) or not isinstance(that, PatchHierarchy): return eqr("class type mismatch") diff --git a/pyphare/pyphare/pharesee/hierarchy/patchdata.py b/pyphare/pyphare/pharesee/hierarchy/patchdata.py index e0727b9a2..22d456f05 100644 --- a/pyphare/pyphare/pharesee/hierarchy/patchdata.py +++ b/pyphare/pyphare/pharesee/hierarchy/patchdata.py @@ -82,7 +82,7 @@ def __repr__(self): def compare(self, that, atol=1e-16): return self.field_name == that.field_name and phut.fp_any_all_close( - self.dataset[:], that.dataset[:], atol=atol + self[self.box], that[that.box], atol=atol ) def __eq__(self, that): diff --git a/pyphare/pyphare/pharesee/particles.py b/pyphare/pyphare/pharesee/particles.py index 3d6fb58b4..f98b016e9 100644 --- a/pyphare/pyphare/pharesee/particles.py +++ b/pyphare/pyphare/pharesee/particles.py @@ -205,12 +205,13 @@ def all_assert_sorted(part1, part2): ) np.testing.assert_array_equal(part1.iCells[idx1], part2.iCells[idx2]) - np.testing.assert_allclose(part1.deltas[idx1], part2.deltas[idx2], atol=deltol) np.testing.assert_allclose(part1.v[idx1, 0], part2.v[idx2, 0], atol=1e-12) np.testing.assert_allclose(part1.v[idx1, 1], part2.v[idx2, 1], atol=1e-12) np.testing.assert_allclose(part1.v[idx1, 2], part2.v[idx2, 2], atol=1e-12) + np.testing.assert_allclose(part1.deltas[idx1], part2.deltas[idx2], atol=deltol) + def any_assert(part1, part2): np.testing.assert_equal(part1.size(), part2.size()) diff --git a/pyphare/pyphare/pharesee/run/run.py b/pyphare/pyphare/pharesee/run/run.py index 003943497..c42856882 100644 --- 
a/pyphare/pyphare/pharesee/run/run.py +++ b/pyphare/pyphare/pharesee/run/run.py @@ -284,9 +284,19 @@ def all_times(self): time = np.zeros(len(time_keys)) for it, t in enumerate(time_keys): time[it] = float(t) - ts[quantities_per_file[basename]] = time + if basename in quantities_per_file: + ts[quantities_per_file[basename]] = time ff.close() return ts + def all_pops(self): + pops = set() + for file in self.available_diags: + basename = os.path.basename(file).split(".")[0] + if basename.startswith("ions_pop_"): + if any([basename.endswith(k) for k in ["density", "flux", "domain"]]): + pops.add(basename[9:].split("_")[0]) + return list(pops) + def times(self, qty): return self.all_times()[qty] diff --git a/res/cmake/def.cmake b/res/cmake/def.cmake index f86854286..7b6016c16 100644 --- a/res/cmake/def.cmake +++ b/res/cmake/def.cmake @@ -205,12 +205,12 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests endfunction(add_phare_test) function(add_python3_test name file directory) - add_test(NAME py3_${name} COMMAND mpirun -n ${PHARE_MPI_PROCS} ${PHARE_MPIRUN_POSTFIX} python3 -u ${file} WORKING_DIRECTORY ${directory}) + add_test(NAME py3_${name} COMMAND mpirun -n ${PHARE_MPI_PROCS} ${PHARE_MPIRUN_POSTFIX} ${Python_EXECUTABLE} -u ${file} WORKING_DIRECTORY ${directory}) set_exe_paths_(py3_${name}) endfunction(add_python3_test) function(add_mpi_python3_test N name file directory) - add_test(NAME py3_${name}_mpi_n_${N} COMMAND mpirun -n ${N} ${PHARE_MPIRUN_POSTFIX} python3 ${file} WORKING_DIRECTORY ${directory}) + add_test(NAME py3_${name}_mpi_n_${N} COMMAND mpirun -n ${N} ${PHARE_MPIRUN_POSTFIX} ${Python_EXECUTABLE} ${file} WORKING_DIRECTORY ${directory}) set_exe_paths_(py3_${name}_mpi_n_${N}) endfunction(add_mpi_python3_test) @@ -273,7 +273,7 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests else() add_test( NAME py3_${target}_mpi_n_${N} - COMMAND mpirun -n ${N} ${PHARE_MPIRUN_POSTFIX} python3 -u ${file} ${CLI_ARGS} + COMMAND mpirun -n ${N} 
${PHARE_MPIRUN_POSTFIX} ${Python_EXECUTABLE} -u ${file} ${CLI_ARGS} WORKING_DIRECTORY ${directory}) set_exe_paths_(py3_${target}_mpi_n_${N}) endif() diff --git a/res/cmake/test.cmake b/res/cmake/test.cmake index 1f7331b1d..2a0df667f 100644 --- a/res/cmake/test.cmake +++ b/res/cmake/test.cmake @@ -34,14 +34,9 @@ if (test AND ${PHARE_EXEC_LEVEL_MIN} GREATER 0) # 0 = no tests add_subdirectory(tests/initializer) + add_subdirectory(tests/amr/data/field) add_subdirectory(tests/amr/data/particles) - add_subdirectory(tests/amr/data/field/coarsening) - add_subdirectory(tests/amr/data/field/copy_pack) - add_subdirectory(tests/amr/data/field/geometry) - add_subdirectory(tests/amr/data/field/overlap) - add_subdirectory(tests/amr/data/field/refine) - add_subdirectory(tests/amr/data/field/variable) - add_subdirectory(tests/amr/data/field/time_interpolate) + add_subdirectory(tests/amr/resources_manager) add_subdirectory(tests/amr/messengers) add_subdirectory(tests/amr/models) diff --git a/src/amr/data/field/field_data.hpp b/src/amr/data/field/field_data.hpp index 385393be7..a3ecfae98 100644 --- a/src/amr/data/field/field_data.hpp +++ b/src/amr/data/field/field_data.hpp @@ -2,22 +2,21 @@ #define PHARE_SRC_AMR_FIELD_FIELD_DATA_HPP - +#include "core/logger.hpp" #include "core/def/phare_mpi.hpp" +#include +#include "core/data/field/field_box.hpp" -#include -#include -#include - -#include "core/data/grid/gridlayout.hpp" -#include "core/data/grid/gridlayout_impl.hpp" +#include #include "amr/resources_manager/amr_utils.hpp" #include "field_geometry.hpp" -#include "core/logger.hpp" +#include +#include + +#include -#include namespace PHARE { @@ -40,12 +39,15 @@ namespace amr typename PhysicalQuantity = decltype(std::declval().physicalQuantity())> class FieldData : public SAMRAI::hier::PatchData { - using Super = SAMRAI::hier::PatchData; + using Super = SAMRAI::hier::PatchData; + using value_type = Grid_t::value_type; + using SetEqualOp = core::Equals; public: static constexpr std::size_t 
dimension = GridLayoutT::dimension; static constexpr std::size_t interp_order = GridLayoutT::interp_order; using Geometry = FieldGeometry; + static constexpr auto NO_ROTATE = SAMRAI::hier::Transformation::NO_ROTATE; /*** \brief Construct a FieldData from information associated to a patch * @@ -127,24 +129,19 @@ namespace amr // = true). note that we could have stored the ghost box of the field data at // creation - SAMRAI::hier::Box sourceBox + SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox(fieldSource.getBox(), quantity_, fieldSource.gridLayout); - - SAMRAI::hier::Box destinationBox + SAMRAI::hier::Box const destinationBox = Geometry::toFieldBox(this->getBox(), quantity_, this->gridLayout); // Given the two boxes in correct space we just have to intersect them - SAMRAI::hier::Box intersectionBox = sourceBox * destinationBox; + SAMRAI::hier::Box const intersectionBox = sourceBox * destinationBox; if (!intersectionBox.empty()) { - auto const& sourceField = fieldSource.field; - auto& destinationField = field; - // We can copy field from the source to the destination on the correct region - copy_(intersectionBox, sourceBox, destinationBox, fieldSource, sourceField, - destinationField); + copy_(intersectionBox, sourceBox, destinationBox, fieldSource, field); } } @@ -210,7 +207,7 @@ namespace amr */ std::size_t getDataStreamSize(const SAMRAI::hier::BoxOverlap& overlap) const final { - return getDataStreamSize_(overlap); + return getDataStreamSize_(overlap); } @@ -224,37 +221,28 @@ namespace amr { PHARE_LOG_SCOPE(3, "packStream"); - // getDataStreamSize_ mean that we want to apply the transformation - std::size_t expectedSize = getDataStreamSize_(overlap) / sizeof(double); - std::vector buffer; - buffer.reserve(expectedSize); - auto& fieldOverlap = dynamic_cast(overlap); SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) - { - 
SAMRAI::hier::BoxContainer const& boxContainer - = fieldOverlap.getDestinationBoxContainer(); - for (auto const& box : boxContainer) - { - auto const& source = field; - SAMRAI::hier::Box sourceBox - = Geometry::toFieldBox(getBox(), quantity_, gridLayout); + if (transformation.getRotation() != NO_ROTATE) + return; // throw, we don't do rotations in phare.... - SAMRAI::hier::Box packBox{box}; + std::vector buffer; + buffer.reserve(getDataStreamSize_(overlap) / sizeof(double)); - // Since the transformation, allow to transform the source box, - // into the destination box space, and that the box in the boxContainer - // are in destination space, we have to use the inverseTransform - // to get into source space - transformation.inverseTransform(packBox); - packBox = packBox * sourceBox; + for (auto const& box : fieldOverlap.getDestinationBoxContainer()) + { + SAMRAI::hier::Box packBox{box}; - internals_.packImpl(buffer, source, packBox, sourceBox); - } + // Since the transformation, allow to transform the source box, + // into the destination box space, and that the box in the boxContainer + // are in destination space, we have to use the inverseTransform + // to get into source space + transformation.inverseTransform(packBox); + + core::FieldBox{field, gridLayout, phare_box_from(packBox)} + .append_to(buffer); } - // throw, we don't do rotations in phare.... // Once we have fill the buffer, we send it on the stream stream.pack(buffer.data(), buffer.size()); @@ -262,50 +250,42 @@ namespace amr - /*** \brief Unserialize data contained on the stream, that comes from a region covered by * the overlap, and fill the data where is needed. 
*/ void unpackStream(SAMRAI::tbox::MessageStream& stream, const SAMRAI::hier::BoxOverlap& overlap) final { - PHARE_LOG_SCOPE(3, "unpackStream"); - - // For unpacking we need to know how much element we will need to - // extract - std::size_t expectedSize = getDataStreamSize(overlap) / sizeof(double); + unpackStream(stream, overlap, field); + } - std::vector buffer; - buffer.resize(expectedSize, 0.); + template + void unpackStream(SAMRAI::tbox::MessageStream& stream, + const SAMRAI::hier::BoxOverlap& overlap, Grid_t& dst) + { + PHARE_LOG_SCOPE(3, "unpackStream"); auto& fieldOverlap = dynamic_cast(overlap); - // We flush a portion of the stream on the buffer. - stream.unpack(buffer.data(), expectedSize); - - SAMRAI::hier::Transformation const& transformation = fieldOverlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) - { - // Here the seek counter will be used to index buffer - std::size_t seek = 0; - - SAMRAI::hier::BoxContainer const& boxContainer - = fieldOverlap.getDestinationBoxContainer(); - for (auto const& box : boxContainer) - { - // For unpackStream, there is no transformation needed, since all the box - // are on the destination space + if (fieldOverlap.getTransformation().getRotation() != NO_ROTATE) + return; - auto& source = field; - SAMRAI::hier::Box destination - = Geometry::toFieldBox(getBox(), quantity_, gridLayout); + // For unpacking we need to know how much element we will need to extract + std::vector buffer(getDataStreamSize(overlap) / sizeof(value_type), 0.); + // We flush a portion of the stream on the buffer. 
+ stream.unpack(buffer.data(), buffer.size()); - SAMRAI::hier::Box packBox{box * destination}; - + // Here the seek counter will be used to index buffer + std::size_t seek = 0; - internals_.unpackImpl(seek, buffer, source, packBox, destination); - } + // For unpackStream, there is no transformation needed, since all the box + // are on the destination space + for (auto const& box : fieldOverlap.getDestinationBoxContainer()) + { + core::FieldBox{dst, gridLayout, phare_box_from(box)} + .template op(buffer, seek); + seek += box.size(); } } @@ -338,7 +318,9 @@ namespace amr } - + void sum(SAMRAI::hier::PatchData const& src, SAMRAI::hier::BoxOverlap const& overlap); + void unpackStreamAndSum(SAMRAI::tbox::MessageStream& stream, + SAMRAI::hier::BoxOverlap const& overlap); GridLayoutT gridLayout; Grid_t field; @@ -352,28 +334,33 @@ namespace amr /*** \brief copy data from the intersection box * */ + + template void copy_(SAMRAI::hier::Box const& intersectBox, SAMRAI::hier::Box const& sourceBox, - SAMRAI::hier::Box const& destinationBox, - [[maybe_unused]] FieldData const& source, Grid_t const& fieldSource, + SAMRAI::hier::Box const& destinationBox, FieldData const& source, Grid_t& fieldDestination) { // First we represent the intersection that is defined in AMR space to the local space // of the source - - SAMRAI::hier::Box localSourceBox{AMRToLocal(intersectBox, sourceBox)}; - // Then we represent the intersection into the local space of the destination - SAMRAI::hier::Box localDestinationBox{AMRToLocal(intersectBox, destinationBox)}; - - // We can finally perform the copy of the element in the correct range - internals_.copyImpl(localSourceBox, fieldSource, localDestinationBox, fieldDestination); - } - + core::FieldBox{ + fieldDestination, gridLayout, + phare_lcl_box_from(AMRToLocal(intersectBox, destinationBox))} + .template op(core::FieldBox{ + source.field, source.gridLayout, + phare_lcl_box_from(AMRToLocal(intersectBox, sourceBox))}); + } void copy_(FieldData const& 
source, FieldOverlap const& overlap) + { + copy_(source, overlap, field); + } + + template + void copy_(FieldData const& source, FieldOverlap const& overlap, Grid_t& dst) { // Here the first step is to get the transformation from the overlap // we transform the box from the source, and from the destination @@ -385,7 +372,7 @@ namespace amr SAMRAI::hier::Transformation const& transformation = overlap.getTransformation(); - if (transformation.getRotation() == SAMRAI::hier::Transformation::NO_ROTATE) + if (transformation.getRotation() == NO_ROTATE) { SAMRAI::hier::BoxContainer const& boxList = overlap.getDestinationBoxContainer(); @@ -396,29 +383,21 @@ namespace amr { for (auto const& box : boxList) { - SAMRAI::hier::Box sourceBox + SAMRAI::hier::Box const sourceBox = Geometry::toFieldBox(source.getBox(), quantity_, source.gridLayout); - - SAMRAI::hier::Box destinationBox + SAMRAI::hier::Box const destinationBox = Geometry::toFieldBox(this->getBox(), quantity_, this->gridLayout); - SAMRAI::hier::Box transformedSource{sourceBox}; transformation.transform(transformedSource); - - SAMRAI::hier::Box intersectionBox{box * transformedSource * destinationBox}; - + SAMRAI::hier::Box const intersectionBox{box * transformedSource + * destinationBox}; if (!intersectionBox.empty()) - { - Grid_t const& sourceField = source.field; - Grid_t& destinationField = field; - - copy_(intersectionBox, transformedSource, destinationBox, source, - sourceField, destinationField); - } + copy_(intersectionBox, transformedSource, destinationBox, + source, dst); } } } @@ -430,292 +409,68 @@ namespace amr /*** \brief Compute the maximum amount of memory needed to hold FieldData information on - * the specified overlap, this version work on the source, or the destination - * depending on withTransform parameter + * the specified overlap */ - template std::size_t getDataStreamSize_(SAMRAI::hier::BoxOverlap const& overlap) const { // The idea here is to tell SAMRAI the maximum memory will be used by 
our type // on a given region. - // throws on failure auto& fieldOverlap = dynamic_cast(overlap); if (fieldOverlap.isOverlapEmpty()) - { return 0; - } // TODO: see FieldDataFactory todo of the same function SAMRAI::hier::BoxContainer const& boxContainer = fieldOverlap.getDestinationBoxContainer(); - return boxContainer.getTotalSizeOfBoxes() * sizeof(typename Grid_t::type); - } - - - FieldDataInternals internals_; - }; // namespace PHARE - - - - - // 1D internals implementation - template - class FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - destination(xDestination) = source(xSource); - } - } - - - - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& sourceBox) const - { - int xStart = overlap.lower(0) - sourceBox.lower(0); - int xEnd = overlap.upper(0) - sourceBox.lower(0); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - buffer.push_back(source(xi)); - } - } - - - - void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - source(xi) = buffer[seek]; - ++seek; - } - } - }; - - - - // 2D internals implementation - template - class 
FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1)); - std::uint32_t yDestinationStart - = static_cast(localDestinationBox.lower(1)); - - std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1)); - std::uint32_t yDestinationEnd - = static_cast(localDestinationBox.upper(1)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart; - ySource <= ySourceEnd && yDestination <= yDestinationEnd; - ++ySource, ++yDestination) - { - destination(xDestination, yDestination) = source(xSource, ySource); - } - } - } - - - - - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const - - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); - - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - buffer.push_back(source(xi, yi)); - } - } - } - - - - - void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - 
destination.lower(0); - - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); - - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - source(xi, yi) = buffer[seek]; - ++seek; - } - } + return boxContainer.getTotalSizeOfBoxes() * sizeof(value_type); } }; - // 3D internals implementation - template - class FieldDataInternals - { - public: - void copyImpl(SAMRAI::hier::Box const& localSourceBox, Grid_t const& source, - SAMRAI::hier::Box const& localDestinationBox, Grid_t& destination) const - { - std::uint32_t xSourceStart = static_cast(localSourceBox.lower(0)); - std::uint32_t xDestinationStart - = static_cast(localDestinationBox.lower(0)); - - std::uint32_t xSourceEnd = static_cast(localSourceBox.upper(0)); - std::uint32_t xDestinationEnd - = static_cast(localDestinationBox.upper(0)); - - std::uint32_t ySourceStart = static_cast(localSourceBox.lower(1)); - std::uint32_t yDestinationStart - = static_cast(localDestinationBox.lower(1)); - - std::uint32_t ySourceEnd = static_cast(localSourceBox.upper(1)); - std::uint32_t yDestinationEnd - = static_cast(localDestinationBox.upper(1)); - - std::uint32_t zSourceStart = static_cast(localSourceBox.lower(2)); - std::uint32_t zDestinationStart - = static_cast(localDestinationBox.lower(2)); - - std::uint32_t zSourceEnd = static_cast(localSourceBox.upper(2)); - std::uint32_t zDestinationEnd - = static_cast(localDestinationBox.upper(2)); - - for (std::uint32_t xSource = xSourceStart, xDestination = xDestinationStart; - xSource <= xSourceEnd && xDestination <= xDestinationEnd; - ++xSource, ++xDestination) - { - for (std::uint32_t ySource = ySourceStart, yDestination = yDestinationStart; - ySource <= ySourceEnd && yDestination <= yDestinationEnd; - ++ySource, ++yDestination) - { - for (std::uint32_t zSource = zSourceStart, zDestination = zDestinationStart; - zSource <= zSourceEnd && zDestination <= zDestinationEnd; - ++zSource, 
++zDestination) - { - destination(xDestination, yDestination, zDestination) - = source(xSource, ySource, zSource); - } - } - } - } - +} // namespace amr +} // namespace PHARE - void packImpl(std::vector& buffer, Grid_t const& source, - SAMRAI::hier::Box const& overlap, SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); +namespace PHARE::amr +{ - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); - int zStart = overlap.lower(2) - destination.lower(2); - int zEnd = overlap.upper(2) - destination.lower(2); +template +void FieldData::unpackStreamAndSum( + SAMRAI::tbox::MessageStream& stream, SAMRAI::hier::BoxOverlap const& overlap) +{ + using PlusEqualOp = core::PlusEquals; - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - for (int zi = zStart; zi <= zEnd; ++zi) - { - buffer.push_back(source(xi, yi, zi)); - } - } - } - } + auto& fieldOverlap = dynamic_cast(overlap); + unpackStream(stream, overlap, field); +} - void unpackImpl(std::size_t& seek, std::vector const& buffer, Grid_t& source, - SAMRAI::hier::Box const& overlap, - SAMRAI::hier::Box const& destination) const - { - int xStart = overlap.lower(0) - destination.lower(0); - int xEnd = overlap.upper(0) - destination.lower(0); +template +void FieldData::sum(SAMRAI::hier::PatchData const& src, + SAMRAI::hier::BoxOverlap const& overlap) +{ + using PlusEqualOp = core::PlusEquals; - int yStart = overlap.lower(1) - destination.lower(1); - int yEnd = overlap.upper(1) - destination.lower(1); + TBOX_ASSERT_OBJDIM_EQUALITY2(*this, src); - int zStart = overlap.lower(2) - destination.lower(2); - int zEnd = overlap.upper(2) - destination.lower(2); + auto& fieldOverlap = dynamic_cast(overlap); + auto& fieldSource = dynamic_cast(src); - for (int xi = xStart; xi <= xEnd; ++xi) - { - for (int yi = yStart; yi <= yEnd; ++yi) - { - 
for (int zi = zStart; zi <= zEnd; ++zi) - { - source(xi, yi, zi) = buffer[seek]; - ++seek; - } - } - } - } - }; + copy_(fieldSource, fieldOverlap, field); +} -} // namespace amr -} // namespace PHARE +} // namespace PHARE::amr #endif diff --git a/src/amr/data/field/field_variable_fill_pattern.hpp b/src/amr/data/field/field_variable_fill_pattern.hpp index aee5aa3a1..dad029738 100644 --- a/src/amr/data/field/field_variable_fill_pattern.hpp +++ b/src/amr/data/field/field_variable_fill_pattern.hpp @@ -1,15 +1,17 @@ #ifndef PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP #define PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_HPP -#include #include "core/def/phare_mpi.hpp" +#include + +#include +#include "amr/data/field/field_geometry.hpp" +#include #include "SAMRAI/xfer/VariableFillPattern.h" -#include "core/utilities/types.hpp" -#include "core/utilities/mpi_utils.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" +#include namespace PHARE::amr { @@ -46,21 +48,11 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern constexpr static std::size_t dim = dimension; public: - FieldFillPattern(std::optional overwrite_interior) + FieldFillPattern(std::optional overwrite_interior = false) : opt_overwrite_interior_{overwrite_interior} { } - static auto make_shared(std::shared_ptr const& samrai_op) - { - auto const& op = dynamic_cast(*samrai_op); - - if (op.node_only) - return std::make_shared>(std::nullopt); - - return std::make_shared>(false); - } - virtual ~FieldFillPattern() {} @@ -69,7 +61,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern SAMRAI::hier::BoxGeometry const& src_geometry, SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, SAMRAI::hier::Box const& fill_box, bool const fn_overwrite_interior, - SAMRAI::hier::Transformation const& transformation) const + SAMRAI::hier::Transformation const& transformation) const override { #ifndef DEBUG_CHECK_DIM_ASSERTIONS NULL_USE(dst_patch_box); @@ 
-83,29 +75,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern { // this sets overwrite_interior to false overwrite_interior = *opt_overwrite_interior_; - } - - // opt_overwrite_interior_ is nullopt : assume primal node shared border schedule - else - { - // cast into the Base class to get the pureInteriorFieldBox method - // see field_geometry.hpp for more explanations about why this base class exists - auto& dst_cast = dynamic_cast const&>(dst_geometry); - auto& src_cast = dynamic_cast const&>(src_geometry); - - if (src_cast.patchBox.getGlobalId().getOwnerRank() - != dst_cast.patchBox.getGlobalId().getOwnerRank()) - overwrite_interior - = src_cast.patchBox.getGlobalId() > dst_cast.patchBox.getGlobalId(); - - auto basic_overlap = dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, - overwrite_interior, transformation); - auto& overlap = dynamic_cast(*basic_overlap); - - auto destinationBoxes = overlap.getDestinationBoxContainer(); - destinationBoxes.removeIntersections(src_cast.pureInteriorFieldBox()); - - return std::make_shared(destinationBoxes, overlap.getTransformation()); + assert(overwrite_interior == false); // never true } // overwrite_interior is always false here @@ -113,15 +83,15 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern transformation); } - std::string const& getPatternName() const { return s_name_id; } + std::string const& getPatternName() const override { return s_name_id; } private: FieldFillPattern(FieldFillPattern const&) = delete; FieldFillPattern& operator=(FieldFillPattern const&) = delete; - static const inline std::string s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; + static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; - SAMRAI::hier::IntVector const& getStencilWidth() + SAMRAI::hier::IntVector const& getStencilWidth() override { TBOX_ERROR("getStencilWidth() should not be\n" << "called. 
This pattern creates overlaps based on\n" @@ -146,7 +116,7 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, SAMRAI::hier::BoxContainer const& node_fill_boxes, SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, - SAMRAI::hier::PatchDataFactory const& pdf) const + SAMRAI::hier::PatchDataFactory const& pdf) const override { NULL_USE(node_fill_boxes); @@ -162,9 +132,78 @@ class FieldFillPattern : public SAMRAI::xfer::VariableFillPattern return pdf.getBoxGeometry(patch_box)->setUpOverlap(overlap_boxes, transformation); } - std::optional opt_overwrite_interior_{nullptr}; + std::optional opt_overwrite_interior_{std::nullopt}; +}; + + + + +template // ASSUMED ALL PRIMAL! +class FieldGhostInterpOverlapFillPattern : public SAMRAI::xfer::VariableFillPattern +{ + std::size_t constexpr static dim = Gridlayout_t::dimension; + using FieldGeometry_t = FieldGeometryBase; + +public: + FieldGhostInterpOverlapFillPattern() {} + ~FieldGhostInterpOverlapFillPattern() override {} + + std::shared_ptr + calculateOverlap(SAMRAI::hier::BoxGeometry const& dst_geometry, + SAMRAI::hier::BoxGeometry const& src_geometry, + SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, + SAMRAI::hier::Box const& fill_box, bool const overwrite_interior, + SAMRAI::hier::Transformation const& transformation) const override + { + PHARE_LOG_SCOPE(3, "FieldGhostInterpOverlapFillPattern::calculateOverlap"); + + if (phare_box_from(dst_patch_box) == phare_box_from(src_mask)) + return std::make_shared(SAMRAI::hier::BoxContainer{}, transformation); + + auto const _primal_ghost_box = [](auto const& box) { + auto gb = grow(box, Gridlayout_t::nbrGhosts()); + gb.upper += 1; + return gb; + }; + + auto const src_ghost_box + = core::shift(_primal_ghost_box(phare_box_from( + dynamic_cast(src_geometry).patchBox)), + as_point(transformation)); + + auto const dst_ghost_box = _primal_ghost_box( + 
phare_box_from(dynamic_cast(dst_geometry).patchBox)); + + SAMRAI::hier::BoxContainer dest; + if (auto overlap = dst_ghost_box * src_ghost_box) + dest.push_back(samrai_box_from(*overlap)); + + return std::make_shared(dest, transformation); + } + + std::string const& getPatternName() const override { return s_name_id; } + +private: + static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; + + SAMRAI::hier::IntVector const& getStencilWidth() override + { + throw std::runtime_error("never called"); + } + + + std::shared_ptr + computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, + SAMRAI::hier::BoxContainer const& node_fill_boxes, + SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, + SAMRAI::hier::PatchDataFactory const& pdf) const override + { + throw std::runtime_error("no refinement supported or expected"); + } }; + + } // namespace PHARE::amr #endif /* PHARE_SRC_AMR_FIELD_FIELD_VARIABLE_FILL_PATTERN_H */ diff --git a/src/amr/data/field/refine/field_refine_operator.hpp b/src/amr/data/field/refine/field_refine_operator.hpp index 436195b2d..9136e3a9d 100644 --- a/src/amr/data/field/refine/field_refine_operator.hpp +++ b/src/amr/data/field/refine/field_refine_operator.hpp @@ -6,30 +6,23 @@ #include "core/def.hpp" #include "amr/data/field/field_data.hpp" -#include "amr/data/field/field_geometry.hpp" -#include "core/data/grid/gridlayout.hpp" + #include "field_linear_refine.hpp" -#include "field_refiner.hpp" -#include #include +#include + #include -#include namespace PHARE::amr { -class AFieldRefineOperator +class AFieldRefineOperator // castable templateless identifier { public: - AFieldRefineOperator(bool b) - : node_only{b} - { - } + AFieldRefineOperator() {} virtual ~AFieldRefineOperator() {} - - bool const node_only = false; }; @@ -48,7 +41,7 @@ class FieldRefineOperator : public SAMRAI::hier::RefineOperator, public AFieldRe FieldRefineOperator(bool node_only = false) : 
SAMRAI::hier::RefineOperator{"FieldRefineOperator"} - , AFieldRefineOperator{node_only} + , AFieldRefineOperator{} { } diff --git a/src/amr/data/particles/particles_data.hpp b/src/amr/data/particles/particles_data.hpp index 803247447..6ec30f630 100644 --- a/src/amr/data/particles/particles_data.hpp +++ b/src/amr/data/particles/particles_data.hpp @@ -1,12 +1,6 @@ #ifndef PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP #define PHARE_SRC_AMR_DATA_PARTICLES_PARTICLES_DATA_HPP -#include -#include -#include -#include -#include - #include "core/def/phare_mpi.hpp" @@ -25,10 +19,14 @@ #include "core/data/particles/particle_packer.hpp" #include "amr/resources_manager/amr_utils.hpp" #include "amr/utilities/box/amr_box.hpp" -#include "core/utilities/point/point.hpp" -#include "core/logger.hpp" +#include +#include +#include +#include +#include +#include namespace PHARE @@ -37,45 +35,6 @@ namespace amr { - template - NO_DISCARD inline bool isInBox(SAMRAI::hier::Box const& box, Particle const& particle) - { - constexpr auto dim = Particle::dimension; - - auto const& iCell = particle.iCell; - - auto const& lower = box.lower(); - auto const& upper = box.upper(); - - - if (iCell[0] >= lower(0) && iCell[0] <= upper(0)) - { - if constexpr (dim > 1) - { - if (iCell[1] >= lower(1) && iCell[1] <= upper(1)) - { - if constexpr (dim > 2) - { - if (iCell[2] >= lower(2) && iCell[2] <= upper(2)) - { - return true; - } - } - else - { - return true; - } - } - } - else - { - return true; - } - } - return false; - } - - /** @brief ParticlesData is a concrete SAMRAI::hier::PatchData subclass to store Particle data * * This class encapsulates particle storage known by the module core, and by being derived @@ -170,7 +129,6 @@ namespace amr }; putParticles("domainParticles", domainParticles); - putParticles("patchGhostParticles", patchGhostParticles); putParticles("levelGhostParticles", levelGhostParticles); putParticles("levelGhostParticlesNew", levelGhostParticlesNew); 
putParticles("levelGhostParticlesOld", levelGhostParticlesOld); @@ -215,7 +173,6 @@ namespace amr }; getParticles("domainParticles", domainParticles); - getParticles("patchGhostParticles", patchGhostParticles); getParticles("levelGhostParticles", levelGhostParticles); getParticles("levelGhostParticlesNew", levelGhostParticlesNew); getParticles("levelGhostParticlesOld", levelGhostParticlesOld); @@ -270,13 +227,26 @@ namespace amr * the copy must account for the intersection with the boxes within the overlap * The copy is done between the source patch data and myself */ + + template + void copy_from_ghost(Args&&... args); + + void copy(SAMRAI::hier::PatchData const& source, SAMRAI::hier::BoxOverlap const& overlap) override { PHARE_LOG_SCOPE(3, "ParticlesData::copy with overlap"); // casts throw on failure - auto& pSource = dynamic_cast(source); + auto& pSource = dynamic_cast(source); + + if (auto casted = dynamic_cast(&overlap)) + { + copy_from_ghost(pSource, *casted); + return; + } + // else + auto& pOverlap = dynamic_cast(overlap); SAMRAI::hier::Transformation const& transformation = pOverlap.getTransformation(); @@ -303,9 +273,23 @@ namespace amr std::size_t getDataStreamSize(SAMRAI::hier::BoxOverlap const& overlap) const override { + if (auto casted = dynamic_cast(&overlap)) + { + auto& pOverlap = *casted; + auto& transformation = pOverlap.getTransformation(); + auto const& offset = as_point(transformation); + auto const& noffset = offset * -1; + std::size_t numberParticles = 0; + for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + numberParticles += patchGhostParticles.nbr_particles_in( + shift(phare_box_from(overlapBox), noffset)); + return sizeof(std::size_t) + numberParticles * sizeof(Particle_t); + } + // else + auto const& pOverlap{dynamic_cast(overlap)}; - return countNumberParticlesIn_(pOverlap) * sizeof(Particle_t); + return sizeof(std::size_t) + countNumberParticlesIn_(pOverlap) * sizeof(Particle_t); } @@ -316,22 +300,33 @@ namespace 
amr * that lie in the boxes of the given overlap, and pack them to a stream. * * Streaming particles means that we have to take particles with iCell on a local source - * index space , communicate them, and load them at destination with iCell on a destination - * local index space. To do that we need to: + * index space , communicate them, and load them at destination with iCell on a + * destination local index space. To do that we need to: * * 1- translate source iCell to source AMR index space * 2- Apply the offset to shift this AMR index on top of the destination cells * 3- pack and communicate particles - * 4- move back iCell from the shifted AMR index space to the local destination index space + * 4- move back iCell from the shifted AMR index space to the local destination index + * space * * Note that step 2 could be done upon reception of the pack, we chose to do it before. * */ + + template + void pack_from_ghost(Args&&... args) const; + void packStream(SAMRAI::tbox::MessageStream& stream, SAMRAI::hier::BoxOverlap const& overlap) const override { PHARE_LOG_SCOPE(3, "ParticleData::packStream"); + if (auto casted = dynamic_cast(&overlap)) + { + pack_from_ghost(stream, *casted); + return; + } + auto const& pOverlap{dynamic_cast(overlap)}; std::vector outBuffer; @@ -356,22 +351,32 @@ namespace amr /** - * @brief unpackStream is the function that unpacks a stream of particles to our particle - * arrays. + * @brief unpackStream is the function that unpacks a stream of particles to our + * particle arrays. * * We get a stream and an overlap. The overlap contains boxes where to put particles and * transformation from source to destination AMR indexes. * - * By convention chosen in patckStream, packed particles have their iCell in our AMR index - * space. 
This means that before putting them into our local arrays, we need to apply - AMRToLocal() to get the proper shift to apply to them + * By convention chosen in packStream, packed particles have their iCell in our AMR + * index space. This means that before putting them into our local arrays, we need to + * apply AMRToLocal() to get the proper shift to apply to them * */ + + template + void unpack_from_ghost(Args&&... args); + void unpackStream(SAMRAI::tbox::MessageStream& stream, SAMRAI::hier::BoxOverlap const& overlap) override { PHARE_LOG_SCOPE(3, "ParticleData::unpackStream"); + if (auto casted = dynamic_cast(&overlap)) + { + unpack_from_ghost(stream, *casted); + return; + } + auto const& pOverlap{dynamic_cast(overlap)}; if (!pOverlap.isOverlapEmpty()) @@ -402,8 +407,8 @@ namespace amr { // our goal here is : // 1/ to check if each particle is in the intersect of the overlap boxes - // and our ghostBox 2/ if yes, check if these particles should go within the - // interior array or ghost array + // and our ghostBox 2/ if yes, check if these particles should go within + // the interior array or ghost array auto const intersect = getGhostBox() * overlapBox; for (auto const& particle : particleArray) @@ -446,9 +451,9 @@ namespace amr private: - //! interiorLocalBox_ is the box, in local index space, that goes from the first to the last - //! cell in our patch physical domain, i.e. "from dual physical start index to dual physical - //! end index" + //! interiorLocalBox_ is the box, in local index space, that goes from the first to the + //! last cell in our patch physical domain, i.e. "from dual physical start index to dual + //! physical end index" SAMRAI::hier::Box interiorLocalBox_; std::string name_; @@ -671,8 +676,93 @@ namespace amr } } }; -} // namespace amr + +} // namespace amr } // namespace PHARE + +namespace PHARE::amr +{ + +template +template +void ParticlesData::copy_from_ghost(Args&&... 
args) +{ + PHARE_LOG_SCOPE(3, "ParticlesData::copy_from_ghost"); + + auto&& [pSource, pOverlap] = std::forward_as_tuple(args...); + auto& src_particles = pSource.patchGhostParticles; + auto& dst_particles = domainParticles; + auto const& offset = as_point(pOverlap.getTransformation()); + auto const& noffset = offset * -1; + + auto const offseter = [&](auto const& particle) { + auto shiftedParticle{particle}; + for (std::size_t idir = 0; idir < dim; ++idir) + shiftedParticle.iCell[idir] += offset[idir]; + return shiftedParticle; + }; + for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + src_particles.export_particles(shift(phare_box_from(overlapBox), noffset), + dst_particles, offseter); +} + + + +template +template +void ParticlesData::pack_from_ghost(Args&&... args) const +{ + PHARE_LOG_SCOPE(3, "ParticlesData::pack_from_ghost"); + + auto&& [stream, pOverlap] = std::forward_as_tuple(args...); + + if (pOverlap.isOverlapEmpty()) + { + constexpr std::size_t zero = 0; + stream << zero; + return; + } + + std::vector outBuffer; + auto& src_particles = patchGhostParticles; + auto const& offset = as_point(pOverlap.getTransformation()); + auto const& noffset = offset * -1; + + auto const offseter = [&](auto const& particle) { + auto shiftedParticle{particle}; + for (std::size_t idir = 0; idir < dim; ++idir) + shiftedParticle.iCell[idir] += offset[idir]; + return shiftedParticle; + }; + + for (auto const& overlapBox : pOverlap.getDestinationBoxContainer()) + src_particles.export_particles(shift(phare_box_from(overlapBox), noffset), outBuffer, + offseter); + + stream << outBuffer.size(); + stream.growBufferAsNeeded(); + stream.pack(outBuffer.data(), outBuffer.size()); +} + + +template +template +void ParticlesData::unpack_from_ghost(Args&&... 
args) +{ + PHARE_LOG_SCOPE(3, "ParticlesData::unpack_from_ghost"); + + auto&& [stream, pOverlap] = std::forward_as_tuple(args...); + std::size_t numberParticles = 0; + stream >> numberParticles; + std::vector particleArray(numberParticles); + stream.unpack(particleArray.data(), numberParticles); + + for (auto const& p : particleArray) + domainParticles.push_back(p); +} + +} // namespace PHARE::amr + #endif diff --git a/src/amr/data/particles/particles_variable_fill_pattern.hpp b/src/amr/data/particles/particles_variable_fill_pattern.hpp new file mode 100644 index 000000000..4ad1a0250 --- /dev/null +++ b/src/amr/data/particles/particles_variable_fill_pattern.hpp @@ -0,0 +1,110 @@ +#ifndef PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_HPP +#define PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_HPP + +#include "core/logger.hpp" +#include "core/def/phare_mpi.hpp" +#include + +#include +#include +#include +#include +#include "SAMRAI/xfer/VariableFillPattern.h" + +#include +#include + +namespace PHARE::amr +{ +class ParticlesDomainOverlap : public SAMRAI::pdat::CellOverlap +{ + using Super = SAMRAI::pdat::CellOverlap; + +public: + ParticlesDomainOverlap(SAMRAI::hier::BoxContainer const& boxes, + SAMRAI::hier::Transformation const& transformation) + : Super{boxes, transformation} + { + } + + ~ParticlesDomainOverlap() = default; +}; + + + +template +class ParticleDomainFromGhostFillPattern : public SAMRAI::xfer::VariableFillPattern +{ + std::size_t constexpr static dim = GridLayout_t::dimension; + bool constexpr static overwrite_interior = false; + +public: + ParticleDomainFromGhostFillPattern() {} + + virtual ~ParticleDomainFromGhostFillPattern() {} + + std::shared_ptr + calculateOverlap(SAMRAI::hier::BoxGeometry const& dst_geometry, + SAMRAI::hier::BoxGeometry const& src_geometry, + SAMRAI::hier::Box const& dst_patch_box, SAMRAI::hier::Box const& src_mask, + SAMRAI::hier::Box const& fill_box, bool const fn_overwrite_interior, + 
SAMRAI::hier::Transformation const& transformation) const override + { + PHARE_LOG_SCOPE(3, "ParticleDomainFromGhostFillPattern::calculateOverlap"); +#ifndef DEBUG_CHECK_DIM_ASSERTIONS + NULL_USE(dst_patch_box); +#endif + TBOX_ASSERT_OBJDIM_EQUALITY2(dst_patch_box, src_mask); + + auto basic_overlap = dst_geometry.calculateOverlap(src_geometry, src_mask, fill_box, + overwrite_interior, transformation); + + auto& cell_overlap = dynamic_cast(*basic_overlap); + + SAMRAI::hier::BoxContainer boxes; + for (auto const& box : cell_overlap.getDestinationBoxContainer()) + { + auto const ghost_overlap + = grow(phare_box_from(box), GridLayout_t::nbrParticleGhosts()); + auto const domain_overlap = ghost_overlap * phare_box_from(dst_patch_box); + boxes.pushBack(samrai_box_from(*domain_overlap)); + } + + return std::make_shared(boxes, cell_overlap.getTransformation()); + } + + std::string const& getPatternName() const override { return s_name_id; } + +private: + ParticleDomainFromGhostFillPattern(ParticleDomainFromGhostFillPattern const&) = delete; + ParticleDomainFromGhostFillPattern& operator=(ParticleDomainFromGhostFillPattern const&) + = delete; + + static inline std::string const s_name_id = "BOX_GEOMETRY_FILL_PATTERN"; + + SAMRAI::hier::IntVector const& getStencilWidth() override + { + TBOX_ERROR("getStencilWidth() should not be\n" + << "called. 
This pattern creates overlaps based on\n" + << "the BoxGeometry objects and is not restricted to a\n" + << "specific stencil.\n"); + + return SAMRAI::hier::IntVector::getZero(SAMRAI::tbox::Dimension(1)); + } + + + std::shared_ptr + computeFillBoxesOverlap(SAMRAI::hier::BoxContainer const& fill_boxes, + SAMRAI::hier::BoxContainer const& node_fill_boxes, + SAMRAI::hier::Box const& patch_box, SAMRAI::hier::Box const& data_box, + SAMRAI::hier::PatchDataFactory const& pdf) const override + { + PHARE_LOG_SCOPE(2, "ParticleDomainFromGhostFillPattern::computeFillBoxesOverlap"); + + throw std::runtime_error("no refinement supported or expected"); + } +}; + +} // namespace PHARE::amr + +#endif /* PHARE_SRC_AMR_PARTICLES_PARTICLES_VARIABLE_FILL_PATTERN_H */ diff --git a/src/amr/level_initializer/hybrid_level_initializer.hpp b/src/amr/level_initializer/hybrid_level_initializer.hpp index e0ef8386a..8bc9ab0ca 100644 --- a/src/amr/level_initializer/hybrid_level_initializer.hpp +++ b/src/amr/level_initializer/hybrid_level_initializer.hpp @@ -79,8 +79,7 @@ namespace solver } } - // now all particles are here - // we must compute moments. + // now all particles are here, we must compute moments. 
for (auto& patch : level) { @@ -91,7 +90,18 @@ namespace solver core::resetMoments(ions); core::depositParticles(ions, layout, interpolate_, core::DomainDeposit{}); - core::depositParticles(ions, layout, interpolate_, core::PatchGhostDeposit{}); + } + + hybMessenger.fillFluxBorders(hybridModel.state.ions, level, initDataTime); + hybMessenger.fillDensityBorders(hybridModel.state.ions, level, initDataTime); + + + for (auto& patch : level) + { + auto& ions = hybridModel.state.ions; + auto& resourcesManager = hybridModel.resourcesManager; + auto dataOnPatch = resourcesManager->setOnPatch(*patch, ions); + auto layout = amr::layoutFromPatch(*patch); if (!isRootLevel(levelNumber)) { diff --git a/src/amr/messengers/communicator.hpp b/src/amr/messengers/communicator.hpp index 821e8a54d..df16eb24d 100644 --- a/src/amr/messengers/communicator.hpp +++ b/src/amr/messengers/communicator.hpp @@ -52,6 +52,11 @@ namespace amr public: + Communicator() {} + virtual ~Communicator() {} + Communicator(Communicator const&) = delete; + Communicator(Communicator&&) = default; + // we have an algorithm for each quantity, like Bx, By, Bz // even if they are to be refined/synced together. 
// the reason is that SAMRAI assumes that all Variables registered diff --git a/src/amr/messengers/field_sum_transaction.hpp b/src/amr/messengers/field_sum_transaction.hpp new file mode 100644 index 000000000..f6beca80c --- /dev/null +++ b/src/amr/messengers/field_sum_transaction.hpp @@ -0,0 +1,234 @@ +#ifndef PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP +#define PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP + +#include "core/logger.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace PHARE::amr +{ + + +template +class FieldBorderSumTransaction : public SAMRAI::tbox::Transaction +{ +public: + FieldBorderSumTransaction(std::shared_ptr const& dst_level, + std::shared_ptr const& src_level, + std::shared_ptr const& overlap, + SAMRAI::hier::Box const& dst_node, SAMRAI::hier::Box const& src_node, + SAMRAI::xfer::RefineClasses::Data const** refine_data, int item_id) + : d_dst_level(dst_level) + , d_src_level(src_level) + , d_overlap(overlap) + , d_dst_node(dst_node) + , d_src_node(src_node) + , d_refine_data(refine_data) + , d_item_id(item_id) + , d_incoming_bytes(0) + , d_outgoing_bytes(0) + { + TBOX_ASSERT(dst_level); + TBOX_ASSERT(src_level); + TBOX_ASSERT(overlap); + TBOX_ASSERT(dst_node.getLocalId() >= 0); + TBOX_ASSERT(src_node.getLocalId() >= 0); + TBOX_ASSERT(item_id >= 0); + TBOX_ASSERT(refine_data[item_id] != 0); + + TBOX_ASSERT_OBJDIM_EQUALITY4(*dst_level, *src_level, dst_node, src_node); + } + + virtual ~FieldBorderSumTransaction() {} + + + virtual bool canEstimateIncomingMessageSize(); + + virtual size_t computeIncomingMessageSize(); + + virtual size_t computeOutgoingMessageSize(); + + virtual int getSourceProcessor(); + + virtual int getDestinationProcessor(); + + virtual void packStream(SAMRAI::tbox::MessageStream& stream); + + virtual void unpackStream(SAMRAI::tbox::MessageStream& stream); + + virtual void copyLocalData(); + + virtual void printClassData(std::ostream& stream) const; + 
+private: + std::shared_ptr d_dst_level; + std::shared_ptr d_src_level; + std::shared_ptr d_overlap; + SAMRAI::hier::Box d_dst_node; + SAMRAI::hier::Box d_src_node; + SAMRAI::xfer::RefineClasses::Data const** d_refine_data; + int d_item_id; + size_t d_incoming_bytes; + size_t d_outgoing_bytes; +}; + + +template +bool FieldBorderSumTransaction::canEstimateIncomingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::canEstimateIncomingMessageSize"); + bool can_estimate = false; + if (getSourceProcessor() == d_src_level->getBoxLevel()->getMPI().getRank()) + { + can_estimate = d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->canEstimateStreamSizeFromBox(); + } + else + { + can_estimate = d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch) + ->canEstimateStreamSizeFromBox(); + } + return can_estimate; +} + + +template +size_t FieldBorderSumTransaction::computeIncomingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::computeIncomingMessageSize"); + d_incoming_bytes = d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch) + ->getDataStreamSize(*d_overlap); + return d_incoming_bytes; +} + +template +size_t FieldBorderSumTransaction::computeOutgoingMessageSize() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::computeOutgoingMessageSize"); + d_outgoing_bytes = d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->getDataStreamSize(*d_overlap); + return d_outgoing_bytes; +} + +template +int FieldBorderSumTransaction::getSourceProcessor() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::getSourceProcessor"); + return d_src_node.getOwnerRank(); +} + +template +int FieldBorderSumTransaction::getDestinationProcessor() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::getDestinationProcessor"); + return d_dst_node.getOwnerRank(); +} + +template 
+void FieldBorderSumTransaction::packStream(SAMRAI::tbox::MessageStream& stream) +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::packStream"); + d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src) + ->packStream(stream, *d_overlap); +} + +template +void FieldBorderSumTransaction::unpackStream(SAMRAI::tbox::MessageStream& stream) +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::unpackStream"); + std::shared_ptr onode_dst_data( + SAMRAI_SHARED_PTR_CAST( + d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch))); + TBOX_ASSERT(onode_dst_data); + + onode_dst_data->unpackStreamAndSum(stream, *d_overlap); +} + + +template +void FieldBorderSumTransaction::printClassData(std::ostream& stream) const +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::printClassData"); + throw std::runtime_error("FieldBorderSumTransaction::printClassData!"); +} + +template +void FieldBorderSumTransaction::copyLocalData() +{ + PHARE_LOG_SCOPE(2, "FieldBorderSumTransaction::copyLocalData"); + std::shared_ptr onode_dst_data( + SAMRAI_SHARED_PTR_CAST( + d_dst_level->getPatch(d_dst_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_scratch))); + TBOX_ASSERT(onode_dst_data); + + std::shared_ptr onode_src_data( + SAMRAI_SHARED_PTR_CAST( + d_src_level->getPatch(d_src_node.getGlobalId()) + ->getPatchData(d_refine_data[d_item_id]->d_src))); + TBOX_ASSERT(onode_src_data); + + onode_dst_data->sum(*onode_src_data, *d_overlap); +#if defined(HAVE_RAJA) + tbox::parallel_synchronize(); +#endif +} + + +template +class FieldBorderSumTransactionFactory : public SAMRAI::xfer::RefineTransactionFactory +{ +public: + std::shared_ptr + allocate(std::shared_ptr const& dst_level, + std::shared_ptr const& src_level, + std::shared_ptr const& overlap, + SAMRAI::hier::Box const& dst_node, SAMRAI::hier::Box const& src_node, + SAMRAI::xfer::RefineClasses::Data const** refine_data, int item_id, + 
SAMRAI::hier::Box const& box, bool use_time_interpolation) const override + { + NULL_USE(box); + NULL_USE(use_time_interpolation); + + TBOX_ASSERT(dst_level); + TBOX_ASSERT(src_level); + TBOX_ASSERT(overlap); + TBOX_ASSERT(dst_node.getLocalId() >= 0); + TBOX_ASSERT(src_node.getLocalId() >= 0); + TBOX_ASSERT(refine_data != 0); + TBOX_ASSERT_OBJDIM_EQUALITY4(*dst_level, *src_level, dst_node, src_node); + + PHARE_LOG_SCOPE(2, "FieldBorderSumTransactionFactory::allocate"); + return std::make_shared>( + dst_level, src_level, overlap, dst_node, src_node, refine_data, item_id); + } + + void + preprocessScratchSpace(std::shared_ptr const& level, double fill_time, + SAMRAI::hier::ComponentSelector const& preprocess_vector) const override + { + PHARE_LOG_SCOPE(2, "FieldBorderSumTransactionFactory::preprocessScratchSpace"); + + // noop + } +}; + +} // namespace PHARE::amr + +#endif // PHARE_AMR_MESSENGERS_FIELD_SUM_TRANSACTION_HPP diff --git a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp index 6c708b40d..0de04f590 100644 --- a/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_hybrid_messenger_strategy.hpp @@ -4,42 +4,42 @@ #include "core/logger.hpp" #include "core/def/phare_mpi.hpp" -#include "SAMRAI/hier/CoarseFineBoundary.h" -#include "SAMRAI/hier/IntVector.h" +#include "core/utilities/point/point.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/hybrid/hybrid_quantities.hpp" +#include "core/data/vecfield/vecfield_component.hpp" +#include "core/numerics/interpolator/interpolator.hpp" + #include "refiner_pool.hpp" #include "synchronizer_pool.hpp" -#include "amr/data/field/coarsening/default_field_coarsener.hpp" -#include "amr/data/field/coarsening/magnetic_field_coarsener.hpp" -#include "amr/data/field/refine/field_refiner.hpp" -#include "amr/data/field/refine/magnetic_field_refiner.hpp" -#include "amr/data/field/refine/electric_field_refiner.hpp" 
-#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include "amr/data/field/refine/field_refine_operator.hpp" -#include "amr/data/field/coarsening/field_coarsen_operator.hpp" #include "amr/messengers/messenger_info.hpp" +#include "amr/resources_manager/amr_utils.hpp" +#include "amr/data/field/refine/field_refiner.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" #include "amr/messengers/hybrid_messenger_strategy.hpp" -#include "amr/resources_manager/amr_utils.hpp" - -#include "core/numerics/interpolator/interpolator.hpp" -#include "core/hybrid/hybrid_quantities.hpp" -#include "core/data/particles/particle_array.hpp" -#include "core/data/vecfield/vecfield_component.hpp" -#include "core/data/vecfield/vecfield.hpp" -#include "core/utilities/point/point.hpp" - +#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/data/field/refine/field_refine_operator.hpp" +#include "amr/data/field/refine/electric_field_refiner.hpp" +#include "amr/data/field/refine/magnetic_field_refiner.hpp" +#include "amr/data/field/coarsening/field_coarsen_operator.hpp" +#include "amr/data/field/coarsening/default_field_coarsener.hpp" +#include "amr/data/field/coarsening/magnetic_field_coarsener.hpp" +#include "amr/data/particles/particles_variable_fill_pattern.hpp" +#include "amr/data/field/time_interpolate/field_linear_time_interpolate.hpp" -#include +#include #include +#include +#include - -#include -#include +#include +#include #include #include #include -#include +#include + namespace PHARE @@ -92,9 +92,10 @@ namespace amr , resourcesManager_{std::move(manager)} , firstLevel_{firstLevel} { - resourcesManager_->registerResources(Jold_); - resourcesManager_->registerResources(NiOld_); - resourcesManager_->registerResources(ViOld_); + auto const reg + = [&](auto&... 
args) { (resourcesManager_->registerResources(args), ...); }; + + reg(Jold_, NiOld_, ViOld_, sumVec, sumField); } virtual ~HybridHybridMessengerStrategy() = default; @@ -112,9 +113,11 @@ namespace amr */ void allocate(SAMRAI::hier::Patch& patch, double const allocateTime) const override { - resourcesManager_->allocate(Jold_, patch, allocateTime); - resourcesManager_->allocate(NiOld_, patch, allocateTime); - resourcesManager_->allocate(ViOld_, patch, allocateTime); + auto const alloc = [&](auto&... args) { + (resourcesManager_->allocate(args, patch, allocateTime), ...); + }; + + alloc(Jold_, NiOld_, ViOld_, sumVec, sumField); } @@ -149,38 +152,45 @@ namespace amr { auto const level = hierarchy->getPatchLevel(levelNumber); - magSharedNodesRefiners_.registerLevel(hierarchy, level); - elecSharedNodesRefiners_.registerLevel(hierarchy, level); - currentSharedNodesRefiners_.registerLevel(hierarchy, level); + auto const register_refiners + = [&](auto&... refiners) { (refiners.registerLevel(hierarchy, level), ...); }; - magPatchGhostsRefiners_.registerLevel(hierarchy, level); - magGhostsRefiners_.registerLevel(hierarchy, level); - elecGhostsRefiners_.registerLevel(hierarchy, level); - currentGhostsRefiners_.registerLevel(hierarchy, level); + auto const register_vec + = [&](auto& vec) { std::for_each(vec.begin(), vec.end(), register_refiners); }; - rhoGhostsRefiners_.registerLevel(hierarchy, level); - velGhostsRefiners_.registerLevel(hierarchy, level); + auto const register_vecs = [&](auto&... 
vecs) { (register_vec(vecs), ...); }; - patchGhostPartRefiners_.registerLevel(hierarchy, level); + register_refiners( // + magPatchGhostsRefiners_, // + magGhostsRefiners_, // + elecGhostsRefiners_, // + currentGhostsRefiners_, // + rhoGhostsRefiners_, // + velGhostsRefiners_, // + domainGhostPartRefiners_ // + ); + register_vecs(popFluxBorderSumRefiners_, popDensityBorderSumRefiners_); // root level is not initialized with a schedule using coarser level data // so we don't create these schedules if root level // TODO this 'if' may not be OK if L0 is regrided if (levelNumber != rootLevelNumber) { - // those are for refinement - magneticInitRefiners_.registerLevel(hierarchy, level); - electricInitRefiners_.registerLevel(hierarchy, level); - domainParticlesRefiners_.registerLevel(hierarchy, level); - lvlGhostPartOldRefiners_.registerLevel(hierarchy, level); - lvlGhostPartNewRefiners_.registerLevel(hierarchy, level); - - // and these for coarsening - magnetoSynchronizers_.registerLevel(hierarchy, level); - electroSynchronizers_.registerLevel(hierarchy, level); - densitySynchronizers_.registerLevel(hierarchy, level); - ionBulkVelSynchronizers_.registerLevel(hierarchy, level); + register_refiners( + // these are for refinement + magneticInitRefiners_, // + electricInitRefiners_, // + domainParticlesRefiners_, // + lvlGhostPartOldRefiners_, // + lvlGhostPartNewRefiners_, // + + // and these for coarsening + magnetoSynchronizers_, // + electroSynchronizers_, // + densitySynchronizers_, // + ionBulkVelSynchronizers_ // + ); } } @@ -198,12 +208,11 @@ namespace amr auto& hybridModel = dynamic_cast(model); auto level = hierarchy->getPatchLevel(levelNumber); - bool isRegriddingL0 = levelNumber == 0 and oldLevel; + bool const isRegriddingL0 = levelNumber == 0 and oldLevel; magneticInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); electricInitRefiners_.regrid(hierarchy, levelNumber, oldLevel, initDataTime); domainParticlesRefiners_.regrid(hierarchy, 
levelNumber, oldLevel, initDataTime); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); // regriding will fill the new level wherever it has points that overlap @@ -219,9 +228,7 @@ namespace amr { auto& B = hybridModel.state.electromag.B; auto& E = hybridModel.state.electromag.E; - // magSharedNodesRefiners_.fill(B, levelNumber, initDataTime); magGhostsRefiners_.fill(B, levelNumber, initDataTime); - // elecSharedNodesRefiners_.fill(E, levelNumber, initDataTime); elecGhostsRefiners_.fill(E, levelNumber, initDataTime); fix_magnetic_divergence_(*hierarchy, levelNumber, B); @@ -285,12 +292,6 @@ namespace amr PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); domainParticlesRefiners_.fill(levelNumber, initDataTime); PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : interior part fill schedule"); - // however we need to call the ghost communicator for patch ghost particles - // since the interior schedules have a restriction to the interior of the patch. 
- PHARE_LOG_START(3, "hybhybmessengerStrat::initLevel : patch ghost part fill schedule"); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); - PHARE_LOG_STOP(3, "hybhybmessengerStrat::initLevel : patch ghost part fill schedule"); - lvlGhostPartOldRefiners_.fill(levelNumber, initDataTime); @@ -313,7 +314,6 @@ namespace amr void fillElectricGhosts(VecFieldT& E, int const levelNumber, double const fillTime) override { PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillElectricGhosts"); - elecSharedNodesRefiners_.fill(E, levelNumber, fillTime); elecGhostsRefiners_.fill(E, levelNumber, fillTime); } @@ -323,7 +323,6 @@ namespace amr void fillCurrentGhosts(VecFieldT& J, int const levelNumber, double const fillTime) override { PHARE_LOG_SCOPE(3, "HybridHybridMessengerStrategy::fillCurrentGhosts"); - currentSharedNodesRefiners_.fill(J, levelNumber, fillTime); currentGhostsRefiners_.fill(J, levelNumber, fillTime); } @@ -340,15 +339,78 @@ namespace amr { PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonGhostParticles"); - for (auto patch : level) + domainGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); + + visitLevel( + level, *resourcesManager_, + [&]() { + for (auto& pop : ions) + pop.patchGhostParticles().clear(); + }, + ions); + } + + + + void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) override + { + auto constexpr N = core::detail::tensor_field_dim_from_rank<1>(); + using value_type = FieldT::value_type; + + for (std::size_t i = 0; i < ions.size(); ++i) { - auto dataOnPatch = resourcesManager_->setOnPatch(*patch, ions); - for (auto& pop : ions) - { - pop.patchGhostParticles().clear(); - } + visitLevel( + level, *resourcesManager_, + [&]() { + auto& pop = *(ions.begin() + i); + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(sumVec[c].data(), pop.flux()[c].data(), + pop.flux()[c].size() * sizeof(value_type)); + }, + ions, sumVec); + + popFluxBorderSumRefiners_[i].fill(level.getLevelNumber(), 
fillTime); + + visitLevel( + level, *resourcesManager_, + [&]() { + auto& pop = *(ions.begin() + i); + for (std::uint8_t c = 0; c < N; ++c) + std::memcpy(pop.flux()[c].data(), sumVec[c].data(), + pop.flux()[c].size() * sizeof(value_type)); + }, + ions, sumVec); + } + } + + void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) override + { + using value_type = FieldT::value_type; + + for (std::size_t i = 0; i < ions.size(); ++i) + { + visitLevel( // copy ion pop density to tmp sum field + level, *resourcesManager_, + [&]() { + auto& pop = *(ions.begin() + i); + std::memcpy(sumField.data(), pop.density().data(), + pop.density().size() * sizeof(value_type)); + }, + ions, sumField); + + popDensityBorderSumRefiners_[i].fill(level.getLevelNumber(), fillTime); + + visitLevel( // copy tmp sum field back to ion pop density + level, *resourcesManager_, + [&]() { + auto& pop = *(ions.begin() + i); + std::memcpy(pop.density().data(), sumField.data(), + pop.density().size() * sizeof(value_type)); + }, + ions, sumField); } - patchGhostPartRefiners_.fill(level.getLevelNumber(), fillTime); } @@ -357,15 +419,14 @@ namespace amr /** * @brief fillIonPopMomentGhosts works on moment ghost nodes * - * patch border node moments are completed by the deposition of patch ghost - * particles for all populations level border nodes are completed by the deposition + * level border nodes are completed by the deposition * of level ghost [old,new] particles for all populations, linear time interpolation * is used to get the contribution of old/new particles */ void fillIonPopMomentGhosts(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const afterPushTime) override { - PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); + PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonPopMomentGhosts"); auto alpha = timeInterpCoef_(afterPushTime, level.getLevelNumber()); if (level.getLevelNumber() > 0 and (alpha < 0 or alpha > 1)) @@ 
-383,12 +444,8 @@ namespace amr for (auto& pop : ions) { - // first thing to do is to project patchGhostParitcles moments - auto& patchGhosts = pop.patchGhostParticles(); - auto& density = pop.density(); - auto& flux = pop.flux(); - - interpolate_(makeRange(patchGhosts), density, flux, layout); + auto& density = pop.density(); + auto& flux = pop.flux(); if (level.getLevelNumber() > 0) // no levelGhost on root level { @@ -413,6 +470,7 @@ namespace amr virtual void fillIonMomentGhosts(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const afterPushTime) override { + PHARE_LOG_SCOPE(1, "HybridHybridMessengerStrategy::fillIonMomentGhosts"); rhoGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); velGhostsRefiners_.fill(level.getLevelNumber(), afterPushTime); } @@ -538,11 +596,7 @@ namespace amr auto& hybridModel = static_cast(model); - elecSharedNodesRefiners_.fill(hybridModel.state.electromag.E, levelNumber, - initDataTime); - elecGhostsRefiners_.fill(hybridModel.state.electromag.E, levelNumber, initDataTime); - patchGhostPartRefiners_.fill(levelNumber, initDataTime); // at some point in the future levelGhostParticles could be filled with injected // particles depending on the domain boundary condition. 
@@ -590,8 +644,6 @@ namespace amr PHARE_LOG_LINE_STR("postSynchronize level " + std::to_string(levelNumber)) - magSharedNodesRefiners_.fill(hybridModel.state.electromag.B, levelNumber, time); - elecSharedNodesRefiners_.fill(hybridModel.state.electromag.E, levelNumber, time); // we fill magnetic field ghosts only on patch ghost nodes and not on level // ghosts the reason is that 1/ filling ghosts is necessary to prevent mismatch @@ -607,45 +659,40 @@ namespace amr } private: - void registerGhostComms_(std::unique_ptr const& info) + auto makeKeys(auto const& vecFieldNames) { - auto makeKeys = [](auto const& vecFieldNames) { - std::vector keys; - std::transform(std::begin(vecFieldNames), std::end(vecFieldNames), - std::back_inserter(keys), [](auto const& d) { return d.vecName; }); - return keys; - }; - magSharedNodesRefiners_.addStaticRefiners(info->ghostMagnetic, BfieldNodeRefineOp_, - makeKeys(info->ghostMagnetic)); + std::vector keys; + std::transform(std::begin(vecFieldNames), std::end(vecFieldNames), + std::back_inserter(keys), [](auto const& d) { return d.vecName; }); + return keys; + }; + void registerGhostComms_(std::unique_ptr const& info) + { magGhostsRefiners_.addStaticRefiners(info->ghostMagnetic, BfieldRefineOp_, - makeKeys(info->ghostMagnetic)); + makeKeys(info->ghostMagnetic), + defaultFieldFillPattern); magPatchGhostsRefiners_.addStaticRefiner(info->modelMagnetic, BfieldRefineOp_, - info->modelMagnetic.vecName); - - elecSharedNodesRefiners_.addStaticRefiners(info->ghostElectric, EfieldNodeRefineOp_, - makeKeys(info->ghostElectric)); + info->modelMagnetic.vecName, + defaultFieldFillPattern); elecGhostsRefiners_.addStaticRefiners(info->ghostElectric, EfieldRefineOp_, - makeKeys(info->ghostElectric)); - - currentSharedNodesRefiners_.addTimeRefiners(info->ghostCurrent, info->modelCurrent, - core::VecFieldNames{Jold_}, - EfieldNodeRefineOp_, fieldTimeOp_); + makeKeys(info->ghostElectric), + defaultFieldFillPattern); 
currentGhostsRefiners_.addTimeRefiners(info->ghostCurrent, info->modelCurrent, core::VecFieldNames{Jold_}, EfieldRefineOp_, - fieldTimeOp_); + fieldTimeOp_, defaultFieldFillPattern); rhoGhostsRefiners_.addTimeRefiner(info->modelIonDensity, info->modelIonDensity, NiOld_.name(), fieldRefineOp_, fieldTimeOp_, - info->modelIonDensity); + info->modelIonDensity, defaultFieldFillPattern); velGhostsRefiners_.addTimeRefiners(info->ghostBulkVelocity, info->modelIonBulkVelocity, core::VecFieldNames{ViOld_}, fieldRefineOp_, - fieldTimeOp_); + fieldTimeOp_, defaultFieldFillPattern); } @@ -653,13 +700,6 @@ namespace amr void registerInitComms(std::unique_ptr const& info) { - auto makeKeys = [](auto const& descriptor) { - std::vector keys; - std::transform(std::begin(descriptor), std::end(descriptor), - std::back_inserter(keys), [](auto const& d) { return d.vecName; }); - return keys; - }; - magneticInitRefiners_.addStaticRefiners(info->initMagnetic, BfieldRefineOp_, makeKeys(info->initMagnetic)); @@ -681,10 +721,26 @@ namespace amr info->levelGhostParticlesNew); - patchGhostPartRefiners_.addStaticRefiners(info->patchGhostParticles, nullptr, - info->patchGhostParticles); - } + domainGhostPartRefiners_.addStaticRefiners( + info->patchGhostParticles, nullptr, info->patchGhostParticles, + std::make_shared>()); + + + for (auto const& vecfield : info->ghostFlux) + { + auto pop_flux_vec = std::vector{vecfield}; + popFluxBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + core::VecFieldNames{sumVec}, vecfield, nullptr, sumVec.name(), + std::make_shared>()); + } + for (auto const& field : info->sumBorderFields) + popDensityBorderSumRefiners_.emplace_back(resourcesManager_) + .addStaticRefiner( + sumField.name(), field, nullptr, sumField.name(), + std::make_shared>()); + } @@ -959,6 +1015,10 @@ namespace amr VecFieldT ViOld_{stratName + "_VBulkOld", core::HybridQuantity::Vector::V}; FieldT NiOld_{stratName + "_NiOld", core::HybridQuantity::Scalar::rho}; + VecFieldT 
sumVec{stratName + "_sumVec", core::HybridQuantity::Vector::V}; + FieldT sumField{stratName + "_sumField", core::HybridQuantity::Scalar::rho}; + + //! ResourceManager shared with other objects (like the HybridModel) std::shared_ptr resourcesManager_; @@ -976,27 +1036,29 @@ namespace amr // these refiners are used to initialize electromagnetic fields when creating // a new level (initLevel) or regridding (regrid) - using InitRefinerPool = RefinerPool; - using SharedNodeRefinerPool = RefinerPool; - using GhostRefinerPool = RefinerPool; - using PatchGhostRefinerPool = RefinerPool; - using InitDomPartRefinerPool = RefinerPool; - using PatchGhostPartRefinerPool = RefinerPool; + using InitRefinerPool = RefinerPool; + using GhostRefinerPool = RefinerPool; + using PatchGhostRefinerPool = RefinerPool; + using InitDomPartRefinerPool = RefinerPool; + using PatchGhostPartRefinerPool = RefinerPool; + using DomainGhostPartRefinerPool = RefinerPool; + using FieldGhostSumRefinerPool = RefinerPool; + using FieldFillPattern_t = FieldFillPattern; + + std::vector popFluxBorderSumRefiners_; + std::vector popDensityBorderSumRefiners_; InitRefinerPool magneticInitRefiners_{resourcesManager_}; InitRefinerPool electricInitRefiners_{resourcesManager_}; //! store communicators for magnetic fields that need ghosts to be filled - SharedNodeRefinerPool magSharedNodesRefiners_{resourcesManager_}; GhostRefinerPool magGhostsRefiners_{resourcesManager_}; PatchGhostRefinerPool magPatchGhostsRefiners_{resourcesManager_}; //! 
store refiners for electric fields that need ghosts to be filled - SharedNodeRefinerPool elecSharedNodesRefiners_{resourcesManager_}; GhostRefinerPool elecGhostsRefiners_{resourcesManager_}; - GhostRefinerPool currentSharedNodesRefiners_{resourcesManager_}; GhostRefinerPool currentGhostsRefiners_{resourcesManager_}; // moment ghosts @@ -1026,8 +1088,7 @@ namespace amr RefOp_ptr levelGhostParticlesNewOp_{std::make_shared()}; - // this contains refiners for each population to exchange patch ghost particles - PatchGhostPartRefinerPool patchGhostPartRefiners_{resourcesManager_}; + DomainGhostPartRefinerPool domainGhostPartRefiners_{resourcesManager_}; SynchronizerPool densitySynchronizers_{resourcesManager_}; SynchronizerPool ionBulkVelSynchronizers_{resourcesManager_}; @@ -1039,10 +1100,10 @@ namespace amr // see field_variable_fill_pattern.hpp for explanation about this "node_only" flag // note that refinement operator, via the boolean argument, serve as a relay for the // the refinealgorithm to get the correct variablefillpattern - RefOp_ptr BfieldNodeRefineOp_{std::make_shared(/*node_only=*/true)}; RefOp_ptr BfieldRefineOp_{std::make_shared()}; - RefOp_ptr EfieldNodeRefineOp_{std::make_shared(/*node_only=*/true)}; RefOp_ptr EfieldRefineOp_{std::make_shared()}; + std::shared_ptr defaultFieldFillPattern + = std::make_shared>(); // stateless (mostly) std::shared_ptr fieldTimeOp_{std::make_shared()}; diff --git a/src/amr/messengers/hybrid_messenger.hpp b/src/amr/messengers/hybrid_messenger.hpp index 14818ea85..7dc8aeabd 100644 --- a/src/amr/messengers/hybrid_messenger.hpp +++ b/src/amr/messengers/hybrid_messenger.hpp @@ -2,15 +2,12 @@ #define PHARE_HYBRID_MESSENGER_HPP +#include "core/def.hpp" +#include - -#include "core/hybrid/hybrid_quantities.hpp" -#include "amr/messengers/hybrid_messenger_strategy.hpp" #include "amr/messengers/messenger.hpp" #include "amr/messengers/messenger_info.hpp" -#include "amr/messengers/mhd_messenger.hpp" -#include "core/def.hpp" - 
+#include "amr/messengers/hybrid_messenger_strategy.hpp" @@ -334,6 +331,15 @@ namespace amr void syncIonMoments(IonsT& ions) { strat_->syncIonMoments(ions); } + void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const fillTime) + { + strat_->fillFluxBorders(ions, level, fillTime); + } + + void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, double const fillTime) + { + strat_->fillDensityBorders(ions, level, fillTime); + } /* ------------------------------------------------------------------------- End HybridMessenger Interface @@ -341,11 +347,11 @@ namespace amr - virtual ~HybridMessenger() = default; + private: - const std::unique_ptr strat_; + std::unique_ptr const strat_; }; diff --git a/src/amr/messengers/hybrid_messenger_info.hpp b/src/amr/messengers/hybrid_messenger_info.hpp index e1ae2c4f2..62593c598 100644 --- a/src/amr/messengers/hybrid_messenger_info.hpp +++ b/src/amr/messengers/hybrid_messenger_info.hpp @@ -66,6 +66,9 @@ namespace amr std::vector ghostElectric; std::vector ghostCurrent; std::vector ghostBulkVelocity; + std::vector ghostFlux; + + std::vector sumBorderFields; virtual ~HybridMessengerInfo() = default; }; diff --git a/src/amr/messengers/hybrid_messenger_strategy.hpp b/src/amr/messengers/hybrid_messenger_strategy.hpp index 3afdb5305..1acf06abe 100644 --- a/src/amr/messengers/hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/hybrid_messenger_strategy.hpp @@ -54,7 +54,7 @@ namespace amr = 0; virtual void regrid(std::shared_ptr const& hierarchy, - const int levelNumber, + int const levelNumber, std::shared_ptr const& oldLevel, IPhysicalModel& model, double const initDataTime) = 0; @@ -119,6 +119,13 @@ namespace amr double const time) = 0; + virtual void fillFluxBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) + = 0; + virtual void fillDensityBorders(IonsT& ions, SAMRAI::hier::PatchLevel& level, + double const fillTime) + = 0; + std::string name() const { return 
stratname_; } diff --git a/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp b/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp index 457a598c8..d4c6f4a61 100644 --- a/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp +++ b/src/amr/messengers/mhd_hybrid_messenger_strategy.hpp @@ -19,7 +19,7 @@ namespace amr using IPhysicalModel = typename HybridModel::Interface; public: - static const std::string stratName; + static std::string const stratName; MHDHybridMessengerStrategy( std::shared_ptr mhdResourcesManager, @@ -66,7 +66,7 @@ namespace amr } void regrid(std::shared_ptr const& /*hierarchy*/, - const int /*levelNumber*/, + int const /*levelNumber*/, std::shared_ptr const& /*oldLevel*/, IPhysicalModel& /*model*/, double const /*initDataTime*/) override { @@ -108,8 +108,20 @@ namespace amr { } + + void fillFluxBorders(IonsT& /*ions*/, SAMRAI::hier::PatchLevel& /*level*/, + double const /*fillTime*/) override + { + // strat_->fillFluxBorders(ions, level, fillTime); + } + void fillDensityBorders(IonsT& /*ions*/, SAMRAI::hier::PatchLevel& /*level*/, + double const /*fillTime*/) override + { + // strat_->fillDensityBorders(ions, level, fillTime); + } + void firstStep(IPhysicalModel& /*model*/, SAMRAI::hier::PatchLevel& /*level*/, - const std::shared_ptr& /*hierarchy*/, + std::shared_ptr const& /*hierarchy*/, double const /*currentTime*/, double const /*prevCoarserTime*/, double const /*newCoarserTime*/) override { @@ -147,7 +159,7 @@ namespace amr }; template - const std::string MHDHybridMessengerStrategy::stratName + std::string const MHDHybridMessengerStrategy::stratName = "MHDModel-HybridModel"; } // namespace amr diff --git a/src/amr/messengers/mhd_messenger.hpp b/src/amr/messengers/mhd_messenger.hpp index ae605de54..1501eec3d 100644 --- a/src/amr/messengers/mhd_messenger.hpp +++ b/src/amr/messengers/mhd_messenger.hpp @@ -48,7 +48,7 @@ namespace amr } - static const std::string stratName; + static std::string const stratName; std::string fineModelName() const 
override { return MHDModel::model_name; } @@ -77,7 +77,7 @@ namespace amr void regrid(std::shared_ptr const& /*hierarchy*/, - const int /*levelNumber*/, + int const /*levelNumber*/, std::shared_ptr const& /*oldLevel*/, IPhysicalModel& /*model*/, double const /*initDataTime*/) override { @@ -85,7 +85,7 @@ namespace amr void firstStep(IPhysicalModel& /*model*/, SAMRAI::hier::PatchLevel& /*level*/, - const std::shared_ptr& /*hierarchy*/, + std::shared_ptr const& /*hierarchy*/, double const /*currentTime*/, double const /*prevCoarserTIme*/, double const /*newCoarserTime*/) final { @@ -106,7 +106,6 @@ namespace amr } - void synchronize(SAMRAI::hier::PatchLevel& /*level*/) final { // call coarsning schedules... @@ -130,7 +129,7 @@ namespace amr template - const std::string MHDMessenger::stratName = "MHDModel-MHDModel"; + std::string const MHDMessenger::stratName = "MHDModel-MHDModel"; } // namespace amr } // namespace PHARE #endif diff --git a/src/amr/messengers/refiner.hpp b/src/amr/messengers/refiner.hpp index 1a6c2c12d..89a7a37ae 100644 --- a/src/amr/messengers/refiner.hpp +++ b/src/amr/messengers/refiner.hpp @@ -4,7 +4,11 @@ #include "communicator.hpp" #include "core/data/vecfield/vecfield.hpp" -#include "amr/data/field/field_variable_fill_pattern.hpp" +#include "amr/messengers/field_sum_transaction.hpp" + +#include +#include + namespace PHARE::amr { @@ -16,13 +20,21 @@ enum class RefinerType { InitInteriorPart, LevelBorderParticles, InteriorGhostParticles, - SharedBorder + SharedBorder, // deprecated? 
+ PatchFieldBorderSum, + ExteriorGhostParticles }; + template class Refiner : private Communicator { + using FieldData_t = typename ResourcesManager::UserField_t::patch_data_type; + using TimeInterpOp_ptr = std ::shared_ptr; + using RefineOperator_ptr = std ::shared_ptr; + using VariableFillPattern_ptr = std ::shared_ptr; + public: void registerLevel(std::shared_ptr const& hierarchy, std::shared_ptr const& level) @@ -64,6 +76,15 @@ class Refiner : private Communicator this->add(algo, algo->createSchedule(level), levelNumber); } + else if constexpr (Type == RefinerType::PatchFieldBorderSum) + { + this->add(algo, + algo->createSchedule( + level, 0, + std::make_shared>()), + levelNumber); + } + // this createSchedule overload is used to initialize fields. // note that here we must take that createsSchedule() overload and put nullptr // as src since we want to take from coarser level everywhere. using the @@ -114,6 +135,11 @@ class Refiner : private Communicator this->add(algo, algo->createSchedule(level), levelNumber); } + else if constexpr (Type == RefinerType::ExteriorGhostParticles) + { + this->add(algo, algo->createSchedule(level), levelNumber); + } + // schedule to synchronize shared border values, and not include refinement else if constexpr (Type == RefinerType::SharedBorder) { @@ -125,7 +151,7 @@ class Refiner : private Communicator void regrid(std::shared_ptr const& hierarchy, - const int levelNumber, std::shared_ptr const& oldLevel, + int const levelNumber, std::shared_ptr const& oldLevel, double const initDataTime) { for (auto& algo : this->algos) @@ -158,12 +184,7 @@ class Refiner : private Communicator this->findSchedule(algo, levelNumber)->fillData(initDataTime); } - template - void fill(VecFieldT& vec, int const levelNumber, double const fillTime) - { - for (auto const& algo : this->algos) - this->findSchedule(algo, levelNumber)->fillData(fillTime); - } + /** @@ -185,70 +206,28 @@ class Refiner : private Communicator */ Refiner(core::VecFieldNames 
const& ghost, core::VecFieldNames const& model, core::VecFieldNames const& oldModel, std::shared_ptr const& rm, - std::shared_ptr refineOp, - std::shared_ptr timeOp) + RefineOperator_ptr refineOp, TimeInterpOp_ptr timeOp, + VariableFillPattern_ptr variableFillPattern = nullptr) { constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, this, &refineOp, &timeOp](std::string const& ghost_, std::string const& model_, - std::string const& oldModel_, auto& fillPattern) { - auto src_id = rm->getID(ghost_); - auto dest_id = rm->getID(ghost_); - auto new_id = rm->getID(model_); - auto old_id = rm->getID(oldModel_); - - if (src_id && dest_id && old_id) - { - this->add_algorithm()->registerRefine( - *dest_id, // dest - *src_id, // source at same time - *old_id, // source at past time (for time interp) - *new_id, // source at future time (for time interp) - *dest_id, // scratch - refineOp, timeOp, fillPattern); - } - }; - - registerRefine(ghost.xName, model.xName, oldModel.xName, variableFillPattern); - registerRefine(ghost.yName, model.yName, oldModel.yName, variableFillPattern); - registerRefine(ghost.zName, model.zName, oldModel.zName, variableFillPattern); + + register_time_interpolated_vector_field_refiner(rm, ghost, ghost, oldModel, model, refineOp, + timeOp, variableFillPattern); } + /** * @brief creates a Refiner for a scalar quantity with time refinement */ Refiner(std::string const& ghost, std::string const& model, std::string const& oldModel, - std::shared_ptr const& rm, - std::shared_ptr refineOp, - std::shared_ptr timeOp) + std::shared_ptr const& rm, RefineOperator_ptr refineOp, + TimeInterpOp_ptr timeOp, VariableFillPattern_ptr variableFillPattern = nullptr) { constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, this, &refineOp, &timeOp](std::string const& 
ghost_, std::string const& model_, - std::string const& oldModel_, auto& fillPattern) { - auto src_id = rm->getID(ghost_); - auto dest_id = rm->getID(ghost_); - auto new_id = rm->getID(model_); - auto old_id = rm->getID(oldModel_); - - if (src_id && dest_id && old_id) - { - this->add_algorithm()->registerRefine( - *dest_id, // dest - *src_id, // source at same time - *old_id, // source at past time (for time interp) - *new_id, // source at future time (for time interp) - *dest_id, // scratch - refineOp, timeOp, fillPattern); - } - }; - - registerRefine(ghost, model, oldModel, variableFillPattern); + + register_interpolated_resource(rm, ghost, ghost, oldModel, model, refineOp, timeOp, + variableFillPattern); } @@ -257,66 +236,110 @@ class Refiner : private Communicator * and from one quantity to the same quantity. It is typically used for initialization. */ Refiner(core::VecFieldNames const& src_dest, std::shared_ptr const& rm, - std::shared_ptr refineOp) + RefineOperator_ptr refineOp) : Refiner(src_dest, src_dest, rm, refineOp) { } + /** * @brief this overload creates a Refiner for communication without time interpolation * and from one quantity to another quantity. 
*/ - Refiner(core::VecFieldNames const& source, core::VecFieldNames const& destination, - std::shared_ptr const& rm, - std::shared_ptr refineOp) + Refiner(core::VecFieldNames const& dst, core::VecFieldNames const& src, + std::shared_ptr const& rm, RefineOperator_ptr refineOp, + VariableFillPattern_ptr variableFillPattern = nullptr) { - constexpr auto dimension = ResourcesManager::dimension; - auto variableFillPattern = FieldFillPattern::make_shared(refineOp); - - auto registerRefine - = [&rm, &refineOp, this](std::string src, std::string dst, auto& fillPattern) { - auto idSrc = rm->getID(src); - auto idDest = rm->getID(dst); - if (idSrc and idDest) - { - /*if is a ghost field type Refiner, we need to add a fillPattern - * that will be used to overwrite or not the shared border node*/ - if constexpr (Type == RefinerType::GhostField - or Type == RefinerType::PatchGhostField - or Type == RefinerType::SharedBorder) - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp, - fillPattern); - else - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp); - } - }; - registerRefine(source.xName, destination.xName, variableFillPattern); - registerRefine(source.yName, destination.yName, variableFillPattern); - registerRefine(source.zName, destination.zName, variableFillPattern); + register_vector_field(rm, dst, src, refineOp, variableFillPattern); } - Refiner(std::string const& dest, std::string const& src, - std::shared_ptr const& rm, - std::shared_ptr refineOp) + + + Refiner(std::string const& dst, std::string const& src, + std::shared_ptr const& rm, RefineOperator_ptr refineOp, + VariableFillPattern_ptr fillPattern = nullptr) { - auto idSrc = rm->getID(src); - auto idDest = rm->getID(dest); - if (idSrc and idDest) - { - this->add_algorithm()->registerRefine(*idDest, *idSrc, *idDest, refineOp); - } + auto&& [idDst, idSrc] = rm->getIDsList(dst, src); + this->add_algorithm()->registerRefine(idDst, idSrc, idDst, refineOp, fillPattern); } + 
+ /** * @brief This overload of makeRefiner creates a Refiner for communication from one * scalar quantity to itself without time interpolation. */ Refiner(std::string const& name, std::shared_ptr const& rm, - std::shared_ptr refineOp) + RefineOperator_ptr refineOp) : Refiner{name, name, rm, refineOp} { } + + + + + auto& register_resource(auto& rm, auto& dst, auto& src, auto& scratch, auto&&... args) + { + auto&& [idDst, idSrc, idScrtch] = rm->getIDsList(dst, src, scratch); + this->add_algorithm()->registerRefine(idDst, idSrc, idScrtch, args...); + return *this; + } + + + auto& register_interpolated_resource(auto& rm, auto& dst, auto& src, auto& told, auto& tnew, + auto&&... args) + { + auto&& [idDst, idSrc, idTold, idTnew] = rm->getIDsList(dst, src, told, tnew); + this->add_algorithm()->registerRefine(idDst, idSrc, idTold, idTnew, idDst, args...); + return *this; + } + + + auto& register_vector_field(auto&&... args) + { + auto const tuple = std::forward_as_tuple(args...); + using Tuple = decltype(tuple); + + static_assert(std::tuple_size_v == 4 or std::tuple_size_v == 5); + if constexpr (std::tuple_size_v == 4) + { + return register_vector_field(args..., nullptr); + } + else + { //// 0 1 2 3 4 + auto&& [rm, dst, src, refOp, fillPat] = tuple; + return (*this) + .register_resource(rm, dst.xName, src.xName, dst.xName, refOp, fillPat) + .register_resource(rm, dst.yName, src.yName, dst.yName, refOp, fillPat) + .register_resource(rm, dst.zName, src.zName, dst.zName, refOp, fillPat); + } + } + + auto& register_time_interpolated_vector_field_refiner(auto&&... 
args) + { + auto const tuple = std::forward_as_tuple(args...); + using Tuple = decltype(tuple); + + static_assert(std::tuple_size_v == 7 or std::tuple_size_v == 8); + + if constexpr (std::tuple_size_v == 7) + { + return register_time_interpolated_vector_field(args..., nullptr); + } + else + { //// 0 1 2 3 4 5 6 7 + auto&& [rm, dst, src, told, tnew, refOp, timeOp, fillPat] + = std::forward_as_tuple(args...); + register_interpolated_resource(rm, dst.xName, src.xName, told.xName, tnew.xName, refOp, + timeOp, fillPat); + register_interpolated_resource(rm, dst.yName, src.yName, told.yName, tnew.yName, refOp, + timeOp, fillPat); + register_interpolated_resource(rm, dst.zName, src.zName, told.zName, tnew.zName, refOp, + timeOp, fillPat); + return (*this); + } + } }; } // namespace PHARE::amr diff --git a/src/amr/messengers/refiner_pool.hpp b/src/amr/messengers/refiner_pool.hpp index f1433d006..0a562b613 100644 --- a/src/amr/messengers/refiner_pool.hpp +++ b/src/amr/messengers/refiner_pool.hpp @@ -4,6 +4,7 @@ #include "refiner.hpp" +#include #include #include #include @@ -23,37 +24,57 @@ namespace amr template class RefinerPool { - using Refiner_t = Refiner; - using RefineOperator = SAMRAI::hier::RefineOperator; + using Refiner_t = Refiner; + using TimeInterpOp_ptr = std::shared_ptr; + using RefineOperator_ptr = std::shared_ptr; + using VariableFillPattern_ptr = std::shared_ptr; public: - /*@brief add a static communication between sources and destinations. 
- * This overload takes several sources/destinations/keys and add one refiner for each*/ - template - void addStaticRefiners(Names const& destinations, Names const& sources, - std::shared_ptr refineOp, - std::vector keys); - + RefinerPool(std::shared_ptr const& rm) + : rm_{rm} + { + } - /*@brief convenience overload of the above when source = destination, for VecField*/ - template - void addStaticRefiners(Names const& src_dest, std::shared_ptr refineOp, - std::vector key); + virtual ~RefinerPool() {} + RefinerPool(RefinerPool const&) = delete; + RefinerPool(RefinerPool&&) = default; /* @brief add a static communication between a single source and destination.*/ - template - void addStaticRefiner(Name const& ghostName, Name const& src, - std::shared_ptr const& refineOp, - std::string const key); + template + void addStaticRefiner(Resource const& ghostName, Resource const& src, + RefineOperator_ptr const& refineOp, Key const& key, + VariableFillPattern_ptr fillPattern = nullptr); /** * @brief convenience overload of above addStaticRefiner taking only one name * used for communications from a quantity to the same quantity.*/ - template - void addStaticRefiner(Name const& src_dest, std::shared_ptr const& refineOp, - std::string const key); + template + void addStaticRefiner(Resource const& src_dest, RefineOperator_ptr const& refineOp, + Key const& key, VariableFillPattern_ptr fillPattern = nullptr); + + + /*@brief add a static communication between sources and destinations. 
+ * This overload takes several sources/destinations/keys and add one refiner for each*/ + template + void addStaticRefiners(Resources const& destinations, Resources const& sources, + RefineOperator_ptr refineOp, Keys const& keys, + VariableFillPattern_ptr fillPattern = nullptr); + + + /*@brief convenience overload of the above when source = destination, for VecField*/ + template + void addStaticRefiners(Srcs const& src_dest, RefineOperator_ptr refineOp, Keys const& key, + VariableFillPattern_ptr fillPattern = nullptr); + + + // this overload takes simple strings. + void addTimeRefiner(std::string const& ghost, std::string const& model, + std::string const& oldModel, RefineOperator_ptr const& refineOp, + TimeInterpOp_ptr const& timeOp, std::string const& key, + VariableFillPattern_ptr fillPattern = nullptr); + /** * @brief fill the given pool of refiners with a new refiner per VecField * in ghostVecs. Data will be spatially refined using the specified refinement @@ -61,9 +82,9 @@ namespace amr * represented by modelVec and oldModelVec.*/ void addTimeRefiners(std::vector const& ghostVecs, core::VecFieldNames const& modelVec, - core::VecFieldNames const& oldModelVec, - std::shared_ptr& refineOp, - std::shared_ptr& timeOp); + core::VecFieldNames const& oldModelVec, RefineOperator_ptr& refineOp, + TimeInterpOp_ptr& timeOp, + VariableFillPattern_ptr fillPattern = nullptr); @@ -77,18 +98,9 @@ namespace amr * * This overload is for vector fields*/ void addTimeRefiner(core::VecFieldNames const& ghost, core::VecFieldNames const& model, - core::VecFieldNames const& oldModel, - std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, - std::string key); - - - // this overload takes simple strings. 
- void addTimeRefiner(std::string const& ghost, std::string const& model, - std::string const& oldModel, - std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, - std::string key); + core::VecFieldNames const& oldModel, RefineOperator_ptr const& refineOp, + TimeInterpOp_ptr const& timeOp, std::string const& key, + VariableFillPattern_ptr fillPattern = nullptr); @@ -102,7 +114,6 @@ namespace amr } - /** @brief this overload will execute communications for all quantities in the pool. */ void fill(int const levelNumber, double const initDataTime) const { @@ -119,14 +130,13 @@ namespace amr if (refiners_.count(vec.name()) == 0) throw std::runtime_error("no refiner for " + vec.name()); - refiners_.at(vec.name()).fill(vec, levelNumber, fillTime); + refiners_.at(vec.name()).fill(levelNumber, fillTime); } - /** @brief executes a regridding for all quantities in the pool.*/ virtual void regrid(std::shared_ptr const& hierarchy, - const int levelNumber, + int const levelNumber, std::shared_ptr const& oldLevel, double const initDataTime) { @@ -137,109 +147,113 @@ namespace amr } - RefinerPool(std::shared_ptr const& rm) - : rm_{rm} - { - } - private: using Qty = std::string; - std::map> refiners_; + std::map refiners_; std::shared_ptr rm_; }; - template - template - void RefinerPool::addStaticRefiners( - Names const& destinations, Names const& sources, std::shared_ptr refineOp, - std::vector keys) - { - assert(destinations.size() == sources.size()); - auto key = std::begin(keys); - for (std::size_t i = 0; i < destinations.size(); ++i) - { - addStaticRefiner(destinations[i], sources[i], refineOp, *key++); - } - } +} // namespace amr +} // namespace PHARE - template - template - void - RefinerPool::addStaticRefiners(Names const& src_dest, - std::shared_ptr refineOp, - std::vector key) - { - addStaticRefiners(src_dest, src_dest, refineOp, key); - } +namespace PHARE::amr +{ +template +template +void RefinerPool::addStaticRefiner(Resource const& dst, Resource const& src, 
+ RefineOperator_ptr const& refineOp, + Key const& key, + VariableFillPattern_ptr fillPattern) +{ + auto const [it, success] + = refiners_.insert({key, Refiner_t(dst, src, rm_, refineOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} - template - template - void RefinerPool::addStaticRefiner( - Name const& ghostName, Name const& src, std::shared_ptr const& refineOp, - std::string const key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghostName, src, rm_, refineOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } +template +template +void RefinerPool::addStaticRefiner(Resource const& src_dst, + RefineOperator_ptr const& refineOp, + Key const& key, + VariableFillPattern_ptr fillPattern) +{ + addStaticRefiner(src_dst, src_dst, refineOp, key, fillPattern); +} +template +template +void RefinerPool::addStaticRefiners(Resources const& destinations, + Resources const& sources, + RefineOperator_ptr refineOp, + Keys const& keys, + VariableFillPattern_ptr fillPattern) +{ + assert(destinations.size() == sources.size()); + assert(destinations.size() == keys.size()); - template - template - void RefinerPool::addStaticRefiner( - Name const& descriptor, std::shared_ptr const& refineOp, - std::string const key) - { - addStaticRefiner(descriptor, descriptor, refineOp, key); - } + for (std::size_t i = 0; i < destinations.size(); ++i) + addStaticRefiner(destinations[i], sources[i], refineOp, keys[i], fillPattern); +} - template - void RefinerPool::addTimeRefiners( - std::vector const& ghostVecs, core::VecFieldNames const& modelVec, - core::VecFieldNames const& oldModelVec, std::shared_ptr& refineOp, - std::shared_ptr& timeOp) - { - for (auto const& ghostVec : ghostVecs) - { - addTimeRefiner(ghostVec, modelVec, oldModelVec, refineOp, timeOp, ghostVec.vecName); - } - } +template +template +void RefinerPool::addStaticRefiners(Srcs const& src_dest, + RefineOperator_ptr refineOp, + 
Keys const& keys, + VariableFillPattern_ptr fillPattern) +{ + addStaticRefiners(src_dest, src_dest, refineOp, keys, fillPattern); +} - template - void RefinerPool::addTimeRefiner( - core::VecFieldNames const& ghost, core::VecFieldNames const& model, - core::VecFieldNames const& oldModel, std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, std::string key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } - template - void RefinerPool::addTimeRefiner( - std::string const& ghost, std::string const& model, std::string const& oldModel, - std::shared_ptr const& refineOp, - std::shared_ptr const& timeOp, std::string key) - { - auto const [it, success] - = refiners_.insert({key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp)}); - if (!success) - throw std::runtime_error(key + " is already registered"); - } -} // namespace amr -} // namespace PHARE +template +void RefinerPool::addTimeRefiner( + std::string const& ghost, std::string const& model, std::string const& oldModel, + RefineOperator_ptr const& refineOp, TimeInterpOp_ptr const& timeOp, std::string const& key, + VariableFillPattern_ptr fillPattern) +{ + auto const [it, success] = refiners_.insert( + {key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} + + +template +void RefinerPool::addTimeRefiners( + std::vector const& ghostVecs, core::VecFieldNames const& modelVec, + core::VecFieldNames const& oldModelVec, RefineOperator_ptr& refineOp, TimeInterpOp_ptr& timeOp, + VariableFillPattern_ptr fillPattern) +{ + for (auto const& ghostVec : ghostVecs) + addTimeRefiner(ghostVec, modelVec, oldModelVec, refineOp, timeOp, ghostVec.vecName, + fillPattern); +} + +template +void RefinerPool::addTimeRefiner( + core::VecFieldNames const& ghost, 
core::VecFieldNames const& model, + core::VecFieldNames const& oldModel, RefineOperator_ptr const& refineOp, + TimeInterpOp_ptr const& timeOp, std::string const& key, VariableFillPattern_ptr fillPattern) +{ + auto const [it, success] = refiners_.insert( + {key, Refiner_t(ghost, model, oldModel, rm_, refineOp, timeOp, fillPattern)}); + if (!success) + throw std::runtime_error(key + " is already registered"); +} + + +} // namespace PHARE::amr #endif diff --git a/src/amr/multiphysics_integrator.hpp b/src/amr/multiphysics_integrator.hpp index e6f71d85e..f963a461f 100644 --- a/src/amr/multiphysics_integrator.hpp +++ b/src/amr/multiphysics_integrator.hpp @@ -370,9 +370,9 @@ namespace solver load_balancer_manager_->estimate(*level, model); if (static_cast(levelNumber) == model_views_.size()) - model_views_.push_back(solver.make_view(*level, model)); + model_views_.push_back(solver.make_view(*hierarchy, *level, model)); else - model_views_[levelNumber] = solver.make_view(*level, model); + model_views_[levelNumber] = solver.make_view(*hierarchy, *level, model); } @@ -393,7 +393,7 @@ namespace solver messenger.registerLevel(hierarchy, ilvl); model_views_.push_back(getSolver_(ilvl).make_view( - AMR_Types::getLevel(*hierarchy, ilvl), getModel_(ilvl))); + *hierarchy, AMR_Types::getLevel(*hierarchy, ilvl), getModel_(ilvl))); auto level = hierarchy->getPatchLevel(ilvl); for (auto& patch : *level) @@ -436,7 +436,7 @@ namespace solver void initializeLevelIntegrator( - const std::shared_ptr& /*griddingAlg*/) + std::shared_ptr const& /*griddingAlg*/) override { } @@ -557,7 +557,7 @@ namespace solver standardLevelSynchronization(std::shared_ptr const& hierarchy, int const coarsestLevel, int const finestLevel, double const syncTime, - const std::vector& /*oldTimes*/) override + std::vector const& /*oldTimes*/) override { // TODO use messengers to sync with coarser for (auto ilvl = finestLevel; ilvl > coarsestLevel; --ilvl) diff --git a/src/amr/physical_models/hybrid_model.hpp 
b/src/amr/physical_models/hybrid_model.hpp index 449bafc22..fabc14c45 100644 --- a/src/amr/physical_models/hybrid_model.hpp +++ b/src/amr/physical_models/hybrid_model.hpp @@ -43,14 +43,14 @@ class HybridModel : public IPhysicalModel using ParticleInitializerFactory = core::ParticleInitializerFactory; - static const inline std::string model_name = "HybridModel"; + static inline std::string const model_name = "HybridModel"; core::HybridState state; std::shared_ptr resourcesManager; - virtual void initialize(level_t& level) override; + void initialize(level_t& level) override; /** @@ -70,7 +70,7 @@ class HybridModel : public IPhysicalModel * @brief fillMessengerInfo describes which variables of the model are to be initialized or * filled at ghost nodes. */ - virtual void fillMessengerInfo(std::unique_ptr const& info) const override; + void fillMessengerInfo(std::unique_ptr const& info) const override; NO_DISCARD auto setOnPatch(patch_t& patch) @@ -166,7 +166,6 @@ void HybridModel::f hybridInfo.ghostCurrent.push_back(core::VecFieldNames{state.J}); hybridInfo.ghostBulkVelocity.push_back(hybridInfo.modelIonBulkVelocity); - auto transform_ = [](auto& ions, auto& inserter) { std::transform(std::begin(ions), std::end(ions), std::back_inserter(inserter), [](auto const& pop) { return pop.name(); }); @@ -175,6 +174,12 @@ void HybridModel::f transform_(state.ions, hybridInfo.levelGhostParticlesOld); transform_(state.ions, hybridInfo.levelGhostParticlesNew); transform_(state.ions, hybridInfo.patchGhostParticles); + + for (auto const& pop : state.ions) + { + hybridInfo.ghostFlux.emplace_back(pop.flux()); + hybridInfo.sumBorderFields.emplace_back(pop.density().name()); + } } diff --git a/src/amr/resources_manager/amr_utils.cpp b/src/amr/resources_manager/amr_utils.cpp index 48218a28c..1ceeffa17 100644 --- a/src/amr/resources_manager/amr_utils.cpp +++ b/src/amr/resources_manager/amr_utils.cpp @@ -31,10 +31,12 @@ namespace amr /** * @brief AMRToLocal sets the AMRBox to local 
indexing relative to the referenceAMRBox */ - void AMRToLocal(SAMRAI::hier::Box& AMRBox, SAMRAI::hier::Box const& referenceAMRBox) + SAMRAI::hier::Box& AMRToLocal(SAMRAI::hier::Box& AMRBox, + SAMRAI::hier::Box const& referenceAMRBox) { AMRBox.setLower(AMRBox.lower() - referenceAMRBox.lower()); AMRBox.setUpper(AMRBox.upper() - referenceAMRBox.lower()); + return AMRBox; } diff --git a/src/amr/resources_manager/amr_utils.hpp b/src/amr/resources_manager/amr_utils.hpp index 0efd3f795..e85f1617b 100644 --- a/src/amr/resources_manager/amr_utils.hpp +++ b/src/amr/resources_manager/amr_utils.hpp @@ -3,21 +3,22 @@ #include "core/def/phare_mpi.hpp" -#include -#include -#include -#include -#include -#include - -#include "amr/types/amr_types.hpp" +#include "core/def.hpp" #include "core/utilities/constants.hpp" #include "core/utilities/point/point.hpp" -#include "core/def.hpp" +#include "amr/types/amr_types.hpp" #include "amr/utilities/box/amr_box.hpp" +#include +#include +#include +#include +#include +#include +#include + namespace PHARE { namespace amr @@ -43,7 +44,8 @@ namespace amr /** * @brief AMRToLocal sets the AMRBox to local indexing relative to the referenceAMRBox */ - void AMRToLocal(SAMRAI::hier::Box& AMRBox, SAMRAI::hier::Box const& referenceAMRBox); + SAMRAI::hier::Box& AMRToLocal(SAMRAI::hier::Box& AMRBox, + SAMRAI::hier::Box const& referenceAMRBox); @@ -134,7 +136,7 @@ namespace amr template NO_DISCARD GridLayoutT layoutFromPatch(SAMRAI::hier::Patch const& patch) { - int constexpr dimension = GridLayoutT::dimension; + auto constexpr dimension = GridLayoutT::dimension; SAMRAI::tbox::Dimension const dim{dimension}; @@ -173,19 +175,37 @@ namespace amr } } - SAMRAI::hier::Box domain = patch.getBox(); + SAMRAI::hier::Box const domain = patch.getBox(); - std::array nbrCell; + auto const nbrCell = core::for_N( + [&](auto iDim) { return static_cast(domain.numberCells(iDim)); }); - for (std::size_t iDim = 0; iDim < dimension; ++iDim) - { - nbrCell[iDim] = 
static_cast(domain.numberCells(iDim)); - } + auto const lvlNbr = patch.getPatchLevelNumber(); - auto lvlNbr = patch.getPatchLevelNumber(); return GridLayoutT{dl, nbrCell, origin, amr::Box{domain}, lvlNbr}; } + template + NO_DISCARD auto makeNonLevelGhostBoxFor(SAMRAI::hier::Patch const& patch, + SAMRAI::hier::PatchHierarchy const& hierarchy) + { + auto constexpr dimension = GridLayoutT::dimension; + auto const lvlNbr = patch.getPatchLevelNumber(); + SAMRAI::hier::HierarchyNeighbors const hier_nbrs{hierarchy, lvlNbr, lvlNbr}; + + + SAMRAI::hier::Box const domain = patch.getBox(); + auto const domBox = phare_box_from(domain); + auto const particleGhostBox = grow(domBox, GridLayoutT::nbrParticleGhosts()); + auto particleGhostBoxMinusLevelGhostsCells{domBox}; + + for (auto const& neighbox : hier_nbrs.getSameLevelNeighbors(domain, lvlNbr)) + particleGhostBoxMinusLevelGhostsCells = particleGhostBoxMinusLevelGhostsCells.merge( + *(particleGhostBox * phare_box_from(neighbox))); + + return particleGhostBoxMinusLevelGhostsCells; + } + inline auto to_string(SAMRAI::hier::GlobalId const& id) { std::stringstream patchID; @@ -205,6 +225,16 @@ namespace amr } } + template + void visitLevel(SAMRAI_Types::level_t& level, ResMan& resman, Action&& action, Args&&... args) + { + for (auto& patch : level) + { + auto guard = resman.setOnPatch(*patch, args...); + action(); + } + } + template void visitHierarchy(SAMRAI::hier::PatchHierarchy& hierarchy, ResMan& resman, Action&& action, diff --git a/src/amr/resources_manager/resources_manager.hpp b/src/amr/resources_manager/resources_manager.hpp index d3e5c26c9..2615c605a 100644 --- a/src/amr/resources_manager/resources_manager.hpp +++ b/src/amr/resources_manager/resources_manager.hpp @@ -280,6 +280,25 @@ namespace amr } + auto getIDsList(auto&&... 
keys) + { + auto const Fn = [&](auto& key) { + if (auto const id = getID(key)) + return *id; + throw std::runtime_error("bad key"); + }; + return std::array{Fn(keys)...}; + } + + void print_resources() const + { + for (auto& [key, _] : nameToResourceInfo_) + { + PHARE_LOG_LINE_SS(key); + } + } + + ~ResourcesManager() { @@ -438,7 +457,7 @@ namespace amr ResourcesInfo info; info.variable = ResourcesResolver_t::make_shared_variable(view); info.id = variableDatabase_->registerVariableAndContext( - info.variable, context_, SAMRAI::hier::IntVector::getZero(dimension_)); + info.variable, context_, SAMRAI::hier::IntVector::getZero(dimension_)); nameToResourceInfo_.emplace(view.name(), info); } diff --git a/src/amr/solvers/solver.hpp b/src/amr/solvers/solver.hpp index ec3cfca1c..6f1e26cdc 100644 --- a/src/amr/solvers/solver.hpp +++ b/src/amr/solvers/solver.hpp @@ -89,7 +89,7 @@ namespace solver virtual void advanceLevel(hierarchy_t const& hierarchy, int const levelNumber, ISolverModelView& view, amr::IMessenger>& fromCoarser, - const double currentTime, const double newTime) + double const currentTime, double const newTime) = 0; @@ -100,7 +100,8 @@ namespace solver * ResourcesManager of the given model, onto the given Patch, at the given time. 
*/ virtual void allocate(IPhysicalModel& model, patch_t& patch, - double const allocateTime) const = 0; + double const allocateTime) const + = 0; @@ -110,7 +111,8 @@ namespace solver virtual ~ISolver() = default; - virtual std::shared_ptr make_view(level_t&, IPhysicalModel&) + virtual std::shared_ptr make_view(hierarchy_t const&, level_t&, + IPhysicalModel&) = 0; protected: diff --git a/src/amr/solvers/solver_mhd.hpp b/src/amr/solvers/solver_mhd.hpp index 8ef17c543..b2184337f 100644 --- a/src/amr/solvers/solver_mhd.hpp +++ b/src/amr/solvers/solver_mhd.hpp @@ -45,11 +45,12 @@ namespace solver void advanceLevel(hierarchy_t const& /*hierarchy*/, int const /*levelNumber*/, ISolverModelView& /*view*/, amr::IMessenger>& /*fromCoarser*/, - const double /*currentTime*/, const double /*newTime*/) override + double const /*currentTime*/, double const /*newTime*/) override { } - std::shared_ptr make_view(level_t&, IPhysicalModel&) override + std::shared_ptr make_view(hierarchy_t const& hierarchy, level_t&, + IPhysicalModel&) override { throw std::runtime_error("Not implemented in mhd solver"); return nullptr; diff --git a/src/amr/solvers/solver_ppc.hpp b/src/amr/solvers/solver_ppc.hpp index 7357f72a3..236363fd6 100644 --- a/src/amr/solvers/solver_ppc.hpp +++ b/src/amr/solvers/solver_ppc.hpp @@ -3,8 +3,13 @@ #include "core/def/phare_mpi.hpp" -#include +#include "core/numerics/ion_updater/ion_updater.hpp" +#include "core/numerics/ampere/ampere.hpp" +#include "core/numerics/faraday/faraday.hpp" +#include "core/numerics/ohm/ohm.hpp" +#include "core/data/vecfield/vecfield.hpp" +#include "core/data/grid/gridlayout_utils.hpp" #include "amr/messengers/hybrid_messenger.hpp" #include "amr/messengers/hybrid_messenger_info.hpp" @@ -13,17 +18,10 @@ #include "amr/solvers/solver.hpp" #include "amr/solvers/solver_ppc_model_view.hpp" -#include "core/numerics/ion_updater/ion_updater.hpp" -#include "core/numerics/ampere/ampere.hpp" -#include "core/numerics/faraday/faraday.hpp" -#include 
"core/numerics/ohm/ohm.hpp" - -#include "core/data/vecfield/vecfield.hpp" -#include "core/data/grid/gridlayout_utils.hpp" +#include +#include -#include -#include namespace PHARE::solver { @@ -36,20 +34,21 @@ class SolverPPC : public ISolver static constexpr auto dimension = HybridModel::dimension; static constexpr auto interp_order = HybridModel::gridlayout_type::interp_order; - using Electromag = typename HybridModel::electromag_type; - using Ions = typename HybridModel::ions_type; - using ParticleArray = typename Ions::particle_array_type; - using VecFieldT = typename HybridModel::vecfield_type; - using GridLayout = typename HybridModel::gridlayout_type; - using ResourcesManager = typename HybridModel::resources_manager_type; + using Electromag = HybridModel::electromag_type; + using Ions = HybridModel::ions_type; + using ParticleArray = Ions::particle_array_type; + using VecFieldT = HybridModel::vecfield_type; + using GridLayout = HybridModel::gridlayout_type; + using ResourcesManager = HybridModel::resources_manager_type; using IPhysicalModel_t = IPhysicalModel; using IMessenger = amr::IMessenger; using HybridMessenger = amr::HybridMessenger; using ModelViews_t = HybridPPCModelView; - using Faraday_t = typename ModelViews_t::Faraday_t; - using Ampere_t = typename ModelViews_t::Ampere_t; - using Ohm_t = typename ModelViews_t::Ohm_t; + using Faraday_t = ModelViews_t::Faraday_t; + using Ampere_t = ModelViews_t::Ampere_t; + using Ohm_t = ModelViews_t::Ohm_t; + using IonUpdater_t = PHARE::core::IonUpdater; Electromag electromagPred_{"EMPred"}; Electromag electromagAvg_{"EMAvg"}; @@ -58,13 +57,13 @@ class SolverPPC : public ISolver Ampere_t ampere_; Ohm_t ohm_; - PHARE::core::IonUpdater ionUpdater_; + IonUpdater_t ionUpdater_; public: - using patch_t = typename AMR_Types::patch_t; - using level_t = typename AMR_Types::level_t; - using hierarchy_t = typename AMR_Types::hierarchy_t; + using patch_t = AMR_Types::patch_t; + using level_t = AMR_Types::level_t; + using 
hierarchy_t = AMR_Types::hierarchy_t; @@ -97,12 +96,17 @@ class SolverPPC : public ISolver double const newTime) override; - void onRegrid() override { ionUpdater_.reset(); } + void onRegrid() override + { + boxing.clear(); + ionUpdater_.reset(); + } - std::shared_ptr make_view(level_t& level, IPhysicalModel_t& model) override + std::shared_ptr make_view(hierarchy_t const& hierarchy, level_t& level, + IPhysicalModel_t& model) override { - return std::make_shared(level, dynamic_cast(model)); + return std::make_shared(hierarchy, level, dynamic_cast(model)); } @@ -147,28 +151,38 @@ class SolverPPC : public ISolver double newTime; }; + void make_boxes(hierarchy_t const& hierarchy, level_t& level) + { + int const lvlNbr = level.getLevelNumber(); + if (boxing.count(lvlNbr)) + return; + + auto& levelBoxing = boxing[lvlNbr]; // creates if missing + + for (auto const& patch : level) + if (auto [it, suc] = levelBoxing.try_emplace( + amr::to_string(patch->getGlobalId()), + Boxing_t{amr::layoutFromPatch(*patch), + amr::makeNonLevelGhostBoxFor(*patch, hierarchy)}); + !suc) + throw std::runtime_error("boxing map insertion failure"); + } - // extend lifespan - std::unordered_map tmpDomain; - std::unordered_map patchGhost; - - template - static void add_to(Map& map, std::string const& key, ParticleArray const& ps) + auto& setup_level(hierarchy_t const& hierarchy, int const levelNumber) { - // vector copy drops the capacity (over allocation of the source) - // we want to keep the overallocation somewhat - how much to be assessed - ParticleArray empty{ps.box()}; - - if (!map.count(key)) - map.emplace(key, empty); - else - map.at(key) = empty; - - auto& v = map.at(key); - v.reserve(ps.capacity()); - v.replace_from(ps); + auto level = hierarchy.getPatchLevel(levelNumber); + if (boxing.count(levelNumber) == 0) + make_boxes(hierarchy, *level); + return *level; } + + + using Boxing_t = core::UpdaterSelectionBoxing; + std::unordered_map> boxing; + + + }; // end solverPPC @@ -213,41 +227,6 
@@ void SolverPPC::fillMessengerInfo( } -template -void SolverPPC::saveState_(level_t& level, ModelViews_t& views) -{ - PHARE_LOG_SCOPE(1, "SolverPPC::saveState_"); - - for (auto& state : views) - { - std::stringstream ss; - ss << state.patch->getGlobalId(); - for (auto& pop : state.ions) - { - std::string const key = ss.str() + "_" + pop.name(); - add_to(tmpDomain, key, pop.domainParticles()); - add_to(patchGhost, key, pop.patchGhostParticles()); - } - } -} - -template -void SolverPPC::restoreState_(level_t& level, ModelViews_t& views) -{ - PHARE_LOG_SCOPE(1, "SolverPPC::restoreState_"); - - for (auto& state : views) - { - std::stringstream ss; - ss << state.patch->getGlobalId(); - - for (auto& pop : state.ions) - { - pop.domainParticles() = std::move(tmpDomain.at(ss.str() + "_" + pop.name())); - pop.patchGhostParticles() = std::move(patchGhost.at(ss.str() + "_" + pop.name())); - } - } -} template @@ -260,26 +239,22 @@ void SolverPPC::advanceLevel(hierarchy_t const& hierarch auto& modelView = dynamic_cast(views); auto& fromCoarser = dynamic_cast(fromCoarserMessenger); - auto level = hierarchy.getPatchLevel(levelNumber); + auto& level = setup_level(hierarchy, levelNumber); - predictor1_(*level, modelView, fromCoarser, currentTime, newTime); + predictor1_(level, modelView, fromCoarser, currentTime, newTime); - average_(*level, modelView, fromCoarser, newTime); + average_(level, modelView, fromCoarser, newTime); - saveState_(*level, modelView); + moveIons_(level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::domain_only); - moveIons_(*level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::domain_only); + predictor2_(level, modelView, fromCoarser, currentTime, newTime); - predictor2_(*level, modelView, fromCoarser, currentTime, newTime); + average_(level, modelView, fromCoarser, newTime); - average_(*level, modelView, fromCoarser, newTime); + moveIons_(level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::all); - 
restoreState_(*level, modelView); - - moveIons_(*level, modelView, fromCoarser, currentTime, newTime, core::UpdaterMode::all); - - corrector_(*level, modelView, fromCoarser, currentTime, newTime); + corrector_(level, modelView, fromCoarser, currentTime, newTime); } @@ -454,16 +429,22 @@ void SolverPPC::moveIons_(level_t& level, ModelViews_t& PHARE_DEBUG_DO(_debug_log_move_ions(views);) TimeSetter setTime{views, newTime}; + auto const& levelBoxing = boxing[level.getLevelNumber()]; { auto dt = newTime - currentTime; for (auto& state : views) - ionUpdater_.updatePopulations(state.ions, state.electromagAvg, state.layout, dt, mode); + ionUpdater_.updatePopulations( + state.ions, state.electromagAvg, + levelBoxing.at(amr::to_string(state.patch->getGlobalId())), dt, mode); } // this needs to be done before calling the messenger setTime([](auto& state) -> auto& { return state.ions; }); + + fromCoarser.fillFluxBorders(views.model().state.ions, level, newTime); + fromCoarser.fillDensityBorders(views.model().state.ions, level, newTime); fromCoarser.fillIonGhostParticles(views.model().state.ions, level, newTime); fromCoarser.fillIonPopMomentGhosts(views.model().state.ions, level, newTime); diff --git a/src/amr/solvers/solver_ppc_model_view.hpp b/src/amr/solvers/solver_ppc_model_view.hpp index e845b2c99..dd7552bcc 100644 --- a/src/amr/solvers/solver_ppc_model_view.hpp +++ b/src/amr/solvers/solver_ppc_model_view.hpp @@ -1,11 +1,12 @@ #ifndef PHARE_SOLVER_SOLVER_PPC_MODEL_VIEW_HPP #define PHARE_SOLVER_SOLVER_PPC_MODEL_VIEW_HPP +#include "core/numerics/ohm/ohm.hpp" #include "core/numerics/ampere/ampere.hpp" #include "core/numerics/faraday/faraday.hpp" -#include "core/numerics/ohm/ohm.hpp" #include "amr/solvers/solver.hpp" +#include namespace PHARE::solver { @@ -96,18 +97,19 @@ class HybridPPCModelView : public ISolverModelView public: using This = HybridPPCModelView; using HybridModel_t = HybridModel_; - using HybridModel_args = typename HybridModel_t::type_list::Tuple; - using 
IPhysicalModel_t = typename HybridModel_t::Interface; - using patch_t = typename HybridModel_t::patch_t; - using Electrons = typename HybridModel_t::electrons_t; - using level_t = typename HybridModel_t::amr_types::level_t; - using Electromag = typename HybridModel_t::electromag_type; - using Ions = typename HybridModel_t::ions_type; - using ParticleArray_t = typename Ions::particle_array_type; - using Particle_t = typename ParticleArray_t::value_type; - using VecFieldT = typename HybridModel_t::vecfield_type; - using FieldT = typename HybridModel_t::field_type; - using GridLayout = typename HybridModel_t::gridlayout_type; + using HybridModel_args = HybridModel_t::type_list::Tuple; + using IPhysicalModel_t = HybridModel_t::Interface; + using patch_t = HybridModel_t::patch_t; + using Electrons = HybridModel_t::electrons_t; + using hierarchy_t = HybridModel_t::amr_types::hierarchy_t; + using level_t = HybridModel_t::amr_types::level_t; + using Electromag = HybridModel_t::electromag_type; + using Ions = HybridModel_t::ions_type; + using ParticleArray_t = Ions::particle_array_type; + using Particle_t = ParticleArray_t::value_type; + using VecFieldT = HybridModel_t::vecfield_type; + using FieldT = HybridModel_t::field_type; + using GridLayout = HybridModel_t::gridlayout_type; using Faraday_t = FaradayTransformer; using Ampere_t = AmpereTransformer; using Ohm_t = OhmTransformer; @@ -119,13 +121,13 @@ class HybridPPCModelView : public ISolverModelView template struct iterator; - HybridPPCModelView(level_t& level, IPhysicalModel_t& model) + HybridPPCModelView(hierarchy_t const& hierarchy, level_t& level, IPhysicalModel_t& model) : model_{dynamic_cast(model)} { - onRegrid(level, model_); + onRegrid(hierarchy, level, model_); } - void onRegrid(level_t& level, HybridModel_t& hybridModel); + void onRegrid(hierarchy_t const& hierarchy, level_t& level, HybridModel_t& hybridModel); auto begin() { return iterator{*this}; } auto begin() const { return iterator{*this}; } @@ -169,7 
+171,8 @@ class HybridPPCModelView : public ISolverModelView template -void HybridPPCModelView::onRegrid(level_t& level, HybridModel_t& hybridModel) +void HybridPPCModelView::onRegrid(hierarchy_t const& hierarchy, level_t& level, + HybridModel_t& hybridModel) { auto& hybridState = hybridModel.state; auto& rm = *hybridModel.resourcesManager; diff --git a/src/amr/utilities/box/amr_box.hpp b/src/amr/utilities/box/amr_box.hpp index 4b615acd9..3844840eb 100644 --- a/src/amr/utilities/box/amr_box.hpp +++ b/src/amr/utilities/box/amr_box.hpp @@ -30,6 +30,14 @@ NO_DISCARD auto phare_box_from(SAMRAI::hier::Box const& box) return PHARE::core::Box{core::Point{lower}, core::Point{upper}}; } +template +NO_DISCARD auto phare_lcl_box_from(SAMRAI::hier::Box const& box) +{ + auto const& amr_box = phare_box_from(box); + return PHARE::core::Box{core::Point{amr_box.lower}.as_unsigned(), + core::Point{amr_box.upper}.as_unsigned()}; +} + NO_DISCARD inline bool operator==(SAMRAI::hier::Box const& b1, SAMRAI::hier::Box const& b2) { auto dim1 = b1.getDim().getValue(); @@ -85,6 +93,62 @@ struct Box : public PHARE::core::Box } }; + + +template +NO_DISCARD inline bool isInBox(SAMRAI::hier::Box const& box, Particle const& particle) +{ + constexpr auto dim = Particle::dimension; + + auto const& iCell = particle.iCell; + + auto const& lower = box.lower(); + auto const& upper = box.upper(); + + + if (iCell[0] >= lower(0) && iCell[0] <= upper(0)) + { + if constexpr (dim > 1) + { + if (iCell[1] >= lower(1) && iCell[1] <= upper(1)) + { + if constexpr (dim > 2) + { + if (iCell[2] >= lower(2) && iCell[2] <= upper(2)) + { + return true; + } + } + else + { + return true; + } + } + } + else + { + return true; + } + } + return false; +} + + +template +auto as_point(SAMRAI::hier::IntVector const& vec) +{ + return core::Point{ + core::for_N([&](auto i) { return vec[i]; })}; +} + + +template +auto as_point(SAMRAI::hier::Transformation const& tform) +{ + return as_point(tform.getOffset()); +} + + } // 
namespace PHARE::amr #endif diff --git a/src/amr/wrappers/hierarchy.hpp b/src/amr/wrappers/hierarchy.hpp index 3a86fab94..10220ac00 100644 --- a/src/amr/wrappers/hierarchy.hpp +++ b/src/amr/wrappers/hierarchy.hpp @@ -394,15 +394,15 @@ auto patchHierarchyDatabase(PHARE::initializer::PHAREDict const& amr) template DimHierarchy<_dimension>::DimHierarchy(PHARE::initializer::PHAREDict const& dict) : Hierarchy{ - dict, - std::make_shared( - SAMRAI::tbox::Dimension{dimension}, "CartesianGridGeom", - griddingAlgorithmDatabase(dict["simulation"]["grid"])), - patchHierarchyDatabase(dict["simulation"]["AMR"]), - shapeToBox(parseDimXYZType(dict["simulation"]["grid"], "nbr_cells")), - parseDimXYZType(dict["simulation"]["grid"], "origin"), - parseDimXYZType(dict["simulation"]["grid"], "meshsize"), - parseDimXYZType(dict["simulation"]["grid"], "boundary_type")} + dict, + std::make_shared( + SAMRAI::tbox::Dimension{dimension}, "CartesianGridGeom", + griddingAlgorithmDatabase(dict["simulation"]["grid"])), + patchHierarchyDatabase(dict["simulation"]["AMR"]), + shapeToBox(parseDimXYZType(dict["simulation"]["grid"], "nbr_cells")), + parseDimXYZType(dict["simulation"]["grid"], "origin"), + parseDimXYZType(dict["simulation"]["grid"], "meshsize"), + parseDimXYZType(dict["simulation"]["grid"], "boundary_type")} { } diff --git a/src/amr/wrappers/integrator.hpp b/src/amr/wrappers/integrator.hpp index ed03c1769..2cf00072e 100644 --- a/src/amr/wrappers/integrator.hpp +++ b/src/amr/wrappers/integrator.hpp @@ -3,6 +3,7 @@ #include "core/logger.hpp" #include "core/def/phare_mpi.hpp" +#include "core/utilities/mpi_utils.hpp" #include #include diff --git a/src/core/data/electromag/electromag.hpp b/src/core/data/electromag/electromag.hpp index ed0a766f3..01abd0560 100644 --- a/src/core/data/electromag/electromag.hpp +++ b/src/core/data/electromag/electromag.hpp @@ -4,6 +4,7 @@ #include #include +#include "core/data/vecfield/vecfield.hpp" #include "core/hybrid/hybrid_quantities.hpp" #include 
"core/data/vecfield/vecfield_initializer.hpp" #include "initializer/data_provider.hpp" @@ -80,6 +81,10 @@ namespace core private: VecFieldInitializer Binit_; }; + + } // namespace core } // namespace PHARE + + #endif diff --git a/src/core/data/field/field_box.hpp b/src/core/data/field/field_box.hpp new file mode 100644 index 000000000..44cd270a4 --- /dev/null +++ b/src/core/data/field/field_box.hpp @@ -0,0 +1,99 @@ +#ifndef PHARE_CORE_DATA_FIELD_FIELD_BOX_HPP +#define PHARE_CORE_DATA_FIELD_FIELD_BOX_HPP + +#include "core/def.hpp" +#include "core/logger.hpp" +#include "core/utilities/types.hpp" +#include "core/utilities/box/box.hpp" + +#include +#include +#include + +namespace PHARE::core +{ + +template +class FieldBox +{ + using value_type = std::decay_t; + +public: + auto constexpr static dimension = Field_t::dimension; + + Field_t& field; + Box amr_box; + Box lcl_box; + + template + FieldBox(Field_t& field_, GridLayout_t const& layout) + : field{field_} + , amr_box{layout.AMRBox()} + , lcl_box{layout.ghostBoxFor(field)} + { + } + + template + FieldBox(Field_t& field_, GridLayout_t const& layout, + Box const& selection) + : field{field_} + , amr_box{layout.AMRBox()} + , lcl_box{selection} + { + // assert(amr_box * selection); + } + + template + FieldBox(Field_t& field_, GridLayout_t const& layout, Box const& selection) + : field{field_} + , amr_box{layout.AMRBox()} + , lcl_box{layout.AMRToLocal(selection)} + { + // assert(amr_box * selection); + } + + + template + void op(FieldBox const& that); + + template + void op(std::vector const& vec, std::size_t seek = 0); + + void append_to(std::vector& vec); +}; + + +template +template +void FieldBox::op(FieldBox const& that) +{ + auto src_it = that.lcl_box.begin(); + auto dst_it = lcl_box.begin(); + for (; dst_it != lcl_box.end(); ++src_it, ++dst_it) + Operator{field(*dst_it)}(that.field(*src_it)); +} + + + +template +template +void FieldBox::op(std::vector const& vec, std::size_t seek) +{ + auto dst_it = 
lcl_box.begin(); + for (; dst_it != lcl_box.end(); ++seek, ++dst_it) + Operator{field(*dst_it)}(vec[seek]); +} + +template +void FieldBox::append_to(std::vector& vec) +{ + // reserve vec before use! + auto src_it = lcl_box.begin(); + for (; src_it != lcl_box.end(); ++src_it) + vec.push_back(field(*src_it)); +} + +} // namespace PHARE::core + + +#endif diff --git a/src/core/data/grid/grid.hpp b/src/core/data/grid/grid.hpp index f507866ec..7f4367eef 100644 --- a/src/core/data/grid/grid.hpp +++ b/src/core/data/grid/grid.hpp @@ -35,7 +35,6 @@ class Grid : public NdArrayImpl Grid() = delete; - Grid(Grid const& source) = delete; Grid(Grid&& source) = default; Grid& operator=(Grid&& source) = delete; Grid& operator=(Grid const& source) = delete; @@ -45,7 +44,6 @@ class Grid : public NdArrayImpl : Super{dims...} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} { static_assert(sizeof...(Dims) == dimension, "Invalid dimension"); } @@ -55,7 +53,6 @@ class Grid : public NdArrayImpl : Super{dims} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} { } @@ -64,7 +61,13 @@ class Grid : public NdArrayImpl : Super{layout.allocSize(qty)} , name_{name} , qty_{qty} - , field_{name, qty, Super::data(), Super::shape()} + { + } + + Grid(Grid const& source) // let field_ default + : Super{source.shape()} + , name_{source.name()} + , qty_{source.physicalQuantity()} { } @@ -87,7 +90,7 @@ class Grid : public NdArrayImpl private: std::string name_{"No Name"}; PhysicalQuantity qty_; - field_type field_; + field_type field_{name_, qty_, Super::data(), Super::shape()}; }; diff --git a/src/core/data/grid/gridlayout.hpp b/src/core/data/grid/gridlayout.hpp index 74297a864..08cc4cda2 100644 --- a/src/core/data/grid/gridlayout.hpp +++ b/src/core/data/grid/gridlayout.hpp @@ -134,7 +134,6 @@ namespace core } } - inverseMeshSize_[0] = 1. 
/ meshSize_[0]; if constexpr (dimension > 1) { @@ -1150,6 +1149,30 @@ namespace core + template + auto ghostBoxFor(Field const& field) const + { + return _BoxFor(field, [&](auto const& centering, auto const direction) { + return this->ghostStartToEnd(centering, direction); + }); + } + + template + auto AMRGhostBoxFor(Field const& field) const + { + auto const centerings = centering(field); + auto const growBy = [&]() { + std::array arr; + for (std::uint8_t i = 0; i < dimension; ++i) + arr[i] = nbrGhosts(centerings[i]); + return arr; + }(); + auto ghostBox = grow(AMRBox_, growBy); + for (std::uint8_t i = 0; i < dimension; ++i) + ghostBox.upper[i] += (centerings[i] == QtyCentering::primal) ? 1 : 0; + return ghostBox; + } + template void evalOnBox(Field& field, Fn&& fn) const @@ -1207,6 +1230,30 @@ namespace core } + template + auto _BoxFor(Field const& field, Fn startToEnd) const + { + std::array lower, upper; + + auto const [ix0, ix1] = startToEnd(field, Direction::X); + lower[0] = ix0; + upper[0] = ix1; + if constexpr (dimension > 1) + { + auto const [iy0, iy1] = startToEnd(field, Direction::Y); + lower[1] = iy0; + upper[1] = iy1; + } + if constexpr (dimension == 3) + { + auto const [iz0, iz1] = startToEnd(field, Direction::Z); + lower[2] = iz0; + upper[2] = iz1; + } + return Box{lower, upper}; + } + + template auto StartToEndIndices_(Centering const& centering, StartToEnd const&& startToEnd, bool const includeEnd = false) const @@ -1509,7 +1556,6 @@ namespace core std::array, 2> ghostEndIndexTable_; Box AMRBox_; - // this constexpr initialization only works if primal==0 and dual==1 // this is defined in gridlayoutdefs.hpp don't change it because these // arrays will be accessed with [primal] and [dual] indexes. 
diff --git a/src/core/data/ions/ion_population/ion_population.hpp b/src/core/data/ions/ion_population/ion_population.hpp index aa6554e47..79054c724 100644 --- a/src/core/data/ions/ion_population/ion_population.hpp +++ b/src/core/data/ions/ion_population/ion_population.hpp @@ -43,7 +43,11 @@ namespace core NO_DISCARD std::string const& name() const { return name_; } - NO_DISCARD auto const& particleInitializerInfo() const { return particleInitializerInfo_; } + NO_DISCARD auto const& particleInitializerInfo() const + { + assert(particleInitializerInfo_.contains("density")); + return particleInitializerInfo_; + } diff --git a/src/core/data/ions/ions.hpp b/src/core/data/ions/ions.hpp index 2701ebdac..64b259503 100644 --- a/src/core/data/ions/ions.hpp +++ b/src/core/data/ions/ions.hpp @@ -253,6 +253,7 @@ namespace core NO_DISCARD bool sameMasses() const { return sameMasses_; } + auto& operator[](std::size_t const i) const { return populations_[i]; } private: bool allSameMass_() const @@ -279,4 +280,6 @@ namespace core } // namespace core } // namespace PHARE + + #endif diff --git a/src/core/data/particles/particle.hpp b/src/core/data/particles/particle.hpp index 0457865c2..14bd23046 100644 --- a/src/core/data/particles/particle.hpp +++ b/src/core/data/particles/particle.hpp @@ -40,7 +40,7 @@ template struct Particle { static_assert(dim > 0 and dim < 4, "Only dimensions 1,2,3 are supported."); - static const size_t dimension = dim; + static size_t const dimension = dim; Particle(double a_weight, double a_charge, std::array cell, std::array a_delta, std::array a_v) @@ -121,9 +121,9 @@ inline constexpr auto is_phare_particle_type template typename ParticleA, template typename ParticleB> -NO_DISCARD typename std::enable_if_t< - is_phare_particle_type> and is_phare_particle_type>, - bool> +NO_DISCARD typename std::enable_if_t> + and is_phare_particle_type>, + bool> operator==(ParticleA const& particleA, ParticleB const& particleB) { return particleA.weight == 
particleB.weight and // @@ -133,6 +133,8 @@ operator==(ParticleA const& particleA, ParticleB const& particleB) particleA.v == particleB.v; } + + } // namespace PHARE::core diff --git a/src/core/data/particles/particle_array.hpp b/src/core/data/particles/particle_array.hpp index 23990c47d..372b90b80 100644 --- a/src/core/data/particles/particle_array.hpp +++ b/src/core/data/particles/particle_array.hpp @@ -95,12 +95,8 @@ class ParticleArray NO_DISCARD auto back() { return particles_.back(); } NO_DISCARD auto front() { return particles_.front(); } - auto erase(IndexRange_& range) { cellMap_.erase(particles_, range); } - auto erase(IndexRange_&& range) - { - // TODO move ctor for range? - cellMap_.erase(std::forward(range)); - } + + auto erase(IndexRange_ range) { cellMap_.erase(range); } iterator erase(iterator first, iterator last) { @@ -201,6 +197,14 @@ class ParticleArray return cellMap_.partition(makeIndexRange(*this), std::forward(pred)); } + template + auto partition(Range_t&& range, Predicate&& pred) + { + auto const ret = cellMap_.partition(range, std::forward(pred)); + assert(ret.size() <= range.size()); + return ret; + } + template void print(CellIndex const& cell) const { @@ -228,18 +232,6 @@ class ParticleArray auto& box() const { return box_; } - auto& replace_from(ParticleArray const& that) - { - if (this == &that) // just in case - return *this; - this->resize(that.size()); - std::copy(that.begin(), that.end(), this->begin()); - this->box_ = that.box_; - this->cellMap_ = that.cellMap_; - return *this; - } - - private: Vector particles_; box_t box_; diff --git a/src/core/data/tensorfield/tensorfield.hpp b/src/core/data/tensorfield/tensorfield.hpp index ffc6bed92..348090aee 100644 --- a/src/core/data/tensorfield/tensorfield.hpp +++ b/src/core/data/tensorfield/tensorfield.hpp @@ -64,7 +64,7 @@ class TensorField TensorField() = delete; TensorField(TensorField const& source) = default; TensorField(TensorField&& source) = default; - TensorField& 
operator=(TensorField const& source) = delete; + TensorField& operator=(TensorField const& source) = default; TensorField& operator=(TensorField&& source) = default; TensorField(std::string const& name, tensor_t physQty) @@ -196,9 +196,9 @@ class TensorField NO_DISCARD auto begin() { return std::begin(components_); } - NO_DISCARD auto cbegin() const { return std::cbegin(components_); } + NO_DISCARD auto begin() const { return std::cbegin(components_); } NO_DISCARD auto end() { return std::end(components_); } - NO_DISCARD auto cend() const { return std::cend(components_); } + NO_DISCARD auto end() const { return std::cend(components_); } NO_DISCARD auto& componentNames() const { return componentNames_; } diff --git a/src/core/data/vecfield/vecfield.hpp b/src/core/data/vecfield/vecfield.hpp index 746bac66d..109d4c832 100644 --- a/src/core/data/vecfield/vecfield.hpp +++ b/src/core/data/vecfield/vecfield.hpp @@ -43,7 +43,7 @@ namespace core VecFieldNames() = default; template - explicit VecFieldNames(VecFieldT const& v) + VecFieldNames(VecFieldT const& v) : vecName{v.name()} , xName{v.getComponentName(core::Component::X)} , yName{v.getComponentName(core::Component::Y)} diff --git a/src/core/def/phlop.hpp b/src/core/def/phlop.hpp index f8c574566..db40ee2b8 100644 --- a/src/core/def/phlop.hpp +++ b/src/core/def/phlop.hpp @@ -1,7 +1,9 @@ #ifndef PHARE_CORE_DEF_PHLOP_HPP #define PHARE_CORE_DEF_PHLOP_HPP -#if __has_include("phlop/timing/scope_timer.hpp") + +#if __has_include("phlop/timing/scope_timer.hpp") \ + and (!defined(PHARE_HAVE_PHLOP) || PHARE_HAVE_PHLOP == 1) #include "phlop/timing/scope_timer.hpp" #define PHARE_HAVE_PHLOP 1 diff --git a/src/core/hybrid/hybrid_quantities.hpp b/src/core/hybrid/hybrid_quantities.hpp index ab55eff9d..e5784f90a 100644 --- a/src/core/hybrid/hybrid_quantities.hpp +++ b/src/core/hybrid/hybrid_quantities.hpp @@ -5,6 +5,7 @@ #include #include +#include #include @@ -13,8 +14,8 @@ namespace PHARE::core class HybridQuantity { public: - enum 
class Scalar { - Bx, // magnetic field components + enum class Scalar : std::uint16_t { + Bx = 0, // magnetic field components By, Bz, Ex, // electric field components diff --git a/src/core/numerics/interpolator/interpolator.hpp b/src/core/numerics/interpolator/interpolator.hpp index a0a6a5a96..b90db46dd 100644 --- a/src/core/numerics/interpolator/interpolator.hpp +++ b/src/core/numerics/interpolator/interpolator.hpp @@ -393,7 +393,7 @@ class Interpolator : private Weighter return std::forward_as_tuple(primal_startIndex_, primal_weights_); }(); - auto iCell = layout.AMRToLocal(Point{iCell_}); + auto const iCell = layout.AMRToLocal(Point{iCell_}); for (auto iDim = 0u; iDim < dimension; ++iDim) { startIndex_[iDim] diff --git a/src/core/numerics/ion_updater/ion_updater.hpp b/src/core/numerics/ion_updater/ion_updater.hpp index dbae99b75..be029dae2 100644 --- a/src/core/numerics/ion_updater/ion_updater.hpp +++ b/src/core/numerics/ion_updater/ion_updater.hpp @@ -2,20 +2,19 @@ #define PHARE_ION_UPDATER_HPP +#include "core/logger.hpp" +// #include "core/data/ions/ions.hpp" #include "core/utilities/box/box.hpp" #include "core/utilities/range/range.hpp" -#include "core/numerics/interpolator/interpolator.hpp" #include "core/numerics/pusher/pusher.hpp" +#include "core/numerics/moments/moments.hpp" #include "core/numerics/pusher/pusher_factory.hpp" +#include "core/numerics/interpolator/interpolator.hpp" #include "core/numerics/boundary_condition/boundary_condition.hpp" -#include "core/numerics/moments/moments.hpp" -#include "core/data/ions/ions.hpp" #include "initializer/data_provider.hpp" -#include "core/logger.hpp" -#include #include @@ -26,6 +25,8 @@ enum class UpdaterMode { domain_only = 1, all = 2 }; template class IonUpdater { + using This = IonUpdater; + public: static constexpr auto dimension = GridLayout::dimension; static constexpr auto interp_order = GridLayout::interp_order; @@ -55,7 +56,8 @@ class IonUpdater { } - void updatePopulations(Ions& ions, Electromag 
const& em, GridLayout const& layout, double dt, + template + void updatePopulations(Ions& ions, Electromag const& em, Boxing_t const& boxing, double dt, UpdaterMode = UpdaterMode::all); @@ -70,9 +72,11 @@ class IonUpdater private: - void updateAndDepositDomain_(Ions& ions, Electromag const& em, GridLayout const& layout); + template + void updateAndDepositDomain_(Ions& ions, Electromag const& em, Boxing_t const& boxing); - void updateAndDepositAll_(Ions& ions, Electromag const& em, GridLayout const& layout); + template + void updateAndDepositAll_(Ions& ions, Electromag const& em, Boxing_t const& boxing); // dealloced on regridding/load balancing coarsest @@ -83,22 +87,23 @@ class IonUpdater template +template void IonUpdater::updatePopulations(Ions& ions, Electromag const& em, - GridLayout const& layout, - double dt, UpdaterMode mode) + Boxing_t const& boxing, double dt, + UpdaterMode mode) { PHARE_LOG_SCOPE(3, "IonUpdater::updatePopulations"); resetMoments(ions); - pusher_->setMeshAndTimeStep(layout.meshSize(), dt); + pusher_->setMeshAndTimeStep(boxing.layout.meshSize(), dt); if (mode == UpdaterMode::domain_only) { - updateAndDepositDomain_(ions, em, layout); + updateAndDepositDomain_(ions, em, boxing); } else { - updateAndDepositAll_(ions, em, layout); + updateAndDepositAll_(ions, em, boxing); } } @@ -111,86 +116,89 @@ void IonUpdater::updateIons(Ions& ions) ions.computeBulkVelocity(); } +template +struct UpdaterSelectionBoxing +{ + auto constexpr static partGhostWidth = GridLayout::nbrParticleGhosts(); + using GridLayout_t = GridLayout; + using Box_t = IonUpdater_t::Box; + using Selector_t = IonUpdater_t::Pusher::ParticleSelector; + GridLayout_t const layout; + Box_t const nonLevelGhostBox; + Box_t const domainBox = layout.AMRBox(); + Box_t const ghostBox = grow(domainBox, partGhostWidth); + + Selector_t const noop = [](auto& particleRange) { return particleRange; }; + + // lambda copy captures to detach from above references in case of class copy construct + 
Selector_t const inDomainBox = [domainBox = domainBox](auto& particleRange) { + return particleRange.array().partition( + particleRange, [&](auto const& cell) { return core::isIn(cell, domainBox); }); + }; + + Selector_t const inGhostBox = [ghostBox = ghostBox](auto& particleRange) { + return particleRange.array().partition( + particleRange, [&](auto const& cell) { return isIn(cell, ghostBox); }); + }; + + Selector_t const inNonLevelGhostBox + = [nonLevelGhostBox = nonLevelGhostBox](auto& particleRange) { + return particleRange.array().partition( + particleRange, [&](auto const& cell) { return isIn(cell, nonLevelGhostBox); }); + }; + + Selector_t const inGhostLayer + = [ghostBox = ghostBox, domainBox = domainBox](auto& particleRange) { + return particleRange.array().partition(particleRange, [&](auto const& cell) { + return isIn(cell, ghostBox) and !isIn(cell, domainBox); + }); + }; +}; -template /** * @brief IonUpdater::updateAndDepositDomain_ evolves moments from time n to n+1 without updating particles, which stay at time n */ +template +template void IonUpdater::updateAndDepositDomain_(Ions& ions, Electromag const& em, - GridLayout const& layout) + Boxing_t const& boxing) { PHARE_LOG_SCOPE(3, "IonUpdater::updateAndDepositDomain_"); - auto domainBox = layout.AMRBox(); - - auto inDomainBox = [&domainBox](auto& particleRange) // - { - auto& box = domainBox; - return particleRange.array().partition( - [&](auto const& cell) { return core::isIn(Point{cell}, box); }); - }; - - auto constexpr partGhostWidth = GridLayout::nbrParticleGhosts(); - auto ghostBox{domainBox}; - ghostBox.grow(partGhostWidth); - - auto inGhostBox = [&](auto& particleRange) { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, ghostBox); }); - }; + auto const& layout = boxing.layout; for (auto& pop : ions) { - ParticleArray& domain = pop.domainParticles(); + auto& domain = (tmp_particles_ = pop.domainParticles()); // make local copy - // first push all 
domain particles - // push them while still inDomainBox - // accumulate those inDomainBox - // erase those which left - - auto inRange = makeIndexRange(domain); + // first push all domain particles twice + // accumulate those inNonLevelGhostBox auto outRange = makeIndexRange(domain); + auto allowed = outRange = pusher_->move(outRange, outRange, em, pop.mass(), interpolator_, + layout, boxing.noop, boxing.inNonLevelGhostBox); - auto inDomain = pusher_->move( - inRange, outRange, em, pop.mass(), interpolator_, layout, - [](auto& particleRange) { return particleRange; }, inDomainBox); - - interpolator_(inDomain, pop.density(), pop.flux(), layout); + interpolator_(allowed, pop.density(), pop.flux(), layout); - // TODO : we can erase here because we know we are working on a state - // that has been saved in the solverPPC - // this makes the updater quite coupled to how the solverPPC works while - // it kind of pretends not to be by being independent object in core... - // note we need to erase here if using the back_inserter for ghost copy - // otherwise they will be added after leaving domain particles. - domain.erase(makeRange(domain, inDomain.iend(), domain.size())); - - // then push patch and level ghost particles // push those in the ghostArea (i.e. 
stop pushing if they're not out of it) // deposit moments on those which leave to go inDomainBox - auto pushAndAccumulateGhosts = [&](auto& inputArray, bool copyInDomain = false) { - auto& outputArray = tmp_particles_.replace_from(inputArray); + auto pushAndAccumulateGhosts = [&](auto const& inputArray) { + tmp_particles_ = inputArray; // work on local copy - inRange = makeIndexRange(inputArray); - outRange = makeIndexRange(outputArray); - auto enteredInDomain = pusher_->move(inRange, outRange, em, pop.mass(), interpolator_, - layout, inGhostBox, inDomainBox); + auto outRange = makeIndexRange(tmp_particles_); + + auto enteredInDomain = pusher_->move(outRange, outRange, em, pop.mass(), interpolator_, + layout, boxing.inGhostBox, boxing.inDomainBox); - interpolator_(enteredInDomain, pop.density(), pop.flux(), layout); - if (copyInDomain) - { - domain.reserve(domain.size() + enteredInDomain.size()); - std::copy(enteredInDomain.begin(), enteredInDomain.end(), - std::back_inserter(domain)); - } + interpolator_(enteredInDomain, pop.density(), pop.flux(), layout); }; + // !TODO REVISE! // After this function is done domain particles overlaping ghost layers of neighbor patches // are sent to these neighbor's patchghost particle array. // After being pushed, some patch ghost particles may enter the domain. 
These need to be @@ -199,77 +207,66 @@ void IonUpdater::updateAndDepositDomain_(Ions& ion // On the contrary level ghost particles entering the domain here do not need to be copied // since they contribute to nodes that are not shared with neighbor patches an since // level border nodes will receive contributions from levelghost old and new particles - pushAndAccumulateGhosts(pop.patchGhostParticles(), true); - pushAndAccumulateGhosts(pop.levelGhostParticles()); + + if (pop.levelGhostParticles().size()) + pushAndAccumulateGhosts(pop.levelGhostParticles()); } } -template /** * @brief IonUpdater::updateAndDepositDomain_ evolves moments and particles from time n to n+1 */ +template +template void IonUpdater::updateAndDepositAll_(Ions& ions, Electromag const& em, - GridLayout const& layout) + Boxing_t const& boxing) { PHARE_LOG_SCOPE(3, "IonUpdater::updateAndDepositAll_"); - auto constexpr partGhostWidth = GridLayout::nbrParticleGhosts(); - auto domainBox = layout.AMRBox(); - auto ghostBox{domainBox}; - ghostBox.grow(partGhostWidth); - - auto inDomainBox = [&domainBox](auto& particleRange) // - { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, domainBox); }); - }; - - auto inGhostBox = [&](auto& particleRange) { - return particleRange.array().partition( - [&](auto const& cell) { return isIn(Point{cell}, ghostBox); }); - }; - - - auto inGhostLayer = [&](auto& particleRange) { - return particleRange.array().partition([&](auto const& cell) { - return isIn(Point{cell}, ghostBox) and !isIn(Point{cell}, domainBox); - }); - }; + auto const& layout = boxing.layout; // push domain particles, erase from array those leaving domain - // push patch and level ghost particles that are in ghost area (==ghost box without domain) - // copy patch and ghost particles out of ghost area that are in domain, in particle array - // finally all particles in domain are to be interpolated on mesh. 
+ // push level ghost particles that are in ghost area (==ghost box without domain) + // copy ghost particles out of ghost area that are in domain, in particle array + // finally all particles in non level ghost box are to be interpolated on mesh. for (auto& pop : ions) { auto& domainParticles = pop.domainParticles(); auto domainPartRange = makeIndexRange(domainParticles); - auto inDomain = pusher_->move( - domainPartRange, domainPartRange, em, pop.mass(), interpolator_, layout, - [](auto const& particleRange) { return particleRange; }, inDomainBox); + auto inDomain = pusher_->move(domainPartRange, domainPartRange, em, pop.mass(), + interpolator_, layout, boxing.noop, boxing.inDomainBox); + + auto now_ghosts = makeRange(domainParticles, inDomain.iend(), domainParticles.size()); + auto const not_level_ghosts = boxing.inNonLevelGhostBox(now_ghosts); - domainParticles.erase(makeRange(domainParticles, inDomain.iend(), domainParticles.size())); + // copy out new patch ghosts + auto& patchGhost = pop.patchGhostParticles(); + patchGhost.reserve(patchGhost.size() + not_level_ghosts.size()); + std::copy(not_level_ghosts.begin(), not_level_ghosts.end(), std::back_inserter(patchGhost)); - auto pushAndCopyInDomain = [&](auto&& particleRange) { - auto inGhostLayerRange = pusher_->move(particleRange, particleRange, em, pop.mass(), - interpolator_, layout, inGhostBox, inGhostLayer); + domainParticles.erase(now_ghosts); // drop all ghosts + + if (pop.levelGhostParticles().size()) + { + auto particleRange = makeIndexRange(pop.levelGhostParticles()); + auto inGhostLayerRange + = pusher_->move(particleRange, particleRange, em, pop.mass(), interpolator_, layout, + boxing.inGhostBox, boxing.inGhostLayer); auto& particleArray = particleRange.array(); particleArray.export_particles( - domainParticles, [&](auto const& cell) { return isIn(Point{cell}, domainBox); }); + domainParticles, [&](auto const& cell) { return isIn(cell, boxing.domainBox); }); particleArray.erase( 
makeRange(particleArray, inGhostLayerRange.iend(), particleArray.size())); - }; - - pushAndCopyInDomain(makeIndexRange(pop.patchGhostParticles())); - pushAndCopyInDomain(makeIndexRange(pop.levelGhostParticles())); + } interpolator_(makeIndexRange(domainParticles), pop.density(), pop.flux(), layout); + interpolator_(makeIndexRange(patchGhost), pop.density(), pop.flux(), layout); } } diff --git a/src/core/numerics/pusher/pusher.hpp b/src/core/numerics/pusher/pusher.hpp index 9c1b48b9f..2337140d2 100644 --- a/src/core/numerics/pusher/pusher.hpp +++ b/src/core/numerics/pusher/pusher.hpp @@ -20,9 +20,10 @@ namespace core protected: static auto constexpr dimension = GridLayout::dimension; - using ParticleSelector = std::function; public: + using ParticleSelector = std::function; + // TODO : to really be independant on boris which has 2 push steps // we should have an arbitrary number of selectors, 1 per push step virtual ParticleRange move(ParticleRange const& rangeIn, ParticleRange& rangeOut, diff --git a/src/core/utilities/box/box.hpp b/src/core/utilities/box/box.hpp index 0ff5d1a8a..b194a5b5a 100644 --- a/src/core/utilities/box/box.hpp +++ b/src/core/utilities/box/box.hpp @@ -2,15 +2,16 @@ #define PHARE_CORE_UTILITIES_BOX_BOX_HPP +#include "core/def.hpp" #include "core/utilities/types.hpp" #include "core/utilities/point/point.hpp" #include "core/utilities/meta/meta_utilities.hpp" -#include "core/def.hpp" +#include #include -#include #include #include +#include namespace PHARE::core { @@ -25,11 +26,12 @@ class box_iterator; template struct Box { - static const size_t dimension = dim; + static size_t const dimension = dim; + using Point_t = Point; - Point lower; - Point upper; + Point_t lower; + Point_t upper; Box() = default; @@ -44,6 +46,7 @@ struct Box : lower{_lower} , upper{_upper} { + assert(lower <= upper); } template @@ -70,21 +73,42 @@ struct Box return Box{lower - that.lower, upper - that.upper}; } + NO_DISCARD bool isEmpty() const { return (*this) == Box{}; } 
void grow(Type const& size) { assert(size >= 0); - for (auto& c : lower) - { - c -= size; - } - for (auto& c : upper) - { - c += size; - } + lower -= size; + upper += size; + } + + + template + auto& grow(std::array const& size) + { + lower -= size; + upper += size; + return *this; } + auto& shrink(Type const& size) + { + assert(size >= 0); + lower += size; + upper -= size; + return *this; + } + + template + auto& shrink(std::array const& size) + { + lower += size; + upper -= size; + return *this; + } + + NO_DISCARD auto shape() const { return upper - lower + 1; } NO_DISCARD auto size() const { return core::product(shape()); } @@ -146,6 +170,11 @@ struct Box else return 6; } + + + std::vector remove(Box const& that) const; + Box merge(Box const& that) const; + Box remove_edge(Box const& that) const; }; template @@ -235,34 +264,62 @@ bool isIn(Point const& point, BoxContainer const& boxes) /** This overload of isIn does the same as the one above but takes only * one box. */ -template -NO_DISCARD bool isIn(Point const& point, - Box const& box) +template typename Point, typename Type, std::size_t SIZE> +NO_DISCARD bool isIn(Point const& point, Box const& box) { - auto isIn1D = [](typename Point::value_type pos, typename Point::value_type lower, - typename Point::value_type upper) { return pos >= lower && pos <= upper; }; + auto isIn1D = [](auto const pos, auto const lower, auto const upper) { + return pos >= lower && pos <= upper; + }; bool pointInBox = true; - for (auto iDim = 0u; iDim < Point::dimension; ++iDim) - { + for (auto iDim = 0u; iDim < SIZE; ++iDim) pointInBox = pointInBox && isIn1D(point[iDim], box.lower[iDim], box.upper[iDim]); - } if (pointInBox) return pointInBox; return false; } +template +NO_DISCARD auto isIn(Particle const& particle, Box const& box) + -> decltype(isIn(particle.iCell, box), bool()) +{ + return isIn(particle.iCell, box); +} + +template +NO_DISCARD auto isIn(Args const&&... 
args) +{ + return isIn(args...); +} + + +template typename Point, typename Type, std::size_t SIZE> +struct InBox +{ + bool operator()(auto const& arg) const { return isIn(arg, box); } + + Box const& box; +}; + template -Box grow(Box const& box, OType const& size) +NO_DISCARD Box grow(Box const& box, OType const& size) { auto copy{box}; copy.grow(size); return copy; } +template +NO_DISCARD Box shrink(Box const& box, T2 const& size) +{ + auto copy{box}; + copy.shrink(size); + return copy; +} + template NO_DISCARD Box emptyBox() { @@ -284,6 +341,138 @@ auto& operator<<(std::ostream& os, Box const& box) +template +std::vector> Box::remove(Box const& to_remove) const +{ + using box_t = Box; + using _m = std::unordered_map; + + auto const box = *this; + + auto overlap = box * to_remove; + + if (not overlap) + return std::vector{box}; + + auto copy = [](auto cpy, auto const& replace) { + for (auto const& [i, v] : replace) + cpy[i] = v; + return cpy; + }; + + auto intersection = *overlap; + + std::unordered_map boxes; + + if (intersection.lower[0] > box.lower[0]) + boxes["left"] = Box(box.lower, copy(box.upper, _m{{0, intersection.lower[0] - 1}})); + if (intersection.upper[0] < box.upper[0]) + boxes["right"] = box_t{copy(box.lower, _m{{0, intersection.upper[0] + 1}}), box.upper}; + + [[maybe_unused]] Type minx = 0, maxx = 0; + if constexpr (dim > 1) + { + minx = boxes.count("left") > 0 ? intersection.lower[0] : box.lower[0]; + maxx = boxes.count("right") > 0 ? intersection.upper[0] : box.upper[0]; + + if (intersection.lower[1] > box.lower[1]) + boxes["down"] = box_t{copy(box.lower, _m{{0, minx}}), + copy(box.upper, _m{{0, maxx}, {1, intersection.lower[1] - 1}})}; + + if (intersection.upper[1] < box.upper[1]) + boxes["up"] = Box(copy(box.lower, _m{{0, minx}, {1, intersection.upper[1] + 1}}), + copy(box.upper, _m{{0, maxx}})); + } + + if constexpr (dim > 2) + { + Type miny = boxes.count("down") > 0 ? intersection.lower[1] : box.lower[1]; + Type maxy = boxes.count("up") > 0 ? 
intersection.upper[1] : box.upper[1]; + + if (intersection.lower[2] > box.lower[2]) + boxes["back"] = Box(copy(box.lower, _m{{0, minx}, {1, miny}}), + copy(intersection.lower - 1, _m{{0, maxx}, {1, maxy}})); + if (intersection.upper[2] < box.upper[2]) + boxes["front"] = Box(copy(intersection.upper + 1, _m{{0, minx}, {1, miny}}), + copy(box.upper, _m{{0, maxx}, {1, maxy}})); + } + + std::vector remaining; + for (auto const& [key, val] : boxes) + remaining.emplace_back(val); + return remaining; +} + + +template +Box Box::merge(Box const& to_merge) const +{ + Box merged{*this}; + for (std::size_t i = 0; i < dim; ++i) + { + merged.lower[i] = std::min(merged.lower[i], to_merge.lower[i]); + merged.upper[i] = std::max(merged.upper[i], to_merge.upper[i]); + } + return merged; +} + +template +Box Box::remove_edge(Box const& to_remove) const +{ + auto const remaining = this->remove(to_remove); + + if (remaining.size() == 0) + return *this; + if (remaining.size() == 1) + return remaining[0]; + + Box leftover{ConstArray(std::numeric_limits::max()), + ConstArray(std::numeric_limits::min())}; + + for (auto const& rem : remaining) + leftover = leftover.merge(rem); + + return leftover; +} + +template +bool any_overlaps_in(BoxHavers const& havers, Accessor&& fn) +{ + if (havers.size()) + for (std::size_t i = 0; i < havers.size() - 1; ++i) + for (std::size_t j = i + 1; j < havers.size(); ++j) + if (auto overlap = fn(havers[i]) * fn(havers[j])) + return true; + return false; +} + +template +NO_DISCARD Box shift(Box const& box, Type const& offset) +{ + auto copy{box}; + copy.lower += offset; + copy.upper += offset; + return copy; +} + +template typename Point_t, typename Type, std::size_t dim> +NO_DISCARD Box shift(Box const& box, Point_t const& offset) +{ + auto copy{box}; + for (std::uint8_t i = 0; i < dim; ++i) + copy.lower[i] += offset[i], copy.upper[i] += offset[i]; + return copy; +} + +template +NO_DISCARD Box shift_idx(Box const& box, Type const& offset) +{ + auto copy{box}; + 
copy.lower[idx] += offset; + copy.upper[idx] += offset; + return copy; +} + } // namespace PHARE::core #endif diff --git a/src/core/utilities/cellmap.hpp b/src/core/utilities/cellmap.hpp index c50f48d5d..86ab52a48 100644 --- a/src/core/utilities/cellmap.hpp +++ b/src/core/utilities/cellmap.hpp @@ -170,7 +170,7 @@ class CellMap // erase all items indexed in the given range from both the cellmap and the // array the range is for. template - void erase(Range&& range); + void erase(Range range); // erase items indexes from the cellmap @@ -448,7 +448,7 @@ inline auto CellMap::partition(Range range, Predicate&& pred, } } - return makeRange(range.array(), range.ibegin(), range.ibegin() + pivot); + return makeRange(range.array(), range.ibegin(), pivot); } @@ -456,7 +456,7 @@ inline auto CellMap::partition(Range range, Predicate&& pred, template template -inline void CellMap::erase(Range&& range) +inline void CellMap::erase(Range range) { auto& items = range.array(); diff --git a/src/core/utilities/logger/logger_defaults.hpp b/src/core/utilities/logger/logger_defaults.hpp index 6bb2b4c90..0397c9d8f 100644 --- a/src/core/utilities/logger/logger_defaults.hpp +++ b/src/core/utilities/logger/logger_defaults.hpp @@ -1,3 +1,6 @@ +// IWYU pragma: private, include "core/logger.hpp" + + #ifndef PHARE_CORE_UTILITIES_LOGGER_LOGGER_DEFAULTS_HPP #define PHARE_CORE_UTILITIES_LOGGER_LOGGER_DEFAULTS_HPP @@ -7,8 +10,8 @@ #define PHARE_SCOPE_TIMER PHLOP_SCOPE_TIMER #endif // PHARE_WITH_PHLOP -#ifndef PHARE_SCOPE_TIMER -#define PHARE_SCOPE_TIMER(str) // nothing +#ifndef PHARE_SCOPE_TIMER // uncomment next line to activate +#define PHARE_SCOPE_TIMER(str) // PHARE_LOG_LINE_STR(str); #endif // PHARE_SCOPE_TIMER diff --git a/src/core/utilities/meta/meta_utilities.hpp b/src/core/utilities/meta/meta_utilities.hpp index 3bea76ba3..449d5ae01 100644 --- a/src/core/utilities/meta/meta_utilities.hpp +++ b/src/core/utilities/meta/meta_utilities.hpp @@ -1,10 +1,13 @@ #ifndef 
PHARE_CORE_UTILITIES_META_META_UTILITIES_HPP #define PHARE_CORE_UTILITIES_META_META_UTILITIES_HPP +#include "core/utilities/types.hpp" + + +#include #include #include -#include "core/utilities/types.hpp" namespace PHARE { @@ -17,7 +20,7 @@ namespace core struct dummy { using type = int; - static const type value = 0; + static type const value = 0; }; @@ -81,6 +84,30 @@ namespace core SimulatorOption, InterpConst<3>, 4, 5, 8, 9, 25>>{}; } + template + constexpr auto defaultNbrRefinedParts() + { + auto sims = possibleSimulators(); + using SimsTuple_t = decltype(sims); + auto constexpr nsims = std::tuple_size_v; + + std::size_t nbRefinedPart = 0; + + for_N([&](auto i) { + using SimOption = std::tuple_element_t; + + if constexpr (std::tuple_element_t<0, SimOption>{}() == dim + and std::tuple_element_t<1, SimOption>{}() == interp) + { + nbRefinedPart = std::tuple_element_t<2, SimOption>{}; + } + }); + + assert(nbRefinedPart != 0); // is static_assert in constexpr call + + return nbRefinedPart; + } + template // used from PHARE::amr::Hierarchy auto makeAtRuntime(std::size_t dim, Maker&& maker) diff --git a/src/core/utilities/mpi_utils.hpp b/src/core/utilities/mpi_utils.hpp index 112c3f5a5..66df46268 100644 --- a/src/core/utilities/mpi_utils.hpp +++ b/src/core/utilities/mpi_utils.hpp @@ -2,6 +2,10 @@ #define PHARE_CORE_UTILITIES_MPI_HPP #include "core/def.hpp" +#include "core/def/phare_mpi.hpp" +#include "core/utilities/span.hpp" +#include "core/utilities/types.hpp" + #include #include #include @@ -9,11 +13,6 @@ #include #include - -#include "core/def/phare_mpi.hpp" -#include "core/utilities/span.hpp" -#include "core/utilities/types.hpp" - namespace PHARE::core::mpi { template diff --git a/src/core/utilities/point/point.hpp b/src/core/utilities/point/point.hpp index 367636b49..858369cd1 100644 --- a/src/core/utilities/point/point.hpp +++ b/src/core/utilities/point/point.hpp @@ -86,6 +86,18 @@ namespace core NO_DISCARD bool operator!=(Point const& other) const { return 
!(*this == other); } + template typename Arr, typename T> + auto operator<(Arr const& arr) const + { + return for_N_all([&](auto iDim) { return r[iDim] < arr[iDim]; }); + } + template typename Arr, typename T> + auto operator<=(Arr const& arr) const + { + return for_N_all([&](auto iDim) { return r[iDim] <= arr[iDim]; }); + } + + template NO_DISCARD auto toArray() const { @@ -131,40 +143,46 @@ namespace core } - - auto operator+(Type const& value) const + auto& operator+=(Type const& value) { - auto copy = *this; for (auto iDim = 0u; iDim < dim; ++iDim) - copy[iDim] += value; - return copy; + r[iDim] += value; + return *this; } - auto operator+(std::array const& value) const + auto& operator-=(Type const& value) { - auto copy = *this; for (auto iDim = 0u; iDim < dim; ++iDim) - copy[iDim] += value[iDim]; - return copy; + r[iDim] -= value; + return *this; } - auto operator+(Point const& value) const { return (*this) + value.r; } - - - auto operator-(Type const& value) const + auto& operator+=(std::array const& value) { - auto copy = *this; for (auto iDim = 0u; iDim < dim; ++iDim) - copy[iDim] -= value; - return copy; + r[iDim] += value[iDim]; + return *this; } - auto operator-(std::array const& value) const + auto& operator-=(std::array const& value) { - auto copy = *this; for (auto iDim = 0u; iDim < dim; ++iDim) - copy[iDim] -= value[iDim]; - return copy; + r[iDim] -= value[iDim]; + return *this; } + + auto operator+(Type const& value) const { return Point{r} += value; } + auto operator+(std::array const& value) const { return Point{r} += value; } + auto operator+(Point const& value) const { return (*this) + value.r; } + + auto operator-(Type const& value) const { return Point{r} -= value; } + auto operator-(std::array const& value) const { return Point{r} -= value; } auto operator-(Point const& value) const { return (*this) - value.r; } + auto operator*(Type const& value) const + { + auto copy = *this; + for (auto iDim = 0u; iDim < dim; ++iDim) + copy[iDim] *= 
value; + return copy; + } NO_DISCARD constexpr auto size() const { return dim; } NO_DISCARD auto begin() { return r.begin(); } @@ -174,6 +192,24 @@ namespace core NO_DISCARD auto& operator*() const { return r; } + auto as_unsigned() const + { + PHARE_DEBUG_DO({ + for (auto iDim = 0u; iDim < dim; ++iDim) + assert(r[iDim] >= 0); + }) + if constexpr (sizeof(Type) == 4) + return Point{this->template toArray()}; + // else no return cause not yet handled + } + auto as_signed() const + { + if constexpr (sizeof(Type) == 4) + return Point{this->template toArray()}; + // else no return cause not yet handled + } + + private: std::array r{}; }; diff --git a/src/core/utilities/types.hpp b/src/core/utilities/types.hpp index ee90667ad..cef9d8aba 100644 --- a/src/core/utilities/types.hpp +++ b/src/core/utilities/types.hpp @@ -226,7 +226,7 @@ namespace core NO_DISCARD inline std::optional get_env(std::string const& key) { - if (const char* val = std::getenv(key.c_str())) + if (char const* val = std::getenv(key.c_str())) return std::string{val}; return std::nullopt; } @@ -497,6 +497,22 @@ auto make_named_tuple(Pairs&&... 
pairs) return std::make_tuple(pairs...); } + + +template +struct Equals +{ + void operator()(auto& d0) { d = d0; } + D& d; +}; + +template +struct PlusEquals +{ + void operator()(auto& d0) { d += d0; } + D& d; +}; + } // namespace PHARE::core diff --git a/src/phare/phare.hpp b/src/phare/phare.hpp index 666f3de8e..629fc03e5 100644 --- a/src/phare/phare.hpp +++ b/src/phare/phare.hpp @@ -4,10 +4,13 @@ #include "core/def/phlop.hpp" // scope timing -#include "simulator/simulator.hpp" -#include "core/utilities/algorithm.hpp" +#include "amr/wrappers/integrator.hpp" #include "core/utilities/mpi_utils.hpp" +#include +#include +#include + #include #include @@ -17,7 +20,7 @@ class StreamAppender : public SAMRAI::tbox::Logger::Appender { public: StreamAppender(std::ostream* stream) { d_stream = stream; } - void logMessage(std::string const& message, std::string const& filename, const int line) + void logMessage(std::string const& message, std::string const& filename, int const line) { (*d_stream) << "At :" << filename << " line :" << line << " message: " << message << std::endl; diff --git a/tests/amr/data/field/CMakeLists.txt b/tests/amr/data/field/CMakeLists.txt new file mode 100644 index 000000000..be0cbd503 --- /dev/null +++ b/tests/amr/data/field/CMakeLists.txt @@ -0,0 +1,35 @@ + +add_subdirectory(coarsening) +add_subdirectory(copy_pack) +add_subdirectory(geometry) +add_subdirectory(overlap) +add_subdirectory(refine) +add_subdirectory(variable) +add_subdirectory(time_interpolate) + +function(_add_amr_fields_test src_name) + + add_executable(${src_name} ${src_name}.cpp) + + target_include_directories(${src_name} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + ) + + target_link_directories(${src_name} PRIVATE + ${HDF5_LIBRARY_PATH} + ) + + target_link_libraries(${src_name} PRIVATE + phare_amr + ${GTEST_LIBS} + ) + + add_phare_test(${src_name} ${CMAKE_CURRENT_BINARY_DIR}) + +endfunction(_add_amr_fields_test) + +file(COPY "test_fields_schedules_inputs" DESTINATION 
${CMAKE_CURRENT_BINARY_DIR}) + +_add_amr_fields_test(test_fields_schedules) + diff --git a/tests/amr/data/field/refine/test_field_refine.cpp b/tests/amr/data/field/refine/test_field_refine.cpp index 836616d56..d992f83c5 100644 --- a/tests/amr/data/field/refine/test_field_refine.cpp +++ b/tests/amr/data/field/refine/test_field_refine.cpp @@ -7,11 +7,13 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +#include "core/data/grid/gridlayout.hpp" +#include "core/data/grid/gridlayout_impl.hpp" #include "amr/data/field/refine/field_linear_refine.hpp" #include "amr/data/field/refine/field_refine_operator.hpp" #include "amr/data/field/refine/field_refiner.hpp" -#include "core/data/grid/gridlayout.hpp" + #include "test_field_refinement_on_hierarchy.hpp" diff --git a/tests/amr/data/field/test_fields_schedules.cpp b/tests/amr/data/field/test_fields_schedules.cpp new file mode 100644 index 000000000..77d6b0441 --- /dev/null +++ b/tests/amr/data/field/test_fields_schedules.cpp @@ -0,0 +1,176 @@ + +#include "phare_core.hpp" +#include "phare/phare.hpp" +#include "core/utilities/meta/meta_utilities.hpp" +#include "amr/utilities/box/amr_box.hpp" + +#include "tests/amr/test_hierarchy_fixtures.hpp" +#include "tests/core/data/gridlayout/test_gridlayout.hpp" + +#include +#include +#include + + +#include "gtest/gtest.h" + +#include +#include + + +static constexpr std::size_t ppc = 100; + +template +struct TestParam +{ + auto constexpr static dim = _dim; + using PhareTypes = PHARE::core::PHARE_Types; + using GridLayout_t = TestGridLayout; + using Box_t = PHARE::core::Box; + using ParticleArray_t = PHARE::core::ParticleArray; + + using Hierarchy_t + = AfullHybridBasicHierarchy()>; +}; + +template +struct FieldScheduleHierarchyTest : public ::testing::Test +{ + using TestParam = TestParam_; + using Hierarchy_t = typename TestParam::Hierarchy_t; + using ResourceManager_t = typename Hierarchy_t::ResourcesManagerT; + auto constexpr static dim = TestParam::dim; + + std::string const 
configFile + = "test_fields_schedules_inputs/" + std::to_string(dim) + "d_L0.txt"; + Hierarchy_t hierarchy{configFile}; +}; + +using FieldDatas = testing::Types, TestParam<1, 2>, TestParam<1, 3>, + TestParam<2, 1>, TestParam<2, 2>, TestParam<2, 3>>; + + +TYPED_TEST_SUITE(FieldScheduleHierarchyTest, FieldDatas); +namespace PHARE::core +{ + + + +TYPED_TEST(FieldScheduleHierarchyTest, testing_hyhy_schedules) +{ + auto constexpr static dim = TypeParam::dim; + using GridLayout_t = TestFixture::TestParam::GridLayout_t; + using FieldData_t = TestFixture::ResourceManager_t::UserField_t::patch_data_type; + auto constexpr static interp = GridLayout_t::interp_order; + auto constexpr static ghost_cells = GridLayout_t::nbrGhosts(); + + auto lvl0 = this->hierarchy.basicHierarchy->hierarchy()->getPatchLevel(0); + auto& rm = *this->hierarchy.resourcesManagerHybrid; + auto& ions = this->hierarchy.hybridModel->state.ions; + + for (auto& patch : *lvl0) + { + auto const layout = PHARE::amr::layoutFromPatch(*patch); + + auto dataOnPatch = rm.setOnPatch(*patch, ions); + for (auto& pop : ions) + { + auto const domGhostBox = layout.AMRGhostBoxFor(pop.flux()[0].physicalQuantity()); + auto const primal_box = shrink(domGhostBox, ghost_cells); + + for (auto const& bix : layout.AMRToLocal(primal_box)) + { + for (auto& f : pop.flux()) + f(bix) = .2; + pop.density()(bix) = .2; + } + } + } + + this->hierarchy.messenger->fillFluxBorders(ions, *lvl0, 0); + this->hierarchy.messenger->fillDensityBorders(ions, *lvl0, 0); + + + auto proton_flux_x_id = *rm.getID("protons_flux_x"); + + for (auto& patch : *lvl0) + { + auto dataOnPatch = rm.setOnPatch(*patch, ions); + + auto const field_data = SAMRAI_SHARED_PTR_CAST( + patch->getPatchData(proton_flux_x_id)); + + SAMRAI::hier::HierarchyNeighbors const hier_nbrs{ + *this->hierarchy.basicHierarchy->hierarchy(), patch->getPatchLevelNumber(), + patch->getPatchLevelNumber()}; + + auto domainSamBox = patch->getBox(); + auto const domainBox = 
phare_box_from(domainSamBox); + + auto const neighbors = core::generate( + [](auto const& el) { return phare_box_from(el); }, + hier_nbrs.getSameLevelNeighbors(domainSamBox, patch->getPatchLevelNumber())); + auto const ncells = core::sum_from( + neighbors, [&](auto& el) { return (*(grow(el, 1) * domainBox)).size(); }); + + if (mpi::rank() == 0) + for (auto& pop : ions) + { + if (pop.name() == "protons") + { + auto const layout = PHARE::amr::layoutFromPatch(*patch); + + std::vector vexpected(pop.flux()[0].size(), 1); + auto expected = core::make_array_view(vexpected.data(), pop.flux()[0].shape()); + + // auto const domGhostBox + // = layout.AMRGhostBoxFor(pop.flux()[0].physicalQuantity()); + + // for (auto const& neighbor : neighbors) + // if (auto p2poverlap = (domGhostBox * grow(neighbor, ghost_cells))) + // for (auto const& bix : layout.AMRToLocal(*p2poverlap)) + // expected(*bix) += 1; + + auto const domGhostBox + = layout.AMRGhostBoxFor(pop.flux()[0].physicalQuantity()); + auto const primalDomainBox = [&]() { + auto box = domainBox; + box.upper += 1; + return box; + }(); + auto const noverlap = shrink(primalDomainBox, 1 + (interp > 1)); + + for (auto const ghost_layer : domGhostBox.remove(noverlap)) + for (auto const& neighbor : neighbors) + { + auto nghostbox = grow(neighbor, GridLayout_t::nbrGhosts()); + nghostbox.upper += 1; + if (auto p2poverlap = (nghostbox * ghost_layer)) + for (auto const& bix : layout.AMRToLocal(*p2poverlap)) + expected(*bix) += 1; + } + + if (vexpected != field_data->field.vector()) + field_data->gridLayout.evalOnGhostBox(pop.flux()[0], [&](auto... 
args) { + PHARE_LOG_LINE_SS((Point{args...}.as_signed() + domGhostBox.lower) + << " " << pop.flux()[0](args...)); + }); + + for (auto const& e : field_data->field) + EXPECT_NE(e, 0); // :/ + } + } + } +} + + + +} // namespace PHARE::core + + +int main(int argc, char** argv) +{ + PHARE::SamraiLifeCycle samsam{argc, argv}; + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tests/amr/data/field/test_fields_schedules_inputs/1d_L0.txt b/tests/amr/data/field/test_fields_schedules_inputs/1d_L0.txt new file mode 100644 index 000000000..9a2bf35ab --- /dev/null +++ b/tests/amr/data/field/test_fields_schedules_inputs/1d_L0.txt @@ -0,0 +1,45 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0) , (64) ] + x_lo = 0.0 + x_up = 1.0 + periodic_dimension = 1 +} +Main +{ + dim = 1 +} +PatchHierarchy +{ + max_levels = 1 + // vector of coarse ratio with dim dimension + + largest_patch_size + { + level_0 = 32 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + level_0 = 32 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10 +} diff --git a/tests/amr/data/field/test_fields_schedules_inputs/1d_config.txt b/tests/amr/data/field/test_fields_schedules_inputs/1d_config.txt new file mode 100644 index 000000000..fedd6f142 --- /dev/null +++ b/tests/amr/data/field/test_fields_schedules_inputs/1d_config.txt @@ -0,0 +1,59 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0) , (64) ] + x_lo = 0.0 + x_up = 1.0 + periodic_dimension = 1 +} +Main +{ + dim = 1 +} +PatchHierarchy +{ + max_levels = 2 + // vector of coarse ratio with dim dimension + ratio_to_coarser + { + level_1 = 2 + } + largest_patch_size + { + level_0 = 64 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + level_0 = 
10 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + at_0 + { + cycle = 0 + tag_0 + { + tagging_method = "REFINE_BOXES" + level_0 + { + boxes = [ (10) , (50)] + } + } + } +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10 +} diff --git a/tests/amr/data/field/test_fields_schedules_inputs/2d_L0.txt b/tests/amr/data/field/test_fields_schedules_inputs/2d_L0.txt new file mode 100644 index 000000000..3a0b6cea2 --- /dev/null +++ b/tests/amr/data/field/test_fields_schedules_inputs/2d_L0.txt @@ -0,0 +1,59 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0, 0) , (64, 64) ] + x_lo = 0.0, 0.0 + x_up = 1.0, 1.0 + periodic_dimension = 1, 1 +} +Main +{ + dim = 2 +} +PatchHierarchy +{ + max_levels = 2 + // vector of coarse ratio with dim dimension + ratio_to_coarser + { + level_1 = 2, 2 + } + largest_patch_size + { + level_0 = 64, 64 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + level_0 = 10, 10 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + at_0 + { + cycle = 0 + tag_0 + { + tagging_method = "REFINE_BOXES" + level_0 + { + boxes = [ (20, 20) , (24,24)] + } + } + } +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10,10 +} diff --git a/tests/amr/data/field/test_fields_schedules_inputs/2d_config.txt b/tests/amr/data/field/test_fields_schedules_inputs/2d_config.txt new file mode 100644 index 000000000..3a0b6cea2 --- /dev/null +++ b/tests/amr/data/field/test_fields_schedules_inputs/2d_config.txt @@ -0,0 +1,59 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0, 0) , (64, 64) ] + x_lo = 0.0, 0.0 + x_up = 1.0, 1.0 + periodic_dimension = 1, 1 +} +Main +{ + dim = 2 +} +PatchHierarchy +{ + max_levels = 2 + 
// vector of coarse ratio with dim dimension + ratio_to_coarser + { + level_1 = 2, 2 + } + largest_patch_size + { + level_0 = 64, 64 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + level_0 = 10, 10 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + at_0 + { + cycle = 0 + tag_0 + { + tagging_method = "REFINE_BOXES" + level_0 + { + boxes = [ (20, 20) , (24,24)] + } + } + } +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10,10 +} diff --git a/tests/amr/data/particles/CMakeLists.txt b/tests/amr/data/particles/CMakeLists.txt index aeb14361e..2d7b670a4 100644 --- a/tests/amr/data/particles/CMakeLists.txt +++ b/tests/amr/data/particles/CMakeLists.txt @@ -2,3 +2,31 @@ add_subdirectory(copy) add_subdirectory(copy_overlap) add_subdirectory(stream_pack) add_subdirectory(refine) + +function(_add_amr_particles_test src_name) + + add_executable(${src_name} ${src_name}.cpp) + + target_include_directories(${src_name} PRIVATE + $ + ${GTEST_INCLUDE_DIRS} + ) + + target_link_directories(${src_name} PRIVATE + ${HDF5_LIBRARY_PATH} + ) + + target_link_libraries(${src_name} PRIVATE + phare_amr + ${GTEST_LIBS} + ) + + add_phare_test(${src_name} ${CMAKE_CURRENT_BINARY_DIR}) + +endfunction(_add_amr_particles_test) + +file(COPY "test_particles_schedules_inputs" DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + +_add_amr_particles_test(test_particles_data) +_add_amr_particles_test(test_particles_schedules) + diff --git a/tests/amr/data/particles/test_particles_data.cpp b/tests/amr/data/particles/test_particles_data.cpp new file mode 100644 index 000000000..d808d0bf2 --- /dev/null +++ b/tests/amr/data/particles/test_particles_data.cpp @@ -0,0 +1,368 @@ + +#include "phare_core.hpp" +#include "phare/phare.hpp" +#include "core/utilities/types.hpp" +#include 
"amr/data/particles/particles_data.hpp" + +#include "tests/core/data/gridlayout/test_gridlayout.hpp" +#include "tests/core/data/particles/test_particles_fixtures.hpp" + +#include "gtest/gtest.h" + +#include + + +std::size_t constexpr static interp = 1; +std::size_t constexpr static cells = 5; +std::size_t constexpr static ppc = 10; +std::size_t constexpr static ghost_cells = 1; + + +template +struct Patch +{ + auto constexpr static dim = AParticlesData::dim; + using GridLayout_t = AParticlesData::GridLayout_t; + using ParticleArray_t = AParticlesData::ParticleArray_t; + using ParticlesData_t = PHARE::amr::ParticlesData; + auto constexpr static nbPartGhosts = GridLayout_t::nbrParticleGhosts(); + + Patch(auto const& box) + : layout{box} + { + } + + auto static overlap(Patch const& src, Patch& dst) + { + return overlap(src, dst, PHARE::core::ConstArray()); + } + + auto static overlap(Patch const& src, Patch& dst, auto const shift) + { + bool constexpr static overwriteInterior{true}; + + SAMRAI::hier::IntVector shiftVec{dimension}; + for (std::size_t i = 0; i < dim; ++i) + shiftVec[i] = -shift[i]; + + SAMRAI::hier::Box const srcMask{src.data->getGhostBox()}; + SAMRAI::hier::Box const fillBox{dst.data->getGhostBox()}; + SAMRAI::hier::Transformation const transformation{shiftVec}; + return std::shared_ptr{ + std::dynamic_pointer_cast(dst.geom->calculateOverlap( + *src.geom, srcMask, fillBox, overwriteInterior, transformation))}; + } + + SAMRAI::tbox::Dimension static inline const dimension{dim}; + SAMRAI::hier::BlockId static inline const blockId{0}; + SAMRAI::hier::IntVector static inline const ghostVec{dimension, ghost_cells}; + + GridLayout_t const layout; + SAMRAI::hier::Box const domain{PHARE::amr::samrai_box_from(layout.AMRBox())}; + std::shared_ptr const geom{ + std::make_shared(domain, ghostVec)}; + std::unique_ptr data{ + std::make_unique(domain, ghostVec, "name")}; + SAMRAI::hier::Box const mask{data->getGhostBox()}; +}; + +template +auto static 
make_shift_for(PHARE::core::Box const& box) +{ + int const span = cells * 3; + int const mid = cells * 3 / 2; + + if constexpr (D == 1) + { + auto shifts = PHARE::core::for_N<1, PHARE::core::for_N_R_mode::make_array>( + [&](auto i) { return PHARE::core::Point{0}; }); + PHARE::core::for_N([&](auto i) { + int const shift = box.upper[i] < mid ? 1 : -1; + shifts[i][i] = span * shift; + }); + + return shifts; + } + if constexpr (D == 2) + { + auto shifts = PHARE::core::for_N<3, PHARE::core::for_N_R_mode::make_array>( + [&](auto i) { return PHARE::core::Point{0, 0}; }); + PHARE::core::for_N([&](auto i) { + int const shift = box.upper[i] < mid ? 1 : -1; + shifts[i][i] = span * shift; + }); + + shifts[2] = {shifts[0][0], shifts[1][1]}; + + return shifts; + } + if constexpr (D == 3) + { + auto shifts = PHARE::core::for_N<7, PHARE::core::for_N_R_mode::make_array>( + [&](auto i) { return PHARE::core::Point{0, 0, 0}; }); + PHARE::core::for_N([&](auto i) { + int const shift = box.upper[i] < mid ? 1 : -1; + shifts[i][i] = span * shift; + }); + + shifts[3] = {shifts[0][0], shifts[1][1], 0}; + shifts[4] = {0, shifts[1][1], shifts[2][2]}; + shifts[5] = {shifts[0][0], 0, shifts[2][2]}; + shifts[6] = {shifts[0][0], shifts[1][1], shifts[2][2]}; + + return shifts; + } +} + +template +struct TestParam +{ + auto constexpr static dim = _dim; + using PhareTypes = PHARE::core::PHARE_Types; + using GridLayout_t = TestGridLayout; + using Box_t = PHARE::core::Box; + using ParticleArray_t = PHARE::core::ParticleArray; +}; + + + +template +struct AParticlesDataTest; + +template +struct AParticlesDataTest<1, TestParam> +{ + using Box_t = TestParam::Box_t; + using Patch_t = Patch; + auto constexpr static dim = TestParam::dim; + + AParticlesDataTest() + { + patches.reserve(3); + auto const off = cells - 1; + for (std::uint8_t i = 0; i < 3; ++i) + { + auto const cellx = i * cells; + patches.emplace_back(Box_t{PHARE::core::Point{cellx}, PHARE::core::Point{cellx + off}}); + } + } + + std::vector 
patches; +}; + + +template +struct AParticlesDataTest<2, TestParam> +{ + using Box_t = TestParam::Box_t; + using Patch_t = Patch; + auto constexpr static dim = TestParam::dim; + + AParticlesDataTest() + { + patches.reserve(3 * 3); + auto const off = cells - 1; + for (std::uint8_t i = 0; i < 3; ++i) + for (std::uint8_t j = 0; j < 3; ++j) + { + auto const cellx = i * cells; + auto const celly = j * cells; + patches.emplace_back(Box_t{PHARE::core::Point{cellx, celly}, + PHARE::core::Point{cellx + off, celly + off}}); + } + } + + std::vector patches; +}; + + +template +struct AParticlesDataTest<3, TestParam> +{ + using Box_t = TestParam::Box_t; + auto constexpr static dim = TestParam::dim; + using Patch_t = Patch; + + AParticlesDataTest() + { + patches.reserve(3 * 3); + auto const off = cells - 1; + for (std::uint8_t i = 0; i < 3; ++i) + for (std::uint8_t j = 0; j < 3; ++j) + for (std::uint8_t k = 0; k < 3; ++k) + { + auto const cellx = i * cells; + auto const celly = j * cells; + auto const cellz = k * cells; + patches.emplace_back( + Box_t{PHARE::core::Point{cellx, celly, cellz}, + PHARE::core::Point{cellx + off, celly + off, cellz + off}}); + } + } + + std::vector patches; +}; + + +template +struct ParticlesDataTest : public ::testing::Test, + public AParticlesDataTest +{ + using Super = AParticlesDataTest; + using Patch_t = Super::Patch_t; + using ParticleArray_t = TestParam::ParticleArray_t; + using GridLayout_t = TestParam::GridLayout_t; + auto constexpr static dim = TestParam::dim; + using Super::patches; + + ParticlesDataTest() + { + assert(!PHARE::core::any_overlaps_in( + patches, [](auto const& patch) { return patch.layout.AMRBox(); })); + + for (auto& patch : patches) + PHARE::core::add_particles_in(patch.data->domainParticles, patch.layout.AMRBox(), ppc); + } + + auto static overlap(Patch_t const& src, Patch_t& dst) { return Patch_t::overlap(src, dst); } + auto static overlap(Patch_t const& src, Patch_t& dst, auto const shift) + { + return 
Patch_t::overlap(src, dst, shift); + } + + auto mid_pid() const { return int(((patches.size() + 1) / 2) - 1); } +}; + + +using ParticlesDatas = testing::Types, TestParam<2> /*,TestParam<3>*/>; + + +TYPED_TEST_SUITE(ParticlesDataTest, ParticlesDatas); + +namespace PHARE::core +{ +TYPED_TEST(ParticlesDataTest, copyWorks) +{ + auto constexpr static dim = TestFixture::dim; + std::size_t const pid = this->mid_pid(); + auto& dst = this->patches[pid]; + + for (std::size_t i = 0; i < pid; ++i) + dst.data->copy(*this->patches[i].data); + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + dst.data->copy(*this->patches[i].data); + + std::size_t const expected + = (std::pow(cells + (ghost_cells * 2), dim) - std::pow(cells, dim)) * ppc; + EXPECT_EQ(expected, dst.data->patchGhostParticles.size()); +} + +TYPED_TEST(ParticlesDataTest, copyAtPeriodicBoundaryWorks) +{ + auto constexpr static dim = TestFixture::dim; + std::size_t const pid = 0; + auto& dst = this->patches[pid]; + auto const dst_ghostbox = grow(dst.layout.AMRBox(), 1); + + // non-periodic neighbours + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + if (auto const overlap = dst_ghostbox * this->patches[i].layout.AMRBox()) + dst.data->copy(*this->patches[i].data); + + // // periodic neighbours + for (auto const& shifter : make_shift_for(dst.layout.AMRBox())) + { + auto const shift_box = shift(dst.layout.AMRBox(), shifter); + auto const ghostbox = grow(shift_box, 1); + + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + if (auto const overlap = ghostbox * this->patches[i].layout.AMRBox()) + { + auto const celloverlap = TestFixture::overlap(this->patches[i], dst, shifter); + dst.data->copy(*this->patches[i].data, *celloverlap); + } + } + + std::size_t const expected + = (std::pow(cells + (ghost_cells * 2), dim) - std::pow(cells, dim)) * ppc; + EXPECT_EQ(expected, dst.data->patchGhostParticles.size()); +} + +TYPED_TEST(ParticlesDataTest, packWorks) +{ + auto constexpr static dim = 
TestFixture::dim; + std::size_t const pid = this->mid_pid(); + auto& dst = this->patches[pid]; + + auto for_neighbour = [&](auto i) { + auto const cellOverlap = TestFixture::overlap(this->patches[i], dst); + auto const& src = this->patches[i]; + SAMRAI::tbox::MessageStream particlesWriteStream; + src.data->packStream(particlesWriteStream, *cellOverlap); + SAMRAI::tbox::MessageStream particlesReadStream{particlesWriteStream.getCurrentSize(), + SAMRAI::tbox::MessageStream::Read, + particlesWriteStream.getBufferStart()}; + + dst.data->unpackStream(particlesReadStream, *cellOverlap); + }; + + for (std::size_t i = 0; i < pid; ++i) + for_neighbour(i); + + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + for_neighbour(i); + + std::size_t const expected + = (std::pow(cells + (ghost_cells * 2), dim) - std::pow(cells, dim)) * ppc; + EXPECT_EQ(expected, dst.data->patchGhostParticles.size()); +} + +TYPED_TEST(ParticlesDataTest, packAtPeriodicBoundaryWorks) +{ + auto constexpr static dim = TestFixture::dim; + std::size_t const pid = 0; + auto& dst = this->patches[pid]; + auto const dst_ghostbox = grow(dst.layout.AMRBox(), 1); + + auto for_neighbour = [&](auto i, auto shift) { + auto const cellOverlap = TestFixture::overlap(this->patches[i], dst, shift); + auto const& src = this->patches[i]; + SAMRAI::tbox::MessageStream particlesWriteStream; + src.data->packStream(particlesWriteStream, *cellOverlap); + SAMRAI::tbox::MessageStream particlesReadStream{particlesWriteStream.getCurrentSize(), + SAMRAI::tbox::MessageStream::Read, + particlesWriteStream.getBufferStart()}; + + dst.data->unpackStream(particlesReadStream, *cellOverlap); + }; + + // non-periodic neighbours + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + if (auto const overlap = dst_ghostbox * this->patches[i].layout.AMRBox()) + for_neighbour(i, PHARE::core::ConstArray()); + + // periodic neighbours + for (auto const& shifter : make_shift_for(dst.layout.AMRBox())) + { + auto const shift_box = 
shift(dst.layout.AMRBox(), shifter); + auto const ghostbox = grow(shift_box, 1); + + for (std::size_t i = pid + 1; i < this->patches.size(); ++i) + if (auto const overlap = ghostbox * this->patches[i].layout.AMRBox()) + for_neighbour(i, shifter); + } + + std::size_t const expected + = (std::pow(cells + (ghost_cells * 2), dim) - std::pow(cells, dim)) * ppc; + EXPECT_EQ(expected, dst.data->patchGhostParticles.size()); +} + +} // namespace PHARE::core + + +int main(int argc, char** argv) +{ + PHARE::SamraiLifeCycle samsam{argc, argv}; + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tests/amr/data/particles/test_particles_schedules.cpp b/tests/amr/data/particles/test_particles_schedules.cpp new file mode 100644 index 000000000..9592d8c38 --- /dev/null +++ b/tests/amr/data/particles/test_particles_schedules.cpp @@ -0,0 +1,124 @@ + +#include "phare_core.hpp" +#include "phare/phare.hpp" +#include +#include +#include "core/utilities/meta/meta_utilities.hpp" +#include + +#include "tests/core/data/gridlayout/test_gridlayout.hpp" +#include "tests/core/data/particles/test_particles_fixtures.hpp" +#include "tests/amr/test_hierarchy_fixtures.hpp" + +#include "gtest/gtest.h" + +#include +#include + +static constexpr std::size_t ppc = 10; + +template +struct TestParam +{ + auto constexpr static dim = _dim; + auto constexpr static interp = _interp; + using PhareTypes = PHARE::core::PHARE_Types; + using GridLayout_t = TestGridLayout; + using Box_t = PHARE::core::Box; + using ParticleArray_t = PHARE::core::ParticleArray; + + using Hierarchy_t + = AfullHybridBasicHierarchy()>; +}; + +template +struct ParticleScheduleHierarchyTest : public ::testing::Test +{ + auto constexpr static dim = TestParam::dim; + using Hierarchy_t = TestParam::Hierarchy_t; + using GridLayout_t = TestParam::GridLayout_t; + using ResourceManager_t = Hierarchy_t::ResourcesManagerT; + using DomainGhostPartRefinerPool + = RefinerPool; + + std::string configFile = 
"test_particles_schedules_inputs/" + std::to_string(dim) + "d_L0.txt"; + Hierarchy_t hierarchy{configFile}; +}; + +using ParticlesDatas = testing::Types, TestParam<1, 2>, TestParam<1, 3>, + TestParam<2, 1>, TestParam<2, 2>, TestParam<2, 3>>; + + +TYPED_TEST_SUITE(ParticleScheduleHierarchyTest, ParticlesDatas); +namespace PHARE::core +{ + + +TYPED_TEST(ParticleScheduleHierarchyTest, testing_inject_ghost_layer) +{ + using GridLayout_t = TypeParam::GridLayout_t; + auto constexpr static dim = TypeParam::dim; + auto constexpr static ghost_cells = GridLayout_t::nbrParticleGhosts(); + + auto lvl0 = this->hierarchy.basicHierarchy->hierarchy()->getPatchLevel(0); + auto& rm = *this->hierarchy.resourcesManagerHybrid; + auto& ions = this->hierarchy.hybridModel->state.ions; + + for (auto& patch : *lvl0) + { + auto dataOnPatch = rm.setOnPatch(*patch, ions); + for (auto& pop : ions) + { + pop.domainParticles().clear(); + EXPECT_EQ(pop.domainParticles().size(), 0); + + GridLayout_t const layout{phare_box_from(patch->getBox())}; + auto const ghostBox = grow(layout.AMRBox(), ghost_cells); + for (auto const& box : ghostBox.remove(layout.AMRBox())) + core::add_particles_in(pop.patchGhostParticles(), box, ppc); + } + rm.setTime(ions, *patch, 1); + } + + this->hierarchy.messenger->fillIonGhostParticles(ions, *lvl0, 0); + + for (auto& patch : *lvl0) + { + auto dataOnPatch = rm.setOnPatch(*patch, ions); + + SAMRAI::hier::HierarchyNeighbors const hier_nbrs{ + *this->hierarchy.basicHierarchy->hierarchy(), patch->getPatchLevelNumber(), + patch->getPatchLevelNumber()}; + + auto domainSamBox = patch->getBox(); + auto const domainBox = phare_box_from(domainSamBox); + + auto const neighbors = core::generate( + [](auto const& el) { return phare_box_from(el); }, + hier_nbrs.getSameLevelNeighbors(domainSamBox, patch->getPatchLevelNumber())); + auto const ncells = core::sum_from( + neighbors, [&](auto& el) { return (*(grow(el, ghost_cells) * domainBox)).size(); }); + + for (auto& pop : ions) + { + 
EXPECT_EQ(pop.domainParticles().size(), ncells * ppc); + + for (auto const& p : pop.domainParticles()) + { + EXPECT_TRUE(isIn(p, domainBox)); + } + } + } +} + + + +} // namespace PHARE::core + + +int main(int argc, char** argv) +{ + PHARE::SamraiLifeCycle samsam{argc, argv}; + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/tests/amr/data/particles/test_particles_schedules_inputs/1d_L0.txt b/tests/amr/data/particles/test_particles_schedules_inputs/1d_L0.txt new file mode 100644 index 000000000..9a2bf35ab --- /dev/null +++ b/tests/amr/data/particles/test_particles_schedules_inputs/1d_L0.txt @@ -0,0 +1,45 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0) , (64) ] + x_lo = 0.0 + x_up = 1.0 + periodic_dimension = 1 +} +Main +{ + dim = 1 +} +PatchHierarchy +{ + max_levels = 1 + // vector of coarse ratio with dim dimension + + largest_patch_size + { + level_0 = 32 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + level_0 = 32 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10 +} diff --git a/tests/amr/data/particles/test_particles_schedules_inputs/2d_L0.txt b/tests/amr/data/particles/test_particles_schedules_inputs/2d_L0.txt new file mode 100644 index 000000000..cd1f951c4 --- /dev/null +++ b/tests/amr/data/particles/test_particles_schedules_inputs/2d_L0.txt @@ -0,0 +1,48 @@ +CartesianGridGeometry +{ + domain_boxes = [ (0, 0) , (64, 64) ] + x_lo = 0.0, 0.0 + x_up = 1.0, 1.0 + periodic_dimension = 1, 1 +} +Main +{ + dim = 2 +} +PatchHierarchy +{ + max_levels = 2 + // vector of coarse ratio with dim dimension + ratio_to_coarser + { + level_1 = 2, 2 + } + largest_patch_size + { + level_0 = 64, 64 + // All finer level will use same values in as level_0 + } + smallest_patch_size + { + 
level_0 = 10, 10 + // All finer level will use same values in as level_0 + } +} +LoadBalancer +{ +} +StandardTagAndInitialize +{ + +} +GriddingAlgorithm +{ +} +TimeRefinementIntegrator +{ + start_time = 0.0 + end_time = 1.0 + max_integrator_steps = 20 + regrid_interval = 1 + tag_buffer = 10,10 +} diff --git a/tests/amr/messengers/input/input_1d_ratio_2.txt b/tests/amr/messengers/input/input_1d_ratio_2.txt index b87f74e1d..fedd6f142 100644 --- a/tests/amr/messengers/input/input_1d_ratio_2.txt +++ b/tests/amr/messengers/input/input_1d_ratio_2.txt @@ -55,5 +55,5 @@ TimeRefinementIntegrator end_time = 1.0 max_integrator_steps = 20 regrid_interval = 1 - tag_buffer = 10,10 + tag_buffer = 10 } diff --git a/tests/amr/messengers/test_messengers.cpp b/tests/amr/messengers/test_messengers.cpp index d9ba9eefd..e6d858580 100644 --- a/tests/amr/messengers/test_messengers.cpp +++ b/tests/amr/messengers/test_messengers.cpp @@ -260,16 +260,12 @@ class HybridMessengers : public ::testing::Test auto hybridModel = std::make_unique(createDict(), resourcesManagerHybrid); auto mhdModel = std::make_unique(resourcesManagerMHD); - hybridModel->resourcesManager->registerResources(hybridModel->state.electromag); - hybridModel->resourcesManager->registerResources(hybridModel->state.ions); - - mhdModel->resourcesManager->registerResources(mhdModel->state.B); - mhdModel->resourcesManager->registerResources(mhdModel->state.V); + hybridModel->resourcesManager->registerResources(hybridModel->state); + mhdModel->resourcesManager->registerResources(mhdModel->state); models.push_back(std::move(mhdModel)); models.push_back(std::move(hybridModel)); - auto mhdmhdMessenger{ messengerFactory.create("MHDModel-MHDModel", *models[0], *models[0], 0)}; auto mhdHybridMessenger{ @@ -308,6 +304,7 @@ TEST_F(HybridMessengers, receiveQuantitiesFromHybridModelsOnlyAndHybridSolver) { auto hybridSolver = std::make_unique>( createDict()["simulation"]["algo"]); + hybridSolver->registerResources(*models[1]); 
MessengerRegistration::registerQuantities(*messengers[2], *models[1], *models[1], *hybridSolver); } @@ -477,7 +474,6 @@ struct AfullHybridBasicHierarchy std::make_shared>(std::move(hybhybStrat))}; std::shared_ptr> solver{ - std::make_shared>( createDict()["simulation"]["algo"])}; diff --git a/tests/amr/tagging/test_tagging.cpp b/tests/amr/tagging/test_tagging.cpp index 7474303ec..388b2d4bc 100644 --- a/tests/amr/tagging/test_tagging.cpp +++ b/tests/amr/tagging/test_tagging.cpp @@ -190,7 +190,7 @@ struct TestTagger : public ::testing::Test std::vector tags; TestTagger() - : layout{TestGridLayout::make(20)} + : layout{TestGridLayout::make(20u)} , B{"EM_B", layout, HybridQuantity::Vector::B} , E{"EM_E", layout, HybridQuantity::Vector::E} , model{createDict()} diff --git a/tests/amr/test_hierarchy_fixtures.hpp b/tests/amr/test_hierarchy_fixtures.hpp new file mode 100644 index 000000000..73ada818e --- /dev/null +++ b/tests/amr/test_hierarchy_fixtures.hpp @@ -0,0 +1,298 @@ +#ifndef PHARE_TEST_AMR_HIERARCHY_FIXTURES_HPP +#define PHARE_TEST_AMR_HIERARCHY_FIXTURES_HPP + +#include "phare_solver.hpp" +#include "amr/types/amr_types.hpp" + +#include "tests/initializer/init_functions.hpp" +#include "tests/amr/messengers/test_integrator_strat.hpp" +#include "tests/amr/messengers/test_messenger_tag_strategy.hpp" + + +template +using InitFunctionT = PHARE::initializer::InitFunction; + + +template +struct DimDict +{ +}; + +template<> +struct DimDict<1> +{ + static constexpr uint8_t dim = 1; + static void set(PHARE::initializer::PHAREDict& dict) + { + using namespace PHARE::initializer::test_fn::func_1d; // density/etc are here + + dict["ions"]["pop0"]["particle_initializer"]["density"] + = static_cast>(density); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_x"] + = static_cast>(vx); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_y"] + = static_cast>(vy); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_z"] + = static_cast>(vz); + + + 
dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_x"] + = static_cast>(vthx); + + dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_y"] + = static_cast>(vthy); + + dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_z"] + = static_cast>(vthz); + + dict["ions"]["pop1"]["particle_initializer"]["density"] + = static_cast>(density); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_x"] + = static_cast>(vx); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_y"] + = static_cast>(vy); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_z"] + = static_cast>(vz); + + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_x"] + = static_cast>(vthx); + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_y"] + = static_cast>(vthy); + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_z"] + = static_cast>(vthz); + + dict["electromag"]["magnetic"]["initializer"]["x_component"] + = static_cast>(bx); + dict["electromag"]["magnetic"]["initializer"]["y_component"] + = static_cast>(by); + dict["electromag"]["magnetic"]["initializer"]["z_component"] + = static_cast>(bz); + + dict["simulation"]["algo"]["ion_updater"]["pusher"]["name"] = std::string{"modified_boris"}; + } +}; + + + + +template<> +struct DimDict<2> +{ + static constexpr uint8_t dim = 2; + static void set(PHARE::initializer::PHAREDict& dict) + { + using namespace PHARE::initializer::test_fn::func_2d; // density/etc are here + dict["simulation"]["algo"]["pusher"]["name"] = std::string{"modified_boris"}; + + dict["ions"]["pop0"]["particle_initializer"]["density"] + = static_cast>(density); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_x"] + = static_cast>(vx); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_y"] + = static_cast>(vy); + + dict["ions"]["pop0"]["particle_initializer"]["bulk_velocity_z"] + = static_cast>(vz); + + 
dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_x"] + = static_cast>(vthx); + + dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_y"] + = static_cast>(vthy); + + dict["ions"]["pop0"]["particle_initializer"]["thermal_velocity_z"] + = static_cast>(vthz); + + dict["ions"]["pop1"]["particle_initializer"]["density"] + = static_cast>(density); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_x"] + = static_cast>(vx); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_y"] + = static_cast>(vy); + + dict["ions"]["pop1"]["particle_initializer"]["bulk_velocity_z"] + = static_cast>(vz); + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_x"] + = static_cast>(vthx); + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_y"] + = static_cast>(vthy); + + dict["ions"]["pop1"]["particle_initializer"]["thermal_velocity_z"] + = static_cast>(vthz); + + dict["electromag"]["magnetic"]["initializer"]["x_component"] + = static_cast>(bx); + dict["electromag"]["magnetic"]["initializer"]["y_component"] + = static_cast>(by); + dict["electromag"]["magnetic"]["initializer"]["z_component"] + = static_cast>(bz); + + dict["simulation"]["algo"]["ion_updater"]["pusher"]["name"] = std::string{"modified_boris"}; + } +}; + +template +PHARE::initializer::PHAREDict createDict() +{ + PHARE::initializer::PHAREDict dict; + + dict["simulation"]["algo"]["pusher"]["name"] = std::string{"modified_boris"}; + + dict["simulation"]["algo"]["ohm"]["resistivity"] = 0.0; + dict["simulation"]["algo"]["ohm"]["hyper_resistivity"] = 0.0001; + + dict["ions"]["nbrPopulations"] = std::size_t{2}; + dict["ions"]["pop0"]["name"] = std::string{"protons"}; + dict["ions"]["pop0"]["mass"] = 1.; + dict["ions"]["pop0"]["particle_initializer"]["name"] = std::string{"maxwellian"}; + + dict["ions"]["pop0"]["particle_initializer"]["nbr_part_per_cell"] = int{100}; + dict["ions"]["pop0"]["particle_initializer"]["charge"] = -1.; + 
dict["ions"]["pop0"]["particle_initializer"]["basis"] = std::string{"cartesian"}; + + dict["ions"]["pop1"]["name"] = std::string{"alpha"}; + dict["ions"]["pop1"]["mass"] = 1.; + dict["ions"]["pop1"]["particle_initializer"]["name"] = std::string{"maxwellian"}; + + dict["ions"]["pop1"]["particle_initializer"]["nbr_part_per_cell"] = int{100}; + dict["ions"]["pop1"]["particle_initializer"]["charge"] = -1.; + dict["ions"]["pop1"]["particle_initializer"]["basis"] = std::string{"cartesian"}; + + dict["electromag"]["name"] = std::string{"EM"}; + dict["electromag"]["electric"]["name"] = std::string{"E"}; + dict["electromag"]["magnetic"]["name"] = std::string{"B"}; + + dict["electrons"]["pressure_closure"]["name"] = std::string{"isothermal"}; + dict["electrons"]["pressure_closure"]["Te"] = 0.12; + + + DimDict::set(dict); + + return dict; +} + + +class BasicHierarchy +{ +public: + BasicHierarchy( + int const ratio, short unsigned const dimension, + SAMRAI::mesh::StandardTagAndInitStrategy* tagStrat, + std::shared_ptr const& integratorStrat, + std::string const& inputConfigFile) + : inputDatabase_{SAMRAI::tbox::InputManager::getManager()->parseInputFile(inputConfigFile)} + , patchHierarchyDatabase_{inputDatabase_->getDatabase("PatchHierarchy")} + , dimension_{dimension} + + , gridGeometry_{std::make_shared( + dimension_, "cartesian", inputDatabase_->getDatabase("CartesianGridGeometry"))} + , hierarchy_{std::make_shared("PatchHierarchy", gridGeometry_, + patchHierarchyDatabase_)} + , loadBalancer_{std::make_shared( + dimension_, "LoadBalancer", inputDatabase_->getDatabase("LoadBalancer"))} + , standardTag_{std::make_shared( + "StandardTagAndInitialize", tagStrat, + inputDatabase_->getDatabase("StandardTagAndInitialize"))} + , clustering_{std::make_shared( + dimension_, inputDatabase_->getDatabaseWithDefault( + "BergerRigoutsos", std::shared_ptr()))} + , gridding{std::make_shared( + hierarchy_, "GriddingAlgorithm", inputDatabase_->getDatabase("GriddingAlgorithm"), + 
standardTag_, clustering_, loadBalancer_)} + , integrator{std::make_shared( + "TimeRefinementIntegrator", inputDatabase_->getDatabase("TimeRefinementIntegrator"), + hierarchy_, integratorStrat, gridding)} + { + integrator->initializeHierarchy(); + } + + + SAMRAI::hier::PatchHierarchy& getHierarchy() { return *hierarchy_; } + auto& hierarchy() { return hierarchy_; } + +private: + std::shared_ptr inputDatabase_; + std::shared_ptr patchHierarchyDatabase_; + + SAMRAI::tbox::Dimension dimension_; + + std::shared_ptr gridGeometry_; + + std::shared_ptr hierarchy_; + std::shared_ptr loadBalancer_; + + std::shared_ptr standardTag_; + std::shared_ptr clustering_; + +public: + std::shared_ptr gridding; + std::shared_ptr integrator; +}; + + +template +struct AfullHybridBasicHierarchy +{ + using Phare_core_Types = PHARE::core::PHARE_Types; + using Phare_amr_Types = PHARE::amr::PHARE_Types; + using Phare_solver_Types = PHARE::solver::PHARE_Types; + using HybridModelT = Phare_solver_Types::HybridModel_t; + using MHDModelT = Phare_solver_Types::MHDModel_t; + using ResourcesManagerT = typename HybridModelT::resources_manager_type; + + int const firstHybLevel{0}; + int const ratio{2}; + + using HybridHybridT + = HybridHybridMessengerStrategy; + + AfullHybridBasicHierarchy(std::string const& inputConfigFile) + { + hybridModel->resourcesManager->registerResources(hybridModel->state); + + solver->registerResources(*hybridModel); + + tagStrat = std::make_shared>(hybridModel, solver, messenger); + integrator = std::make_shared(); + basicHierarchy = std::make_shared(ratio, dim, tagStrat.get(), integrator, + inputConfigFile); + } + + PHARE::initializer::PHAREDict dict{createDict()}; + SAMRAI::tbox::SAMRAI_MPI mpi{MPI_COMM_WORLD}; + + std::shared_ptr resourcesManagerHybrid{ + std::make_shared()}; + + std::shared_ptr hybridModel{ + std::make_shared(dict, resourcesManagerHybrid)}; + + std::shared_ptr> messenger{ + std::make_shared>( + std::make_unique(resourcesManagerHybrid, firstHybLevel))}; 
+ + std::shared_ptr> solver{ + std::make_shared>(dict["simulation"]["algo"])}; + + std::shared_ptr> tagStrat; + std::shared_ptr integrator; + std::shared_ptr basicHierarchy; +}; + +#endif /*PHARE_TEST_AMR_HIERARCHY_FIXTURES_HPP*/ diff --git a/tests/core/data/electromag/test_electromag_fixtures.hpp b/tests/core/data/electromag/test_electromag_fixtures.hpp index 462895498..7ab9f8472 100644 --- a/tests/core/data/electromag/test_electromag_fixtures.hpp +++ b/tests/core/data/electromag/test_electromag_fixtures.hpp @@ -34,6 +34,15 @@ class UsableElectromag : public Electromag> _set(); } + template + UsableElectromag(GridLayout const& layout, initializer::PHAREDict const& dict) + : Super{dict} + , E{"EM_E", layout, HybridQuantity::Vector::E} + , B{"EM_B", layout, HybridQuantity::Vector::B} + { + _set(); + } + UsableElectromag(UsableElectromag&& that) : Super{std::forward(that)} , E{std::move(that.E)} diff --git a/tests/core/data/field/test_field.hpp b/tests/core/data/field/test_field.hpp index a085ba1bb..8ca1ed8be 100644 --- a/tests/core/data/field/test_field.hpp +++ b/tests/core/data/field/test_field.hpp @@ -48,12 +48,14 @@ void test_fields(GridLayout const& layout, Field const& field0, T1 const& field1 EXPECT_EQ(field0.shape(), field1.shape()); + for (std::size_t i = 0; i < field0.size(); ++i) { auto const& v0 = field0.data()[i]; auto const& v1 = field1.data()[i]; if (std::isnan(v0) || std::isnan(v1)) - throw std::runtime_error("This 1dfield should not be NaN index " + std::to_string(i)); + throw std::runtime_error(field0.name() + " should not be NaN index " + + std::to_string(i)); EXPECT_FLOAT_EQ(v0, v1); } } diff --git a/tests/core/data/gridlayout/test_gridlayout.hpp b/tests/core/data/gridlayout/test_gridlayout.hpp index d31f630a6..004db6e85 100644 --- a/tests/core/data/gridlayout/test_gridlayout.hpp +++ b/tests/core/data/gridlayout/test_gridlayout.hpp @@ -1,9 +1,11 @@ #ifndef TESTS_CORE_DATA_GRIDLAYOUT_TEST_GRIDLAYOUT_HPP #define 
TESTS_CORE_DATA_GRIDLAYOUT_TEST_GRIDLAYOUT_HPP -#include "core/data/grid/gridlayout.hpp" -#include "core/data/grid/gridlayoutimplyee.hpp" -#include "core/utilities/types.hpp" +#include "core/utilities/box/box.hpp" +#include "core/utilities/point/point.hpp" + +#include +#include template @@ -14,14 +16,30 @@ class TestGridLayout : public GridLayout TestGridLayout() = default; - TestGridLayout(std::uint32_t cells) + TestGridLayout(std::uint32_t const& cells) : GridLayout{PHARE::core::ConstArray(1.0 / cells), PHARE::core::ConstArray(cells), PHARE::core::Point{PHARE::core::ConstArray(0)}} { } - auto static make(std::uint32_t cells) { return TestGridLayout{cells}; } + + TestGridLayout(PHARE::core::Box const& amrbox) + : GridLayout{PHARE::core::ConstArray(.1), + amrbox.shape().template toArray(), + PHARE::core::Point{PHARE::core::ConstArray(0)}, + amrbox} + { + } + + template + TestGridLayout(Args&&... args) + requires(std::is_constructible_v) + : GridLayout{args...} + { + } + + auto static make(auto const& cells) { return TestGridLayout{cells}; } }; #endif /*TESTS_CORE_DATA_GRIDLAYOUT_TEST_GRIDLAYOUT_HPP*/ diff --git a/tests/core/data/ion_population/test_ion_population_fixtures.hpp b/tests/core/data/ion_population/test_ion_population_fixtures.hpp index e26ee34e3..11b34d5bb 100644 --- a/tests/core/data/ion_population/test_ion_population_fixtures.hpp +++ b/tests/core/data/ion_population/test_ion_population_fixtures.hpp @@ -37,29 +37,56 @@ struct UsableIonsDefaultTypes using IonPopulation_t = IonPopulation; }; +auto inline pop_dict(std::string const& name, std::size_t const ppc = 0) +{ + initializer::PHAREDict popdict; + popdict["name"] = name; + popdict["mass"] = 1.0; + auto particle_initializer = initializer::PHAREDict{}; + particle_initializer["nbr_part_per_cell"] = static_cast(ppc); + particle_initializer["charge"] = 1.; + particle_initializer["basis"] = std::string{"cartesian"}; + popdict["particle_initializer"] = particle_initializer; + return popdict; +} template 
class UsableIonsPopulation : public _defaults::IonPopulation_t { - using GridLayout_t = typename _defaults::GridLayout_t; - using VecField_t = typename _defaults::VecField_t; - using TensorField_t = typename _defaults::TensorField_t; - using ParticleArray_t = typename _defaults::ParticleArray_t; + using GridLayout_t = _defaults::GridLayout_t; + using VecField_t = _defaults::VecField_t; + using TensorField_t = _defaults::TensorField_t; + using ParticleArray_t = _defaults::ParticleArray_t; using Super = IonPopulation; + void set() + { + auto&& [_F, _M, _d, _particles] = Super::getCompileTimeResourcesViewList(); + F.set_on(_F); + M.set_on(_M); + _d.setBuffer(&rho); + _particles.setBuffer(&particles.pack()); + } + public: UsableIonsPopulation(initializer::PHAREDict const& dict, GridLayout_t const& layout) : Super{dict} , rho{this->name() + "_rho", layout, HybridQuantity::Scalar::rho} , F{this->name() + "_flux", layout, HybridQuantity::Vector::V} , M{this->name() + "_momentumTensor", layout, HybridQuantity::Tensor::M} - , particles{this->name(), layout.AMRBox()} + , particles{this->name(), grow(layout.AMRBox(), GridLayout_t::nbrParticleGhosts() + 1)} { - auto&& [_F, _M, _d, _particles] = Super::getCompileTimeResourcesViewList(); - F.set_on(_F); - M.set_on(_M); - _d.setBuffer(&rho); - _particles.setBuffer(&particles.pack()); + set(); + } + + UsableIonsPopulation(UsableIonsPopulation const& that) + : Super{pop_dict(that.name())} + , rho{that.rho} + , F{that.F} + , M{that.M} + , particles{that.particles} + { + set(); } @@ -68,9 +95,9 @@ class UsableIonsPopulation : public _defaults::IonPopulation_t auto& operator*() { return view(); } auto& operator*() const { return view(); } - typename _defaults::Grid_t rho; - typename _defaults::UsableVecField_t F; - typename _defaults::UsableTensorField_t M; + _defaults::Grid_t rho; + _defaults::UsableVecField_t F; + _defaults::UsableTensorField_t M; UsableParticlesPopulation particles; }; @@ -79,50 +106,77 @@ template class UsableIons 
: public Ions { - using GridLayout_t = typename _defaults::GridLayout_t; + using GridLayout_t = _defaults::GridLayout_t; using Super = Ions; - auto static pop_dict(std::string name) - { - initializer::PHAREDict popdict; - popdict["name"] = name; - popdict["mass"] = 1.0; - popdict["particle_initializer"] = initializer::PHAREDict{}; - return popdict; - } + template - auto static super(PopNames const& pop_names) + auto static super(PopNames const& pop_names, std::size_t const ppc) { initializer::PHAREDict dict; dict["nbrPopulations"] = pop_names.size(); for (std::size_t i = 0; i < pop_names.size(); ++i) - dict["pop" + std::to_string(i)] = pop_dict(pop_names[i]); + dict["pop" + std::to_string(i)] = pop_dict(pop_names[i], ppc); return dict; } -public: - UsableIons(GridLayout_t const& layout, std::vector const& pop_names) - : Super{super(pop_names)} - , rho{"rho", HybridQuantity::Scalar::rho, layout.allocSize(HybridQuantity::Scalar::rho)} - , Vi{"bulkVel", layout, HybridQuantity::Vector::V} - , M{"momentumTensor", layout, HybridQuantity::Tensor::M} + + auto static super(Super const supe) + { + initializer::PHAREDict dict; + dict["nbrPopulations"] = supe.size(); + for (std::size_t i = 0; i < supe.size(); ++i) + dict["pop" + std::to_string(i)] = pop_dict(supe[i].name()); + return dict; + } + + void set() { auto&& [_bV, _M, _d, _md] = Super::getCompileTimeResourcesViewList(); Vi.set_on(_bV); M.set_on(_M); _d.setBuffer(&rho); - for (std::size_t i = 0; i < pop_names.size(); ++i) - populations.emplace_back(pop_dict(pop_names[i]), layout); - Super::getRunTimeResourcesViewList().clear(); + auto& super_pops = Super::getRunTimeResourcesViewList(); + super_pops.clear(); for (auto& pop : populations) - Super::getRunTimeResourcesViewList().emplace_back(*pop); + super_pops.emplace_back(*pop); + } + +public: + UsableIons(GridLayout_t const& layout, initializer::PHAREDict const& dict) + : Super{dict} + , rho{"rho", HybridQuantity::Scalar::rho, 
layout.allocSize(HybridQuantity::Scalar::rho)} + , Vi{"bulkVel", layout, HybridQuantity::Vector::V} + , M{"momentumTensor", layout, HybridQuantity::Tensor::M} + { + auto& super_pops = Super::getRunTimeResourcesViewList(); + populations.reserve(super_pops.size()); + for (std::size_t i = 0; i < super_pops.size(); ++i) + populations.emplace_back(dict["pop" + std::to_string(i)], layout); + set(); + } + + UsableIons(GridLayout_t const& layout, std::vector const& pop_names, + std::size_t const ppc = 0) + : UsableIons{layout, super(pop_names, ppc)} + { + } + + UsableIons(GridLayout_t const& layout, std::string const& pop_name, std::size_t const ppc = 0) + : UsableIons{layout, std::vector{pop_name}, ppc} + { } - UsableIons(GridLayout_t const& layout, std::string const& pop_name) - : UsableIons{layout, std::vector{pop_name}} + UsableIons(UsableIons const& that) + : Super(super(*that)) + , rho{that.rho} + , Vi{that.Vi} + , M{that.M} + , populations{that.populations} { + set(); } Super& view() { return *this; } @@ -130,9 +184,9 @@ class UsableIons auto& operator*() { return view(); } auto& operator*() const { return view(); } - typename _defaults::Grid_t rho; - typename _defaults::UsableVecField_t Vi; - typename _defaults::UsableTensorField_t M; + _defaults::Grid_t rho; + _defaults::UsableVecField_t Vi; + _defaults::UsableTensorField_t M; std::vector> populations; }; diff --git a/tests/core/data/particles/test_particle_array_consistency.cpp b/tests/core/data/particles/test_particle_array_consistency.cpp index 4a49ed8db..d3c0745ff 100644 --- a/tests/core/data/particles/test_particle_array_consistency.cpp +++ b/tests/core/data/particles/test_particle_array_consistency.cpp @@ -12,8 +12,8 @@ namespace PHARE::core { -std::size_t static constexpr cells = 3; -std::size_t static constexpr ppc = 10; +std::uint32_t static constexpr cells = 3; +std::size_t static constexpr ppc = 10; template diff --git a/tests/core/data/particles/test_particles_fixtures.hpp 
b/tests/core/data/particles/test_particles_fixtures.hpp index cba9887e7..429b43e1c 100644 --- a/tests/core/data/particles/test_particles_fixtures.hpp +++ b/tests/core/data/particles/test_particles_fixtures.hpp @@ -1,6 +1,9 @@ #ifndef PHARE_TEST_CORE_DATA_PARTICLES_TEST_PARTICLES_FIXTURES_HPP #define PHARE_TEST_CORE_DATA_PARTICLES_TEST_PARTICLES_FIXTURES_HPP +#include +#include + #include namespace PHARE::core @@ -16,22 +19,59 @@ struct UsableParticlesPopulation { } + UsableParticlesPopulation(UsableParticlesPopulation const& that) + : name{that.name} + , domain_particles{that.domain_particles} + , patch_ghost_particles{that.patch_ghost_particles} + , level_ghost_particles{that.level_ghost_particles} + , levelGhostParticlesOld{that.levelGhostParticlesOld} + , levelGhostParticlesNew{that.levelGhostParticlesNew} + { + } + auto& pack() { return particles_pack; } auto& pack() const { return particles_pack; } std::string name; ParticleArray_t domain_particles; - ParticleArray_t patch_ghost_particles = domain_particles; - ParticleArray_t level_ghost_particles = domain_particles; + ParticleArray_t patch_ghost_particles = domain_particles; + ParticleArray_t level_ghost_particles = domain_particles; + ParticleArray_t levelGhostParticlesOld = domain_particles; + ParticleArray_t levelGhostParticlesNew = domain_particles; core::ParticlesPack particles_pack{name, - &domain_particles, // + &domain_particles, &patch_ghost_particles, &level_ghost_particles, - /*levelGhostParticlesOld=*/nullptr, - /*levelGhostParticlesNew=*/nullptr}; + &levelGhostParticlesOld, + &levelGhostParticlesNew}; }; +template> +Particle_t particle(std::array const& icell) +{ + return {/*.weight = */ .001, + /*.charge = */ 1, + /*.iCell = */ icell, + /*.delta = */ ConstArray(.51), + /*.v = */ {{.002002002002, .003003003003, .004004004004}}}; +} + +template +Particle particle(int const icell = 15) +{ + return particle(ConstArray(icell)); +} + +template +void add_particles_in(Particles& particles, Box const& 
box, std::size_t const ppc) +{ + for (auto const& bix : box) + for (std::size_t i = 0; i < ppc; ++i) + particles.emplace_back(particle(*bix)); +} + + } // namespace PHARE::core diff --git a/tests/core/numerics/ion_updater/test_updater.cpp b/tests/core/numerics/ion_updater/test_updater.cpp index 2d939cf7d..51e2b61a8 100644 --- a/tests/core/numerics/ion_updater/test_updater.cpp +++ b/tests/core/numerics/ion_updater/test_updater.cpp @@ -1,11 +1,14 @@ #include "gtest/gtest.h" +#include #include "phare_core.hpp" #include "core/numerics/ion_updater/ion_updater.hpp" #include "tests/core/data/vecfield/test_vecfield_fixtures.hpp" +#include "tests/core/data/electromag/test_electromag_fixtures.hpp" #include "tests/core/data/tensorfield/test_tensorfield_fixtures.hpp" +#include "tests/core/data/ion_population/test_ion_population_fixtures.hpp" using namespace PHARE::core; @@ -66,8 +69,7 @@ Return bz(Param x) - -int nbrPartPerCell = 1000; +std::size_t nbrPartPerCell = 1000; using InitFunctionT = PHARE::initializer::InitFunction<1>; @@ -103,9 +105,10 @@ PHARE::initializer::PHAREDict createDict() = static_cast(vthz); - dict["ions"]["pop0"]["particle_initializer"]["nbr_part_per_cell"] = int{nbrPartPerCell}; - dict["ions"]["pop0"]["particle_initializer"]["charge"] = 1.; - dict["ions"]["pop0"]["particle_initializer"]["basis"] = std::string{"cartesian"}; + dict["ions"]["pop0"]["particle_initializer"]["nbr_part_per_cell"] + = static_cast(nbrPartPerCell); + dict["ions"]["pop0"]["particle_initializer"]["charge"] = 1.; + dict["ions"]["pop0"]["particle_initializer"]["basis"] = std::string{"cartesian"}; dict["ions"]["pop1"]["name"] = std::string{"alpha"}; dict["ions"]["pop1"]["mass"] = 1.; @@ -132,9 +135,10 @@ PHARE::initializer::PHAREDict createDict() = static_cast(vthz); - dict["ions"]["pop1"]["particle_initializer"]["nbr_part_per_cell"] = int{nbrPartPerCell}; - dict["ions"]["pop1"]["particle_initializer"]["charge"] = 1.; - dict["ions"]["pop1"]["particle_initializer"]["basis"] = 
std::string{"cartesian"}; + dict["ions"]["pop1"]["particle_initializer"]["nbr_part_per_cell"] + = static_cast(nbrPartPerCell); + dict["ions"]["pop1"]["particle_initializer"]["charge"] = 1.; + dict["ions"]["pop1"]["particle_initializer"]["basis"] = std::string{"cartesian"}; dict["electromag"]["name"] = std::string{"EM"}; dict["electromag"]["electric"]["name"] = std::string{"E"}; @@ -159,188 +163,6 @@ struct DimInterp -// the Electromag and Ions used in this test -// need their resources pointers (Fields and ParticleArrays) to set manually -// to buffers. ElectromagBuffer and IonsBuffer encapsulate these buffers - - - -template -struct ElectromagBuffers -{ - using PHARETypes = PHARE::core::PHARE_Types; - using Grid = typename PHARETypes::Grid_t; - using GridLayout = typename PHARETypes::GridLayout_t; - using Electromag = typename PHARETypes::Electromag_t; - using UsableVecFieldND = UsableVecField; - - UsableVecFieldND B, E; - - ElectromagBuffers(GridLayout const& layout) - : B{"EM_B", layout, HybridQuantity::Vector::B} - , E{"EM_E", layout, HybridQuantity::Vector::E} - { - } - - ElectromagBuffers(ElectromagBuffers const& source, GridLayout const& layout) - : ElectromagBuffers{layout} - { - B.copyData(source.B); - E.copyData(source.E); - } - - - void setBuffers(Electromag& EM) - { - B.set_on(EM.B); - E.set_on(EM.E); - } -}; - - - - -template -struct IonsBuffers -{ - using PHARETypes = PHARE::core::PHARE_Types; - using UsableVecFieldND = UsableVecField; - using Grid = typename PHARETypes::Grid_t; - using GridLayout = typename PHARETypes::GridLayout_t; - using Ions = typename PHARETypes::Ions_t; - using ParticleArray = typename PHARETypes::ParticleArray_t; - using ParticleInitializerFactory = typename PHARETypes::ParticleInitializerFactory; - - Grid ionDensity; - Grid ionMassDensity; - Grid protonDensity; - Grid alphaDensity; - - UsableVecFieldND protonF, alphaF, Vi; - UsableTensorField M, alpha_M, protons_M; - - static constexpr int ghostSafeMapLayer = 
ghostWidthForParticles() + 1; - - ParticleArray protonDomain; - ParticleArray protonPatchGhost; - ParticleArray protonLevelGhost; - ParticleArray protonLevelGhostOld; - ParticleArray protonLevelGhostNew; - - ParticleArray alphaDomain; - ParticleArray alphaPatchGhost; - ParticleArray alphaLevelGhost; - ParticleArray alphaLevelGhostOld; - ParticleArray alphaLevelGhostNew; - - ParticlesPack protonPack; - ParticlesPack alphaPack; - - IonsBuffers(GridLayout const& layout) - : ionDensity{"rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , ionMassDensity{"massDensity", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , protonDensity{"protons_rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , alphaDensity{"alpha_rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , protonF{"protons_flux", layout, HybridQuantity::Vector::V} - , alphaF{"alpha_flux", layout, HybridQuantity::Vector::V} - , Vi{"bulkVel", layout, HybridQuantity::Vector::V} - , M{"momentumTensor", layout, HybridQuantity::Tensor::M} - , alpha_M{"alpha_momentumTensor", layout, HybridQuantity::Tensor::M} - , protons_M{"protons_momentumTensor", layout, HybridQuantity::Tensor::M} - , protonDomain{grow(layout.AMRBox(), ghostSafeMapLayer)} - , protonPatchGhost{grow(layout.AMRBox(), ghostSafeMapLayer)} - , protonLevelGhost{grow(layout.AMRBox(), ghostSafeMapLayer)} - , protonLevelGhostOld{grow(layout.AMRBox(), ghostSafeMapLayer)} - , protonLevelGhostNew{grow(layout.AMRBox(), ghostSafeMapLayer)} - , alphaDomain{grow(layout.AMRBox(), ghostSafeMapLayer)} - , alphaPatchGhost{grow(layout.AMRBox(), ghostSafeMapLayer)} - , alphaLevelGhost{grow(layout.AMRBox(), ghostSafeMapLayer)} - , alphaLevelGhostOld{grow(layout.AMRBox(), ghostSafeMapLayer)} - , alphaLevelGhostNew{grow(layout.AMRBox(), ghostSafeMapLayer)} - , protonPack{"protons", &protonDomain, &protonPatchGhost, - &protonLevelGhost, 
&protonLevelGhostOld, &protonLevelGhostNew} - , alphaPack{"alpha", &alphaDomain, &alphaPatchGhost, - &alphaLevelGhost, &alphaLevelGhostOld, &alphaLevelGhostNew} - { - } - - - IonsBuffers(IonsBuffers const& source, GridLayout const& layout) - : ionDensity{"rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , ionMassDensity{"massDensity", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , protonDensity{"protons_rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , alphaDensity{"alpha_rho", HybridQuantity::Scalar::rho, - layout.allocSize(HybridQuantity::Scalar::rho)} - , protonF{"protons_flux", layout, HybridQuantity::Vector::V} - , alphaF{"alpha_flux", layout, HybridQuantity::Vector::V} - , Vi{"bulkVel", layout, HybridQuantity::Vector::V} - , M{"momentumTensor", layout, HybridQuantity::Tensor::M} - , alpha_M{"alpha_momentumTensor", layout, HybridQuantity::Tensor::M} - , protons_M{"protons_momentumTensor", layout, HybridQuantity::Tensor::M} - , protonDomain{source.protonDomain} - , protonPatchGhost{source.protonPatchGhost} - , protonLevelGhost{source.protonLevelGhost} - , protonLevelGhostOld{source.protonLevelGhostOld} - , protonLevelGhostNew{source.protonLevelGhostNew} - , alphaDomain{source.alphaDomain} - , alphaPatchGhost{source.alphaPatchGhost} - , alphaLevelGhost{source.alphaLevelGhost} - , alphaLevelGhostOld{source.alphaLevelGhostOld} - , alphaLevelGhostNew{source.alphaLevelGhostNew} - , protonPack{"protons", &protonDomain, &protonPatchGhost, - &protonLevelGhost, &protonLevelGhostOld, &protonLevelGhostNew} - , alphaPack{"alpha", &alphaDomain, &alphaPatchGhost, - &alphaLevelGhost, &alphaLevelGhostOld, &alphaLevelGhostNew} - - { - ionDensity.copyData(source.ionDensity); - ionMassDensity.copyData(source.ionMassDensity); - protonDensity.copyData(source.protonDensity); - alphaDensity.copyData(source.alphaDensity); - - protonF.copyData(source.protonF); - 
alphaF.copyData(source.alphaF); - Vi.copyData(source.Vi); - } - - void setBuffers(Ions& ions) - { - { - auto const& [V, m, d, md] = ions.getCompileTimeResourcesViewList(); - Vi.set_on(V); - M.set_on(m); - d.setBuffer(&ionDensity); - md.setBuffer(&ionMassDensity); - } - - auto& pops = ions.getRunTimeResourcesViewList(); - { - auto const& [F, M, d, particles] = pops[0].getCompileTimeResourcesViewList(); - d.setBuffer(&protonDensity); - protons_M.set_on(M); - protonF.set_on(F); - particles.setBuffer(&protonPack); - } - - { - auto const& [F, M, d, particles] = pops[1].getCompileTimeResourcesViewList(); - d.setBuffer(&alphaDensity); - alpha_M.set_on(M); - alphaF.set_on(F); - particles.setBuffer(&alphaPack); - } - } -}; - - - template struct IonUpdaterTest : public ::testing::Test @@ -348,45 +170,32 @@ struct IonUpdaterTest : public ::testing::Test static constexpr auto dim = DimInterpT::dimension; static constexpr auto interp_order = DimInterpT::interp_order; using PHARETypes = PHARE::core::PHARE_Types; - using Ions = typename PHARETypes::Ions_t; - using Electromag = typename PHARETypes::Electromag_t; - using GridLayout = typename PHARE::core::GridLayout>; - using ParticleArray = typename PHARETypes::ParticleArray_t; - using ParticleInitializerFactory = typename PHARETypes::ParticleInitializerFactory; - - using IonUpdater = typename PHARE::core::IonUpdater; + using Ions = PHARETypes::Ions_t; + using Electromag = PHARETypes::Electromag_t; + using GridLayout = PHARE::core::GridLayout>; + using ParticleArray = PHARETypes::ParticleArray_t; + using ParticleInitializerFactory = PHARETypes::ParticleInitializerFactory; + using UsableVecFieldND = UsableVecField; + using IonUpdater = PHARE::core::IonUpdater; + using Boxing_t = PHARE::core::UpdaterSelectionBoxing; - double dt{0.01}; + double const dt{0.01}; // grid configuration - std::array ncells; - GridLayout layout; + std::array const ncells = ConstArray(100); + GridLayout const layout{{0.1}, {100u}, {{0.}}}; + // assumes no 
level ghost cells + Boxing_t const boxing{layout, grow(layout.AMRBox(), GridLayout::nbrParticleGhosts())}; - // data for electromagnetic fields - using Field = typename PHARETypes::Grid_t; - using VecField = typename PHARETypes::VecField_t; - using UsableVecFieldND = UsableVecField; - - ElectromagBuffers emBuffers; - IonsBuffers ionsBuffers; - - Electromag EM{init_dict["electromag"]}; - Ions ions{init_dict["ions"]}; + UsableElectromag EM{layout, init_dict["electromag"]}; + UsableIons_t ions{layout, init_dict["ions"]}; IonUpdaterTest() - : ncells{100} - , layout{{0.1}, {100u}, {{0.}}} - , emBuffers{layout} - , ionsBuffers{layout} { - emBuffers.setBuffers(EM); - ionsBuffers.setBuffers(ions); - - // ok all resources pointers are set to buffers // now let's initialize Electromag fields to user input functions // and ion population particles to user supplied moments @@ -398,9 +207,9 @@ struct IonUpdaterTest : public ::testing::Test auto const& info = pop.particleInitializerInfo(); auto particleInitializer = ParticleInitializerFactory::create(info); particleInitializer->loadParticles(pop.domainParticles(), layout); + EXPECT_GT(pop.domainParticles().size(), 0ull); } - // now all domain particles are loaded we need to manually insert // ghost particles (this is in reality SAMRAI's job) // these are needed if we want all used nodes to be complete @@ -479,7 +288,6 @@ struct IonUpdaterTest : public ::testing::Test } - std::copy(std::begin(levelGhostPartOld), std::end(levelGhostPartOld), std::back_inserter(levelGhostPartNew)); @@ -488,34 +296,12 @@ struct IonUpdaterTest : public ::testing::Test std::back_inserter(levelGhostPart)); - // now let's create patchGhostParticles on the right of the domain - // by copying those on the last cell - - - for (auto const& part : domainPart) - { - if constexpr (interp_order == 2 or interp_order == 3) - { - if (part.iCell[0] == lastAMRCell[0] or part.iCell[0] == lastAMRCell[0] - 1) - { - auto p{part}; - p.iCell[0] += 2; - 
patchGhostPart.push_back(p); - } - } - else if constexpr (interp_order == 1) - { - if (part.iCell[0] == lastAMRCell[0]) - { - auto p{part}; - p.iCell[0] += 1; - patchGhostPart.push_back(p); - } - } - } + EXPECT_GT(pop.domainParticles().size(), 0ull); + EXPECT_GT(levelGhostPartOld.size(), 0ull); + EXPECT_EQ(patchGhostPart.size(), 0); } // end 1D - } // end pop loop + } // end pop loop PHARE::core::depositParticles(ions, layout, Interpolator{}, PHARE::core::DomainDeposit{}); @@ -539,9 +325,6 @@ struct IonUpdaterTest : public ::testing::Test for (auto& pop : this->ions) { - interpolate(makeIndexRange(pop.patchGhostParticles()), pop.density(), pop.flux(), - layout); - double alpha = 0.5; interpolate(makeIndexRange(pop.levelGhostParticlesNew()), pop.density(), pop.flux(), layout, @@ -556,7 +339,7 @@ struct IonUpdaterTest : public ::testing::Test - void checkMomentsHaveEvolved(IonsBuffers const& ionsBufferCpy) + void checkMomentsHaveEvolved(auto const& ionsBufferCpy) { auto& populations = this->ions.getRunTimeResourcesViewList(); @@ -573,18 +356,20 @@ struct IonUpdaterTest : public ::testing::Test auto ix0 = this->layout.physicalStartIndex(QtyCentering::primal, Direction::X); auto ix1 = this->layout.physicalEndIndex(QtyCentering::primal, Direction::X); - auto nonZero = [&](auto const& field) { + auto nonZero = [&](auto const& field, std::string const type) { auto sum = 0.; for (auto ix = ix0; ix <= ix1; ++ix) { sum += std::abs(field(ix)); } EXPECT_GT(sum, 0.); + if (sum == 0) + std::cout << "nonZero failed for " << type << " : " << field.name() << "\n"; }; auto check = [&](auto const& newField, auto const& originalField) { - nonZero(newField); - nonZero(originalField); + nonZero(newField, "newField"); + // nonZero(originalField, "originalField"); for (auto ix = ix0; ix <= ix1; ++ix) { auto evolution = std::abs(newField(ix) - originalField(ix)); @@ -597,22 +382,22 @@ struct IonUpdaterTest : public ::testing::Test } }; - check(protonDensity, ionsBufferCpy.protonDensity); - 
check(protonFx, ionsBufferCpy.protonF(Component::X)); - check(protonFy, ionsBufferCpy.protonF(Component::Y)); - check(protonFz, ionsBufferCpy.protonF(Component::Z)); + check(protonDensity, ionsBufferCpy[0].density()); + check(protonFx, ionsBufferCpy[0].flux()(Component::X)); + check(protonFy, ionsBufferCpy[0].flux()(Component::Y)); + check(protonFz, ionsBufferCpy[0].flux()(Component::Z)); - check(alphaDensity, ionsBufferCpy.alphaDensity); - check(alphaFx, ionsBufferCpy.alphaF(Component::X)); - check(alphaFy, ionsBufferCpy.alphaF(Component::Y)); - check(alphaFz, ionsBufferCpy.alphaF(Component::Z)); + check(alphaDensity, ionsBufferCpy[1].density()); + check(alphaFx, ionsBufferCpy[1].flux()(Component::X)); + check(alphaFy, ionsBufferCpy[1].flux()(Component::Y)); + check(alphaFz, ionsBufferCpy[1].flux()(Component::Z)); - check(ions.density(), ionsBufferCpy.ionDensity); - check(ions.velocity().getComponent(Component::X), ionsBufferCpy.Vi(Component::X)); - check(ions.velocity().getComponent(Component::Y), ionsBufferCpy.Vi(Component::Y)); - check(ions.velocity().getComponent(Component::Z), ionsBufferCpy.Vi(Component::Z)); + check(ions.density(), ionsBufferCpy.density()); + check(ions.velocity().getComponent(Component::X), ionsBufferCpy.velocity()(Component::X)); + check(ions.velocity().getComponent(Component::Y), ionsBufferCpy.velocity()(Component::Y)); + check(ions.velocity().getComponent(Component::Z), ionsBufferCpy.velocity()(Component::Z)); } @@ -683,7 +468,7 @@ TYPED_TEST(IonUpdaterTest, loadsDomainPatchAndLevelGhostParticles) { auto check = [this](std::size_t nbrGhostCells, auto& pop) { EXPECT_EQ(this->layout.nbrCells()[0] * nbrPartPerCell, pop.domainParticles().size()); - EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.patchGhostParticles().size()); + EXPECT_EQ(0, pop.patchGhostParticles().size()); EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticlesOld().size()); EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticlesNew().size()); 
EXPECT_EQ(nbrGhostCells * nbrPartPerCell, pop.levelGhostParticles().size()); @@ -709,39 +494,6 @@ TYPED_TEST(IonUpdaterTest, loadsDomainPatchAndLevelGhostParticles) -TYPED_TEST(IonUpdaterTest, loadsPatchGhostParticlesOnRightGhostArea) -{ - int lastPhysCell = this->layout.physicalEndIndex(QtyCentering::dual, Direction::X); - auto lastAMRCell = this->layout.localToAMR(Point{lastPhysCell}); - - if constexpr (TypeParam::dimension == 1) - { - for (auto& pop : this->ions) - { - if constexpr (TypeParam::interp_order == 1) - { - for (auto const& part : pop.patchGhostParticles()) - { - EXPECT_EQ(lastAMRCell[0] + 1, part.iCell[0]); - } - } - else if constexpr (TypeParam::interp_order == 2 or TypeParam::interp_order == 3) - { - typename IonUpdaterTest::ParticleArray copy{pop.patchGhostParticles()}; - auto firstInOuterMostCell = std::partition( - std::begin(copy), std::end(copy), [&lastAMRCell](auto const& particle) { - return particle.iCell[0] == lastAMRCell[0] + 1; - }); - EXPECT_EQ(nbrPartPerCell, std::distance(std::begin(copy), firstInOuterMostCell)); - EXPECT_EQ(nbrPartPerCell, std::distance(firstInOuterMostCell, std::end(copy))); - } - } - } -} - - - - TYPED_TEST(IonUpdaterTest, loadsLevelGhostParticlesOnLeftGhostArea) { int firstPhysCell = this->layout.physicalStartIndex(QtyCentering::dual, Direction::X); @@ -784,9 +536,10 @@ TYPED_TEST(IonUpdaterTest, particlesUntouchedInMomentOnlyMode) typename IonUpdaterTest::IonUpdater ionUpdater{ init_dict["simulation"]["algo"]["ion_updater"]}; - IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; + auto ionsBufferCpy = this->ions; + - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); @@ -811,15 +564,15 @@ TYPED_TEST(IonUpdaterTest, particlesUntouchedInMomentOnlyMode) } }; - checkIsUnTouched(populations[0].patchGhostParticles(), ionsBufferCpy.protonPatchGhost); - 
checkIsUnTouched(populations[0].levelGhostParticles(), ionsBufferCpy.protonLevelGhost); - checkIsUnTouched(populations[0].levelGhostParticlesOld(), ionsBufferCpy.protonLevelGhostOld); - checkIsUnTouched(populations[0].levelGhostParticlesNew(), ionsBufferCpy.protonLevelGhostNew); + auto& cpy_pops = ionsBufferCpy.getRunTimeResourcesViewList(); - checkIsUnTouched(populations[1].patchGhostParticles(), ionsBufferCpy.alphaPatchGhost); - checkIsUnTouched(populations[1].levelGhostParticles(), ionsBufferCpy.alphaLevelGhost); - checkIsUnTouched(populations[1].levelGhostParticlesOld(), ionsBufferCpy.alphaLevelGhost); - checkIsUnTouched(populations[1].levelGhostParticlesNew(), ionsBufferCpy.alphaLevelGhost); + checkIsUnTouched(populations[0].levelGhostParticles(), cpy_pops[0].levelGhostParticles()); + checkIsUnTouched(populations[0].levelGhostParticlesOld(), cpy_pops[0].levelGhostParticlesOld()); + checkIsUnTouched(populations[0].levelGhostParticlesNew(), cpy_pops[0].levelGhostParticlesNew()); + + checkIsUnTouched(populations[1].levelGhostParticles(), cpy_pops[1].levelGhostParticles()); + checkIsUnTouched(populations[1].levelGhostParticlesOld(), cpy_pops[1].levelGhostParticlesOld()); + checkIsUnTouched(populations[1].levelGhostParticlesNew(), cpy_pops[1].levelGhostParticlesNew()); } @@ -832,7 +585,7 @@ TYPED_TEST(IonUpdaterTest, particlesUntouchedInMomentOnlyMode) // // IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; // -// ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, +// ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, // UpdaterMode::particles_and_moments); // // this->fillIonsMomentsGhosts(); @@ -855,9 +608,11 @@ TYPED_TEST(IonUpdaterTest, momentsAreChangedInParticlesAndMomentsMode) typename IonUpdaterTest::IonUpdater ionUpdater{ init_dict["simulation"]["algo"]["ion_updater"]}; - IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; + auto ionsBufferCpy = this->ions; + + 
assert(ionsBufferCpy.density().data() != this->ions.density().data()); - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, UpdaterMode::all); + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::all); this->fillIonsMomentsGhosts(); @@ -875,9 +630,11 @@ TYPED_TEST(IonUpdaterTest, momentsAreChangedInMomentsOnlyMode) typename IonUpdaterTest::IonUpdater ionUpdater{ init_dict["simulation"]["algo"]["ion_updater"]}; - IonsBuffers ionsBufferCpy{this->ionsBuffers, this->layout}; + auto ionsBufferCpy = this->ions; + + assert(ionsBufferCpy.density().data() != this->ions.density().data()); - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); @@ -895,7 +652,7 @@ TYPED_TEST(IonUpdaterTest, thatNoNaNsExistOnPhysicalNodesMoments) typename IonUpdaterTest::IonUpdater ionUpdater{ init_dict["simulation"]["algo"]["ion_updater"]}; - ionUpdater.updatePopulations(this->ions, this->EM, this->layout, this->dt, + ionUpdater.updatePopulations(this->ions, this->EM, this->boxing, this->dt, UpdaterMode::domain_only); this->fillIonsMomentsGhosts(); diff --git a/tests/functional/harris/harris_2d.py b/tests/functional/harris/harris_2d.py index b9f06ced4..ab10ccef4 100644 --- a/tests/functional/harris/harris_2d.py +++ b/tests/functional/harris/harris_2d.py @@ -25,12 +25,12 @@ startMPI() diag_outputs = "phare_outputs/test/harris/2d" -time_step_nbr = 1000 +time_step_nbr = 10 time_step = 0.001 final_time = time_step * time_step_nbr dt = 10 * time_step nt = final_time / dt + 1 -timestamps = dt * np.arange(nt) +timestamps = [0, final_time] # dt * np.arange(nt) def config(): @@ -161,6 +161,8 @@ def main(): m_plotting.plot_run_timer_data(diag_outputs, cpp.mpi_rank()) except ImportError: print("Phlop not found - install with: `pip install phlop`") + except FileNotFoundError: + print("Phlop 
installed but not active`") cpp.mpi_barrier() diff --git a/tests/simulator/advance/test_particles_advance_1d.py b/tests/simulator/advance/test_particles_advance_1d.py index d2d14e6ba..9ca3e68f2 100644 --- a/tests/simulator/advance/test_particles_advance_1d.py +++ b/tests/simulator/advance/test_particles_advance_1d.py @@ -23,19 +23,6 @@ def per_interp(dic): @ddt class AdvanceTest(AdvanceTestBase): - @data( - *per_interp({}), - *per_interp({"L0": [Box1D(10, 20)]}), - *per_interp({"L0": [Box1D(2, 12), Box1D(13, 25)]}), - ) - @unpack - def test_overlapped_particledatas_have_identical_particles( - self, interp_order, refinement_boxes - ): - self._test_overlapped_particledatas_have_identical_particles( - ndim, interp_order, refinement_boxes - ) - @data(*interp_orders) def test_L0_particle_number_conservation(self, interp): self._test_L0_particle_number_conservation(ndim, interp) diff --git a/tests/simulator/advance/test_particles_advance_2d.py b/tests/simulator/advance/test_particles_advance_2d.py index c82d8eba2..d84070db6 100644 --- a/tests/simulator/advance/test_particles_advance_2d.py +++ b/tests/simulator/advance/test_particles_advance_2d.py @@ -24,24 +24,6 @@ def per_interp(dic): @ddt class AdvanceTest(AdvanceTestBase): - @data( - *per_interp({}), - *per_interp({"L0": [Box2D(10, 19)]}), - *per_interp({"L0": [Box2D(5, 9), Box2D(10, 14)]}), - ) - @unpack - def test_overlapped_particledatas_have_identical_particles( - self, interp_order, refinement_boxes - ): - self._test_overlapped_particledatas_have_identical_particles( - ndim, - interp_order, - refinement_boxes, - ppc=ppc, - cells=40, - largest_patch_size=20, - ) - @data(*interp_orders) def test_L0_particle_number_conservation(self, interp): self._test_L0_particle_number_conservation(ndim, interp, ppc=ppc) diff --git a/tests/simulator/initialize/test_particles_init_1d.py b/tests/simulator/initialize/test_particles_init_1d.py index 60c44d692..9d4554fc4 100644 --- 
a/tests/simulator/initialize/test_particles_init_1d.py +++ b/tests/simulator/initialize/test_particles_init_1d.py @@ -73,16 +73,6 @@ def test_no_patch_ghost_on_refined_level_case(self, simInput): print(f"{self._testMethodName}_{ndim}d") self._test_patch_ghost_on_refined_level_case(ndim, False, **simInput) - @data({"cells": 40, "interp_order": 1}) - def test_has_patch_ghost_on_refined_level_case(self, simInput): - print(f"{self._testMethodName}_{ndim}d") - from pyphare.pharein.simulation import check_patch_size - - _, smallest_patch_size = check_patch_size(ndim, **simInput) - simInput["smallest_patch_size"] = smallest_patch_size - simInput["largest_patch_size"] = smallest_patch_size - self._test_patch_ghost_on_refined_level_case(ndim, True, **simInput) - @data("berger", "tile") def test_amr_clustering(self, clustering): dim = 1 diff --git a/tests/simulator/initialize/test_particles_init_2d.py b/tests/simulator/initialize/test_particles_init_2d.py index cc56392f8..b726b7f40 100644 --- a/tests/simulator/initialize/test_particles_init_2d.py +++ b/tests/simulator/initialize/test_particles_init_2d.py @@ -23,7 +23,7 @@ def per_interp(dic): @ddt -class Initialization1DTest(InitializationTest): +class Initialization2DTest(InitializationTest): @data(*interp_orders) def test_nbr_particles_per_cell_is_as_provided(self, interp_order): print(f"{self._testMethodName}_{ndim}d") @@ -88,20 +88,6 @@ def test_no_patch_ghost_on_refined_level_case(self, simInput): f"\n{self._testMethodName}_{ndim}d took {self.datetime_diff(now)} seconds" ) - @data({"cells": 40, "interp_order": 1, "nbr_part_per_cell": ppc}) - def test_has_patch_ghost_on_refined_level_case(self, simInput): - print(f"\n{self._testMethodName}_{ndim}d") - from pyphare.pharein.simulation import check_patch_size - - _, smallest_patch_size = check_patch_size(ndim, **simInput) - simInput["smallest_patch_size"] = smallest_patch_size - simInput["largest_patch_size"] = smallest_patch_size - now = self.datetime_now() - 
self._test_patch_ghost_on_refined_level_case(ndim, True, **simInput) - print( - f"\n{self._testMethodName}_{ndim}d took {self.datetime_diff(now)} seconds" - ) - if __name__ == "__main__": unittest.main() diff --git a/tests/simulator/per_test.hpp b/tests/simulator/per_test.hpp index d4a8be4e0..a887f34d0 100644 --- a/tests/simulator/per_test.hpp +++ b/tests/simulator/per_test.hpp @@ -2,10 +2,10 @@ #define PHARE_TEST_SIMULATOR_PER_TEST_HPP #include "phare/phare.hpp" +#include "simulator/simulator.hpp" #include "initializer/python_data_provider.hpp" #include "tests/core/data/field/test_field.hpp" -#include "gmock/gmock.h" #include "gtest/gtest.h" diff --git a/tests/simulator/test_restarts.py b/tests/simulator/test_restarts.py index d0e07edb4..32d777190 100644 --- a/tests/simulator/test_restarts.py +++ b/tests/simulator/test_restarts.py @@ -204,6 +204,7 @@ def count_levels_and_patches(qty): f"FAILED domain particles at time {time} {ilvl} {patch1.box} {patch0.box}" ) else: + print(f"checking {pd_key}") np.testing.assert_equal(pd0.dataset[:], pd1.dataset[:]) checks += 1 @@ -214,6 +215,7 @@ def count_levels_and_patches(qty): self.assertGreaterEqual(n_patches, n_levels) # at least one patch per level @data( + *permute(dup(dict()), expected_num_levels=2), # refinement boxes set later *permute( dup( dict( @@ -223,7 +225,6 @@ def count_levels_and_patches(qty): ), expected_num_levels=3, ), - *permute(dup(dict()), expected_num_levels=2), # refinement boxes set later ) @unpack def test_restarts(self, ndim, interp, simInput, expected_num_levels): diff --git a/tools/compare_diags.py b/tools/compare_diags.py new file mode 100644 index 000000000..a0b21c11f --- /dev/null +++ b/tools/compare_diags.py @@ -0,0 +1,49 @@ +import sys + +from pyphare.pharesee.run import Run +from pyphare.pharesee.hierarchy.hierarchy_utils import hierarchy_compare + + +run0 = Run(sys.argv[2]) +run1 = Run(sys.argv[1]) +atol = 1e-15 + +# +for time in run0.all_times()["B"]: + print("\ntime :", time) + eqr = 
hierarchy_compare( + run0.GetB(time, all_primal=False), run1.GetB(time, all_primal=False), atol=atol + ) + print("\n\t B", eqr) + + eqr = hierarchy_compare( + run0.GetE(time, all_primal=False), run1.GetE(time, all_primal=False), atol=atol + ) + print("\n\t E", eqr) + + pops = run0.all_pops() + for pop in pops: + eqr = hierarchy_compare( + run0.GetParticles(time, pop_name=pop), + run1.GetParticles(time, pop_name=pop), + atol=atol, + ) + print(f"\n\t domain particles {pop}", eqr) + + eqr = hierarchy_compare( + run0.GetN(time, pop_name=pop), run1.GetN(time, pop_name=pop), atol=atol + ) + print(f"\n\t rho {pop}", eqr) + + eqr = hierarchy_compare( + run0.GetFlux(time, pop_name=pop), + run1.GetFlux(time, pop_name=pop), + atol=atol, + ) + print(f"\n\t F {pop}", eqr) + + eqr = hierarchy_compare(run0.GetVi(time), run1.GetVi(time), atol=atol) + print(f"\n\t GetVi ", eqr) + + eqr = hierarchy_compare(run0.GetNi(time), run1.GetNi(time), atol=atol) + print(f"\n\t GetNi ", eqr)