|
| 1 | +// ------------------------------------------------------------------------ |
| 2 | +// |
| 3 | +// SPDX-License-Identifier: LGPL-2.1-or-later |
| 4 | +// Copyright (C) 2016 - 2023 by the deal.II authors |
| 5 | +// |
| 6 | +// This file is part of the deal.II library. |
| 7 | +// |
| 8 | +// Part of the source code is dual licensed under Apache-2.0 WITH |
| 9 | +// LLVM-exception OR LGPL-2.1-or-later. Detailed license information |
| 10 | +// governing the source code and code contributions can be found in |
| 11 | +// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II. |
| 12 | +// |
| 13 | +// ------------------------------------------------------------------------ |
| 14 | + |
| 15 | +
| 16 | +// Check MGTransferMatrixFree<dim, Number, MemorySpace::Default> by
| 17 | +// comparison with its MemorySpace::Host counterpart on a series of
| 18 | +// uniformly refined meshes for FE_Q
| 19 | +#include <deal.II/distributed/tria.h> |
| 20 | + |
| 21 | +#include <deal.II/fe/fe_q.h> |
| 22 | + |
| 23 | +#include <deal.II/grid/grid_generator.h> |
| 24 | + |
| 25 | +#include <deal.II/lac/la_parallel_vector.h> |
| 26 | + |
| 27 | +#include <deal.II/multigrid/mg_base.h> |
| 28 | +#include <deal.II/multigrid/mg_base.templates.h> |
| 29 | +#include <deal.II/multigrid/mg_transfer.h> |
| 30 | +#include <deal.II/multigrid/mg_transfer_matrix_free.h> |
| 31 | + |
| 32 | +#include "../tests.h" |
| 33 | + |
| 34 | +using namespace dealii; |
| 35 | + |
| 36 | +template <typename Number2> |
| 37 | +void |
| 38 | +copy_to_host( |
| 39 | + LinearAlgebra::distributed::Vector<Number2, dealii::MemorySpace::Host> &dst, |
| 40 | + const LinearAlgebra::distributed::Vector<Number2, MemorySpace::Default> &src) |
| 41 | +{ |
| 42 | + LinearAlgebra::ReadWriteVector<Number2> rw_vector( |
| 43 | + src.get_partitioner()->locally_owned_range()); |
| 44 | + rw_vector.import_elements(src, VectorOperation::insert); |
| 45 | + |
| 46 | + dst.reinit(src.get_partitioner()); |
| 47 | + dst.import_elements(rw_vector, VectorOperation::insert); |
| 48 | +} |
| 49 | + |
| 50 | +template <typename Number2> |
| 51 | +void |
| 52 | +copy_from_host( |
| 53 | + LinearAlgebra::distributed::Vector<Number2, MemorySpace::Default> &dst, |
| 54 | + const LinearAlgebra::distributed::Vector<Number2, dealii::MemorySpace::Host> |
| 55 | + &src) |
| 56 | +{ |
| 57 | + LinearAlgebra::ReadWriteVector<Number2> rw_vector( |
| 58 | + src.get_partitioner()->locally_owned_range()); |
| 59 | + rw_vector.import_elements(src, VectorOperation::insert); |
| 60 | + |
| 61 | + if (dst.size() == 0) |
| 62 | + dst.reinit(src.get_partitioner()); |
| 63 | + dst.import_elements(rw_vector, VectorOperation::insert); |
| 64 | +} |
| 65 | + |
// Compare MGTransferMatrixFree built for the host memory space against the
// one built for the default (device) memory space: on a sequence of meshes,
// prolongate and restrict_and_add are applied to identical random input
// vectors on every level and the l2 norm of the difference of the results
// is printed (expected to be zero up to round-off).
template <int dim, typename Number>
void
check(const unsigned int fe_degree)
{
  FE_Q<dim> fe(fe_degree);
  deallog << "FE: " << fe.get_name() << std::endl;

  // run a few different sizes...
  unsigned int sizes[] = {1, 2, 3, 4, 5, 6, 8};
  for (unsigned int cycle = 0; cycle < sizeof(sizes) / sizeof(unsigned int);
       ++cycle)
    {
      // Split the size into an odd subdivision count and a number of global
      // refinements (one per factor of two), so e.g. 6 -> 3 cells refined
      // once.
      unsigned int n_refinements = 0;
      unsigned int n_subdiv = sizes[cycle];
      if (n_subdiv > 1)
        while (n_subdiv % 2 == 0)
          {
            n_refinements += 1;
            n_subdiv /= 2;
          }
      // Use more refinement in 2d than in 3d, and more for low degrees, to
      // keep problem sizes comparable.
      n_refinements += 3 - dim;
      if (fe_degree < 3)
        n_refinements += 1;
      // A multigrid hierarchy on all levels is required for level transfer.
      parallel::distributed::Triangulation<dim> tr(
        MPI_COMM_WORLD,
        Triangulation<dim>::limit_level_difference_at_vertices,
        parallel::distributed::Triangulation<
          dim>::construct_multigrid_hierarchy);
      GridGenerator::subdivided_hyper_cube(tr, n_subdiv);
      tr.refine_global(n_refinements);
      deallog << "no. cells: " << tr.n_global_active_cells() << std::endl;

      DoFHandler<dim> mgdof(tr);
      mgdof.distribute_dofs(fe);
      mgdof.distribute_mg_dofs();

      // Zero boundary constraints on boundary id 0 (the whole cube
      // boundary), as typically used in a multigrid smoother setup.
      MGConstrainedDoFs mg_constrained_dofs;
      mg_constrained_dofs.initialize(mgdof);
      mg_constrained_dofs.make_zero_boundary_constraints(mgdof, {0});

      // build host reference
      MGTransferMatrixFree<dim, Number, dealii::MemorySpace::Host>
        transfer_host(mg_constrained_dofs);
      transfer_host.build(mgdof);

      // build device transfer
      MGTransferMatrixFree<dim, Number, dealii::MemorySpace::Default>
        transfer_device(mg_constrained_dofs);
      transfer_device.build(mgdof);

      // check prolongation for all levels using random vector
      for (unsigned int level = 1;
           level < mgdof.get_triangulation().n_global_levels();
           ++level)
        {
          // v1: coarse input (host), v2: fine result (host);
          // v1_cpy/v2_cpy: device copies, v3: fine result (device).
          LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> v1, v2;
          LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
            v1_cpy, v2_cpy, v3;
          v1.reinit(mgdof.locally_owned_mg_dofs(level - 1), MPI_COMM_WORLD);
          v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
          v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
          // Fill the input once so the host and device paths see identical
          // data.
          for (unsigned int i = 0; i < v1.locally_owned_size(); ++i)
            v1.local_element(i) = random_value<double>();

          copy_from_host(v1_cpy, v1);

          transfer_host.prolongate(level, v2, v1);
          transfer_device.prolongate(level, v3, v1_cpy);

          copy_from_host(v2_cpy, v2);

          // Difference between device result and host reference; should be
          // zero.
          v3 -= v2_cpy;
          deallog << "Diff prolongate l" << level << ": " << v3.l2_norm()
                  << std::endl;
        }

      // check restriction for all levels using random vector
      for (unsigned int level = 1;
           level < mgdof.get_triangulation().n_global_levels();
           ++level)
        {
          // v1: fine input (host), v2: coarse result (host);
          // v1_cpy/v2_cpy: device copies, v3: coarse result (device).
          LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> v1, v2;
          LinearAlgebra::distributed::Vector<Number, MemorySpace::Default>
            v1_cpy, v2_cpy, v3;
          v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
          v2.reinit(mgdof.locally_owned_mg_dofs(level - 1), MPI_COMM_WORLD);
          v3.reinit(mgdof.locally_owned_mg_dofs(level - 1), MPI_COMM_WORLD);
          for (unsigned int i = 0; i < v1.locally_owned_size(); ++i)
            v1.local_element(i) = random_value<double>();
          copy_from_host(v1_cpy, v1);
          // First pass: destinations are zero, so this checks plain
          // restriction.
          transfer_host.restrict_and_add(level, v2, v1);
          transfer_device.restrict_and_add(level, v3, v1_cpy);
          copy_from_host(v2_cpy, v2);
          v3 -= v2_cpy;
          deallog << "Diff restrict l" << level << ": " << v3.l2_norm()
                  << std::endl;

          // Second pass: destinations are pre-filled with ones, so this
          // checks the additive behavior of restrict_and_add.
          v2 = 1.;
          v3 = 1.;
          transfer_host.restrict_and_add(level, v2, v1);
          transfer_device.restrict_and_add(level, v3, v1_cpy);
          copy_from_host(v2_cpy, v2);
          v3 -= v2_cpy;
          deallog << "Diff restrict add l" << level << ": " << v3.l2_norm()
                  << std::endl;
        }
      deallog << std::endl;
    }
}
| 175 | + |
| 176 | + |
| 177 | +int |
| 178 | +main(int argc, char **argv) |
| 179 | +{ |
| 180 | + // no threading in this test... |
| 181 | + Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1); |
| 182 | + mpi_initlog(); |
| 183 | + |
| 184 | + check<2, double>(1); |
| 185 | + check<2, double>(3); |
| 186 | + check<3, double>(1); |
| 187 | + check<3, double>(3); |
| 188 | + check<2, float>(2); |
| 189 | + check<3, float>(2); |
| 190 | +} |
0 commit comments