From: Martin Kronbichler
Date: Mon, 11 Jan 2016 13:11:26 +0000 (+0100)
Subject: Implement matrix-free multigrid level transfer for FE_Q/FE_DGQ
X-Git-Tag: v8.4.0-rc2~76^2~7
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ca4fe0a8bb2ca29505b9fc77f05eb923d9d85acb;p=dealii.git

Implement matrix-free multigrid level transfer for FE_Q/FE_DGQ
---

diff --git a/include/deal.II/multigrid/mg_transfer_matrix_free.h b/include/deal.II/multigrid/mg_transfer_matrix_free.h
new file mode 100644
index 0000000000..2999a62e9f
--- /dev/null
+++ b/include/deal.II/multigrid/mg_transfer_matrix_free.h
@@ -0,0 +1,232 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii__mg_transfer_matrix_free_h
+#define dealii__mg_transfer_matrix_free_h
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+/*!@addtogroup mg */
+/*@{*/
+
+/**
+ * Implementation of the MGTransferBase interface for which the transfer
+ * operations are implemented in a matrix-free way based on the interpolation
+ * matrices of the underlying finite element. This requires considerably less
+ * memory than MGTransferPrebuilt and can also be considerably faster than
+ * that variant.
+ *
+ * This class currently only works for tensor-product finite elements based on
+ * FE_Q and FE_DGQ elements, including systems involving multiple components
+ * of one of these elements. Systems with different elements or other element
+ * types are currently not implemented.
+ *
+ * @author Martin Kronbichler
+ * @date 2016
+ */
+template <int dim, typename Number>
+class MGTransferMatrixFree : public MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >
+{
+public:
+  /**
+   * Constructor without constraint matrices. Use this constructor only with
+   * discontinuous finite elements or with no local refinement.
+   */
+  MGTransferMatrixFree ();
+
+  /**
+   * Constructor with constraints. Equivalent to the default constructor
+   * followed by initialize_constraints().
+   */
+  MGTransferMatrixFree (const MGConstrainedDoFs &mg_constrained_dofs);
+
+  /**
+   * Destructor.
+   */
+  virtual ~MGTransferMatrixFree ();
+
+  /**
+   * Initialize the constraints to be used in build().
+   */
+  void initialize_constraints (const MGConstrainedDoFs &mg_constrained_dofs);
+
+  /**
+   * Reset the object to the state it had right after the default constructor.
+   */
+  void clear ();
+
+  /**
+   * Actually build the information for the prolongation on each level.
+   */
+  void build (const DoFHandler<dim,dim> &mg_dof);
+
+  /**
+   * Prolongate a vector from level to_level-1 to level to_level using the
+   * embedding matrices of the underlying finite element. The previous
+   * content of dst is overwritten.
+   *
+   * @arg src is a vector with as many elements as there are degrees of
+   * freedom on the coarser level involved.
+   *
+   * @arg dst has as many elements as there are degrees of freedom on the
+   * finer level.
+   */
+  virtual void prolongate (const unsigned int                           to_level,
+                           parallel::distributed::Vector<Number>       &dst,
+                           const parallel::distributed::Vector<Number> &src) const;
+
+  /**
+   * Restrict a vector from level from_level to level from_level-1 using the
+   * transpose operation of the @p prolongate method. If the region covered
+   * by cells on level from_level is smaller than that of level from_level-1
+   * (local refinement), then some degrees of freedom in dst are active and
+   * will not be altered. For the other degrees of freedom, the result of the
+   * restriction is added.
+   *
+   * @arg src is a vector with as many elements as there are degrees of
+   * freedom on the finer level involved.
+   *
+   * @arg dst has as many elements as there are degrees of freedom on the
+   * coarser level.
+   */
+  virtual void restrict_and_add (const unsigned int                           from_level,
+                                 parallel::distributed::Vector<Number>       &dst,
+                                 const parallel::distributed::Vector<Number> &src) const;
+
+  /**
+   * Finite element does not provide prolongation matrices.
+   */
+  DeclException0(ExcNoProlongation);
+
+  /**
+   * Memory used by this object.
+   */
+  std::size_t memory_consumption () const;
+
+private:
+
+  /**
+   * Stores the degree of the finite element contained in the DoFHandler
+   * passed to build(). The selection of the computational kernel is based on
+   * this number.
+   */
+  unsigned int fe_degree;
+
+  /**
+   * Stores whether the element is continuous and there is a joint degree of
+   * freedom in the center of the 1D line.
+   */
+  bool element_is_continuous;
+
+  /**
+   * Stores the number of components in the finite element contained in the
+   * DoFHandler passed to build().
+   */
+  unsigned int n_components;
+
+  /**
+   * Stores the number of degrees of freedom on all child cells. It is
+   * 2^dim * fe.dofs_per_cell for DG elements and somewhat less for
+   * continuous elements.
+   */
+  unsigned int n_child_cell_dofs;
+
+  /**
+   * Holds the indices for cells on a given level, extracted from the
+   * DoFHandler for fast access. All DoF indices on a given level are stored
+   * as a plain array (since this class assumes constant DoFs per cell). To
+   * index into this array, use the cell number times dofs_per_cell.
+   *
+   * This array is first arranged such that all locally owned level cells
+   * come first (found in the variable n_owned_level_cells) and then other
+   * cells necessary for the transfer to the next level.
+   */
+  std::vector<std::vector<unsigned int> > level_dof_indices;
+
+  /**
+   * Stores the connectivity from parent to child cell numbers.
+   */
+  std::vector<std::vector<std::pair<unsigned int,unsigned int> > > parent_child_connect;
+
+  /**
+   * Stores the number of cells owned on a given process (sets the bounds for
+   * the worker loops).
+   */
+  std::vector<unsigned int> n_owned_level_cells;
+
+  /**
+   * Holds the one-dimensional embedding (prolongation) matrix from mother
+   * element to the children.
+   */
+  internal::MatrixFreeFunctions::ShapeInfo<Number> shape_info;
+
+  /**
+   * Holds the temporary values for the tensor evaluation.
+   */
+  mutable AlignedVector<VectorizedArray<Number> > evaluation_data;
+
+  /**
+   * For continuous elements, restriction is not additive and we need to
+   * weight the result at the end of prolongation (and at the start of
+   * restriction) by the valence of the degrees of freedom, i.e., on how many
+   * elements they appear. We store the data in vectorized form to allow for
+   * cheap access. Moreover, we utilize the fact that we only need to store
+   * 3^dim indices.
+   */
+  std::vector<AlignedVector<VectorizedArray<Number> > > weights_on_refined;
+
+  /**
+   * Stores the local indices of Dirichlet boundary conditions on cells.
+   */
+  std::vector<std::vector<std::vector<unsigned short> > > dirichlet_indices;
+
+  /**
+   * Performs the templated prolongation operation.
+   */
+  template <int degree>
+  void do_prolongate_add(const unsigned int                           to_level,
+                         parallel::distributed::Vector<Number>       &dst,
+                         const parallel::distributed::Vector<Number> &src) const;
+
+  /**
+   * Performs the templated restriction operation.
+   */
+  template <int degree>
+  void do_restrict_add(const unsigned int                           from_level,
+                       parallel::distributed::Vector<Number>       &dst,
+                       const parallel::distributed::Vector<Number> &src) const;
+};
+
+
+/*@}*/
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/source/multigrid/CMakeLists.txt b/source/multigrid/CMakeLists.txt
index ac23a6af19..f974aba797 100644
--- a/source/multigrid/CMakeLists.txt
+++ b/source/multigrid/CMakeLists.txt
@@ -21,6 +21,7 @@ SET(_src
   mg_tools.cc
   mg_transfer_block.cc
   mg_transfer_component.cc
+  mg_transfer_matrix_free.cc
   mg_transfer_prebuilt.cc
   multigrid.cc
   )
@@ -31,6 +32,7 @@ SET(_inst
   mg_tools.inst.in
   mg_transfer_block.inst.in
   mg_transfer_component.inst.in
+  mg_transfer_matrix_free.inst.in
   mg_transfer_prebuilt.inst.in
   multigrid.inst.in
   )
diff --git a/source/multigrid/mg_transfer_matrix_free.cc b/source/multigrid/mg_transfer_matrix_free.cc
new file mode 100644
index 0000000000..31f97d48dc
--- /dev/null
+++ b/source/multigrid/mg_transfer_matrix_free.cc
@@ -0,0 +1,942 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+// +// --------------------------------------------------------------------- + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +DEAL_II_NAMESPACE_OPEN + + +template +MGTransferMatrixFree::MGTransferMatrixFree () + : + fe_degree(0), + element_is_continuous(false), + n_components(0), + n_child_cell_dofs(0) +{} + + + +template +MGTransferMatrixFree::MGTransferMatrixFree (const MGConstrainedDoFs &mg_c) + : + fe_degree(0), + element_is_continuous(false), + n_components(0), + n_child_cell_dofs(0) +{ + this->mg_constrained_dofs = &mg_c; +} + + + +template +MGTransferMatrixFree::~MGTransferMatrixFree () +{} + + + +template +void MGTransferMatrixFree::initialize_constraints +(const MGConstrainedDoFs &mg_c) +{ + this->mg_constrained_dofs = &mg_c; +} + + + +template +void MGTransferMatrixFree::clear () +{ + this->MGLevelGlobalTransfer >::clear(); + fe_degree = 0; + element_is_continuous = false; + n_components = 0; + n_child_cell_dofs = 0; + level_dof_indices.clear(); + parent_child_connect.clear(); + n_owned_level_cells.clear(); + shape_info = internal::MatrixFreeFunctions::ShapeInfo(); + evaluation_data.clear(); + weights_on_refined.clear(); +} + + + +namespace +{ + // given the collection of child cells in lexicographic ordering as seen + // from the parent, compute the first index of the given child + template + unsigned int + compute_shift_within_children(const unsigned int child, + const unsigned int fe_shift_1d, + const unsigned int fe_degree) + { + // we put the degrees of freedom of all child cells in + // lexicographic ordering + unsigned int c_tensor_index[dim]; + unsigned int tmp = child; + for (unsigned int d=0; d + void add_child_indices(const unsigned int child, + const unsigned int fe_shift_1d, + const unsigned int fe_degree, + const std::vector &lexicographic_numbering, + const std::vector &local_dof_indices, + types::global_dof_index *target_indices) + { + const unsigned int n_child_dofs_1d = fe_degree + 1 + fe_shift_1d; + const unsigned int shift = + compute_shift_within_children(child, fe_shift_1d, fe_degree); + const unsigned int n_components = + local_dof_indices.size()/Utilities::fixed_power(fe_degree+1); + types::global_dof_index *indices = target_indices + shift; + const unsigned int n_scalar_cell_dofs = Utilities::fixed_power(n_child_dofs_1d); + for (unsigned int c=0, m=0; c2 ? (fe_degree+1) : 1); ++k) + for (unsigned int j=0; j<(dim>1 ? 
(fe_degree+1) : 1); ++j) + for (unsigned int i=0; i<(fe_degree+1); ++i, ++m) + { + const unsigned int index = c*n_scalar_cell_dofs+k*n_child_dofs_1d* + n_child_dofs_1d+j*n_child_dofs_1d+i; + Assert(indices[index] == numbers::invalid_dof_index || + indices[index] == local_dof_indices[lexicographic_numbering[m]], + ExcInternalError()); + indices[index] = local_dof_indices[lexicographic_numbering[m]]; + } + } + + + + // initialize the vectors needed for the transfer (and merge with the + // content in copy_indices_global_mine) + template + void + reinit_ghosted_vector(const IndexSet &locally_owned, + std::vector &ghosted_level_dofs, + const MPI_Comm &communicator, + parallel::distributed::Vector &ghosted_level_vector, + std::vector > ©_indices_global_mine) + { + std::sort(ghosted_level_dofs.begin(), ghosted_level_dofs.end()); + IndexSet ghosted_dofs(locally_owned.size()); + ghosted_dofs.add_indices(ghosted_level_dofs.begin(), + std::unique(ghosted_level_dofs.begin(), + ghosted_level_dofs.end())); + ghosted_dofs.compress(); + + // Add possible ghosts from the previous content in the vector + if (ghosted_level_vector.size() == locally_owned.size()) + { + // shift the local number of the copy indices according to the new + // partitioner that we are going to use for the vector + const std_cxx11::shared_ptr part + = ghosted_level_vector.get_partitioner(); + ghosted_dofs.add_indices(part->ghost_indices()); + for (unsigned int i=0; ilocal_to_global(copy_indices_global_mine[i].second)); + } + ghosted_level_vector.reinit(locally_owned, ghosted_dofs, communicator); + } + + // Transform the ghost indices to local index space for the vector + void + copy_indices_to_mpi_local_numbers(const Utilities::MPI::Partitioner &part, + const std::vector &mine, + const std::vector &remote, + std::vector &localized_indices) + { + localized_indices.resize(mine.size()+remote.size(), + numbers::invalid_unsigned_int); + for (unsigned int i=0; i +void MGTransferMatrixFree::build +(const DoFHandler &mg_dof) +{ + this->fill_and_communicate_copy_indices(mg_dof); + + // we collect all child DoFs of a mother cell together. For faster + // tensorized operations, we align the degrees of freedom + // lexicographically. We distinguish FE_Q elements and FE_DGQ elements + + const Triangulation &tria = mg_dof.get_triangulation(); + + // ---------------------------- 1. Extract 1D info about the finite element + // step 1.1: create a 1D copy of the finite element from FETools where we + // substitute the template argument + AssertDimension(mg_dof.get_fe().n_base_elements(), 1); + std::string fe_name = mg_dof.get_fe().base_element(0).get_name(); + { + const std::size_t template_starts = fe_name.find_first_of('<'); + Assert (fe_name[template_starts+1] == (dim==1?'1':(dim==2?'2':'3')), + ExcInternalError()); + fe_name[template_starts+1] = '1'; + } + std_cxx11::shared_ptr > fe_1d + (FETools::get_fe_from_name<1>(fe_name)); + const FiniteElement<1> &fe = *fe_1d; + unsigned int n_child_dofs_1d = numbers::invalid_unsigned_int; + + { + // currently, we have only FE_Q and FE_DGQ type elements implemented + n_components = mg_dof.get_fe().element_multiplicity(0); + AssertDimension(Utilities::fixed_power(fe.dofs_per_cell)*n_components, + mg_dof.get_fe().dofs_per_cell); + AssertDimension(fe.degree, mg_dof.get_fe().degree); + fe_degree = fe.degree; + element_is_continuous = fe.dofs_per_vertex > 0; + Assert(fe.dofs_per_vertex < 2, ExcNotImplemented()); + + // step 1.2: get renumbering of 1D basis functions to lexicographic + // numbers. 
The distinction according to fe.dofs_per_vertex is to support + // both continuous and discontinuous bases. + std::vector renumbering(fe.dofs_per_cell); + { + AssertIndexRange(fe.dofs_per_vertex, 2); + renumbering[0] = 0; + for (unsigned int i=0; i::vertices_per_cell*fe.dofs_per_vertex + i; + if (fe.dofs_per_vertex > 0) + renumbering[fe.dofs_per_cell-fe.dofs_per_vertex] = fe.dofs_per_vertex; + } + + // step 1.3: create a 1D quadrature formula from the finite element that + // collects the support points of the basis functions on the two children. + std::vector > basic_support_points = fe.get_unit_support_points(); + Assert(fe.dofs_per_vertex == 0 || fe.dofs_per_vertex == 1, + ExcNotImplemented()); + std::vector > points_refined(fe.dofs_per_vertex > 0 ? + (2 * fe.dofs_per_cell - 1) : + (2 * fe.dofs_per_cell)); + const unsigned int shift = fe.dofs_per_cell - fe.dofs_per_vertex; + for (unsigned int c=0; c::max_children_per_cell; ++c) + for (unsigned int j=0; j(n_child_dofs_1d); + + // step 1.4: evaluate the polynomials and store the data in ShapeInfo + const Quadrature<1> quadrature(points_refined); + shape_info.reinit(quadrature, mg_dof.get_fe(), 0); + + for (unsigned int c=0; c::max_children_per_cell; ++c) + for (unsigned int i=0; i::epsilon(),1e-12), + ExcInternalError()); + } + + // -------------- 2. Extract and match dof indices between child and parent + const unsigned int n_levels = tria.n_global_levels(); + level_dof_indices.resize(n_levels); + parent_child_connect.resize(n_levels-1); + n_owned_level_cells.resize(n_levels-1); + std::vector > coarse_level_indices(n_levels-1); + for (unsigned int level=0; level local_dof_indices(mg_dof.get_fe().dofs_per_cell); + dirichlet_indices.resize(n_levels-1); + + // We use the vectors stored ghosted_level_vector in the base class for + // keeping ghosted transfer indices. To avoid keeping two very similar + // vectors, we merge them here. + if (this->ghosted_level_vector.max_level() != n_levels-1) + this->ghosted_level_vector.resize(0, n_levels-1); + + for (unsigned int level=n_levels-1; level > 0; --level) + { + unsigned int counter = 0; + std::vector global_level_dof_indices; + std::vector global_level_dof_indices_remote; + std::vector ghosted_level_dofs; + std::vector global_level_dof_indices_l0; + std::vector ghosted_level_dofs_l0; + + // step 2.1: loop over the cells on the coarse side + for (typename DoFHandler::cell_iterator cell = mg_dof.begin(level-1); + cell != mg_dof.end(level-1); ++cell) + { + // need to look into a cell if it has children and it is locally owned + if (!cell->has_children()) + continue; + + bool consider_cell = false; + if (tria.locally_owned_subdomain()==numbers::invalid_subdomain_id + || cell->level_subdomain_id()==tria.locally_owned_subdomain() + ) + consider_cell = true; + + // due to the particular way we store DoF indices (via children), we + // also need to add the DoF indices for coarse cells where we own at + // least one child + bool cell_is_remote = !consider_cell; + for (unsigned int c=0; c::max_children_per_cell; ++c) + if (cell->child(c)->level_subdomain_id()==tria.locally_owned_subdomain()) + consider_cell = true; + + if (!consider_cell) + continue; + + // step 2.2: loop through children and append the dof indices to the + // appropriate list. We need separate lists for the owned coarse + // cell case (which will be part of restriction/prolongation between + // level-1 and level) and the remote case (which needs to store DoF + // indices for the operations between level and level+1). 
+ AssertDimension(cell->n_children(), + GeometryInfo::max_children_per_cell); + std::vector &next_indices = + cell_is_remote ? global_level_dof_indices_remote : global_level_dof_indices; + const std::size_t start_index = next_indices.size(); + next_indices.resize(start_index + n_child_cell_dofs, + numbers::invalid_dof_index); + for (unsigned int c=0; c::max_children_per_cell; ++c) + { + if (cell_is_remote && cell->child(c)->level_subdomain_id() != + tria.locally_owned_subdomain()) + continue; + cell->child(c)->get_mg_dof_indices(local_dof_indices); + + const IndexSet &owned_level_dofs = mg_dof.locally_owned_mg_dofs(level); + for (unsigned int i=0; i(c, fe.dofs_per_cell - fe.dofs_per_vertex, + fe.degree, shape_info.lexicographic_numbering, + local_dof_indices, + &next_indices[start_index]); + + // step 2.3 store the connectivity to the parent + if (cell->child(c)->has_children() && + (tria.locally_owned_subdomain()==numbers::invalid_subdomain_id + || cell->child(c)->level_subdomain_id()==tria.locally_owned_subdomain() + )) + { + const unsigned int child_index = coarse_level_indices[level][cell->child(c)->index()]; + AssertIndexRange(child_index, parent_child_connect[level].size()); + unsigned int parent_index = counter; + // remote cells, i.e., cells where we work on a further + // level but are not treated on the current level, need to + // be placed at the end of the list; however, we do not yet + // know the exact position in the array, so shift their + // parent index by the number of cells so we can set the + // correct number after the end of this loop + if (cell_is_remote) + parent_index = start_index/n_child_cell_dofs + tria.n_cells(level); + parent_child_connect[level][child_index] = + std::make_pair(parent_index, c); + AssertIndexRange(mg_dof.get_fe().dofs_per_cell, + static_cast(-1)); + + // set Dirichlet boundary conditions (as a list of + // constrained DoFs) for the child + if (this->mg_constrained_dofs != 0) + for (unsigned int i=0; img_constrained_dofs->is_boundary_index(level, local_dof_indices[shape_info.lexicographic_numbering[i]])) + dirichlet_indices[level][child_index].push_back(i); + } + } + if (!cell_is_remote) + { + AssertIndexRange(static_cast(cell->index()), + coarse_level_indices[level-1].size()); + coarse_level_indices[level-1][cell->index()] = counter++; + } + + // step 2.4: include indices for the coarsest cells. we still insert + // the indices as if they were from a child in order to use the same + // code (the coarsest level does not matter much in terms of memory, + // so we gain in code simplicity) + if (level == 1 && !cell_is_remote) + { + cell->get_mg_dof_indices(local_dof_indices); + + const IndexSet &owned_level_dofs_l0 = mg_dof.locally_owned_mg_dofs(0); + for (unsigned int i=0; i(0, fe.dofs_per_cell - fe.dofs_per_vertex, + fe.degree, shape_info.lexicographic_numbering, + local_dof_indices, + &global_level_dof_indices_l0[start_index]); + + dirichlet_indices[0].push_back(std::vector()); + if (this->mg_constrained_dofs != 0) + for (unsigned int i=0; img_constrained_dofs->is_boundary_index(0, local_dof_indices[shape_info.lexicographic_numbering[i]])) + dirichlet_indices[0].back().push_back(i); + } + } + + // step 2.5: store information about the current level and prepare the + // Dirichlet indices and parent-child relationship for the next coarser + // level + AssertDimension(counter*n_child_cell_dofs, global_level_dof_indices.size()); + n_owned_level_cells[level-1] = counter; + dirichlet_indices[level-1].resize(counter); + parent_child_connect[level-1]. 
+ resize(counter, std::make_pair(numbers::invalid_unsigned_int, + numbers::invalid_unsigned_int)); + + // step 2.6: put the cells with remotely owned parent to the end of the + // list (these are needed for the transfer from level to level+1 but not + // for the transfer from level-1 to level). + if (level < n_levels-1) + for (std::vector >::iterator + i=parent_child_connect[level].begin(); i!=parent_child_connect[level].end(); ++i) + if (i->first >= tria.n_cells(level)) + { + i->first -= tria.n_cells(level); + i->first += counter; + } + + // step 2.7: Initialize the ghosted vector + const parallel::Triangulation *ptria = + (dynamic_cast*> (&tria)); + const MPI_Comm communicator = + ptria != 0 ? ptria->get_communicator() : MPI_COMM_SELF; + + reinit_ghosted_vector(mg_dof.locally_owned_mg_dofs(level), + ghosted_level_dofs, communicator, + this->ghosted_level_vector[level], + this->copy_indices_global_mine[level]); + + copy_indices_to_mpi_local_numbers(*this->ghosted_level_vector[level].get_partitioner(), + global_level_dof_indices, + global_level_dof_indices_remote, + level_dof_indices[level]); + + // step 2.8: Initialize the ghosted vector for level 0 + if (level == 1) + { + for (unsigned int i = 0; ighosted_level_vector[0], + this->copy_indices_global_mine[0]); + + copy_indices_to_mpi_local_numbers(*this->ghosted_level_vector[0].get_partitioner(), + global_level_dof_indices_l0, + std::vector(), + level_dof_indices[0]); + } + } + + // ------------------------ 3. compute weights to make restriction additive + // + // get the valence of the individual components and compute the weights as + // the inverse of the valence + weights_on_refined.resize(n_levels); + for (unsigned int level = 1; levelghosted_level_vector[level] = 0; + for (unsigned int c=0; cghosted_level_vector[level].local_element(level_dof_indices[level][n_child_cell_dofs*c+j]) += Number(1.); + this->ghosted_level_vector[level].compress(VectorOperation::add); + this->ghosted_level_vector[level].update_ghost_values(); + + const unsigned int vec_size = VectorizedArray::n_array_elements; + std::vector degree_to_3 (n_child_dofs_1d); + degree_to_3[0] = 0; + for (unsigned int i=1; i(3)); + for (unsigned int c=0; c2 ? n_child_dofs_1d : 1); ++k) + for (unsigned int j=0; j<(dim>1 ? n_child_dofs_1d : 1); ++j) + { + unsigned int shift = 9*degree_to_3[k] + 3*degree_to_3[j]; + for (unsigned int i=0; i(3)+shift+degree_to_3[i]][v] = Number(1.)/ + this->ghosted_level_vector[level].local_element(level_dof_indices[level][n_child_cell_dofs*c+m]); + } + } + } + + evaluation_data.resize(3*n_child_cell_dofs); +} + + + +template +void MGTransferMatrixFree +::prolongate (const unsigned int to_level, + parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const +{ + Assert ((to_level >= 1) && (to_level<=level_dof_indices.size()), + ExcIndexRange (to_level, 1, level_dof_indices.size()+1)); + + AssertDimension(this->ghosted_level_vector[to_level].local_size(), + dst.local_size()); + AssertDimension(this->ghosted_level_vector[to_level-1].local_size(), + src.local_size()); + + this->ghosted_level_vector[to_level-1] = src; + this->ghosted_level_vector[to_level-1].update_ghost_values(); + this->ghosted_level_vector[to_level] = 0.; + + // the implementation in do_prolongate_add is templated in the degree of the + // element (for efficiency reasons), so we need to find the appropriate + // kernel here... 
+ if (fe_degree == 0) + do_prolongate_add<0>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 1) + do_prolongate_add<1>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 2) + do_prolongate_add<2>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 3) + do_prolongate_add<3>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 4) + do_prolongate_add<4>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 5) + do_prolongate_add<5>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 6) + do_prolongate_add<6>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 7) + do_prolongate_add<7>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 8) + do_prolongate_add<8>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 9) + do_prolongate_add<9>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else if (fe_degree == 10) + do_prolongate_add<10>(to_level, this->ghosted_level_vector[to_level], + this->ghosted_level_vector[to_level-1]); + else + AssertThrow(false, ExcNotImplemented("Only degrees 0 up to 10 implemented.")); + + this->ghosted_level_vector[to_level].compress(VectorOperation::add); + dst = this->ghosted_level_vector[to_level]; +} + + + +template +void MGTransferMatrixFree +::restrict_and_add (const unsigned int from_level, + parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const +{ + Assert ((from_level >= 1) && (from_level<=level_dof_indices.size()), + ExcIndexRange (from_level, 1, level_dof_indices.size()+1)); + + AssertDimension(this->ghosted_level_vector[from_level].local_size(), + src.local_size()); + AssertDimension(this->ghosted_level_vector[from_level-1].local_size(), + dst.local_size()); + + this->ghosted_level_vector[from_level] = src; + this->ghosted_level_vector[from_level].update_ghost_values(); + this->ghosted_level_vector[from_level-1] = 0.; + + if (fe_degree == 0) + do_restrict_add<0>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 1) + do_restrict_add<1>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 2) + do_restrict_add<2>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 3) + do_restrict_add<3>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 4) + do_restrict_add<4>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 5) + do_restrict_add<5>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 6) + do_restrict_add<6>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 7) + do_restrict_add<7>(from_level, this->ghosted_level_vector[from_level-1], + 
this->ghosted_level_vector[from_level]); + else if (fe_degree == 8) + do_restrict_add<8>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 9) + do_restrict_add<9>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else if (fe_degree == 10) + do_restrict_add<10>(from_level, this->ghosted_level_vector[from_level-1], + this->ghosted_level_vector[from_level]); + else + AssertThrow(false, ExcNotImplemented("Only degrees 0 up to 10 implemented.")); + + this->ghosted_level_vector[from_level-1].compress(VectorOperation::add); + dst += this->ghosted_level_vector[from_level-1]; +} + + + +namespace +{ + template + void + perform_tensorized_op(const Eval &evaluator, + const unsigned int n_child_cell_dofs, + const unsigned int n_components, + AlignedVector > &evaluation_data) + { + AssertDimension(n_components * Eval::n_q_points, n_child_cell_dofs); + VectorizedArray *t0 = &evaluation_data[0]; + VectorizedArray *t1 = &evaluation_data[n_child_cell_dofs]; + VectorizedArray *t2 = &evaluation_data[2*n_child_cell_dofs]; + + for (unsigned int c=0; c(t0, t2); + else if (dim == 2) + { + evaluator.template values<0,prolongate,false>(t0, t1); + evaluator.template values<1,prolongate,false>(t1, t2); + } + else if (dim == 3) + { + evaluator.template values<0,prolongate,false>(t0, t2); + evaluator.template values<1,prolongate,false>(t2, t1); + evaluator.template values<2,prolongate,false>(t1, t2); + } + else + Assert(false, ExcNotImplemented()); + if (prolongate) + { + t0 += Eval::dofs_per_cell; + t2 += Eval::n_q_points; + } + else + { + t0 += Eval::n_q_points; + t2 += Eval::dofs_per_cell; + } + } + } + + template + void weight_dofs_on_child (const VectorizedArray *weights, + const unsigned int n_components, + VectorizedArray *data) + { + Assert(degree > 0, ExcNotImplemented()); + const unsigned int loop_length = 2*degree+1; + unsigned int degree_to_3 [loop_length]; + degree_to_3[0] = 0; + for (unsigned int i=1; i2 ? loop_length : 1); ++k) + for (unsigned int j=0; j<(dim>1 ? loop_length : 1); ++j) + { + const unsigned int shift = 9*degree_to_3[k] + 3*degree_to_3[j]; + data[0] *= weights[shift]; + for (unsigned int i=1; i +template +void MGTransferMatrixFree +::do_prolongate_add (const unsigned int to_level, + parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const +{ + const unsigned int vec_size = VectorizedArray::n_array_elements; + const unsigned int n_child_dofs_1d = 2*(fe_degree+1) - element_is_continuous; + const unsigned int n_scalar_cell_dofs = Utilities::fixed_power(n_child_dofs_1d); + const unsigned int three_to_dim = Utilities::fixed_int_power<3,dim>::value; + + for (unsigned int cell=0; cell < n_owned_level_cells[to_level-1]; + cell += vec_size) + { + const unsigned int n_chunks = cell+vec_size > n_owned_level_cells[to_level-1] ? + n_owned_level_cells[to_level-1] - cell : vec_size; + + // read from source vector + for (unsigned int v=0; v + (parent_child_connect[to_level-1][cell+v].second, + degree+1-element_is_continuous, degree); + const unsigned int *indices = &level_dof_indices[to_level-1][parent_child_connect[to_level-1][cell+v].first*n_child_cell_dofs+shift]; + for (unsigned int c=0, m=0; c2 ? (degree+1) : 1); ++k) + for (unsigned int j=0; j<(dim>1 ? 
(degree+1) : 1); ++j) + for (unsigned int i=0; i<(degree+1); ++i, ++m) + evaluation_data[m][v] = + src.local_element(indices[c*n_scalar_cell_dofs + + k*n_child_dofs_1d*n_child_dofs_1d+ + j*n_child_dofs_1d+i]); + + // apply Dirichlet boundary conditions on parent cell + for (std::vector::const_iterator i=dirichlet_indices[to_level-1][cell+v].begin(); i!=dirichlet_indices[to_level-1][cell+v].end(); ++i) + evaluation_data[*i][v] = 0.; + } + } + + // perform tensorized operation + Assert(shape_info.element_type == + internal::MatrixFreeFunctions::tensor_symmetric, ExcNotImplemented()); + if (element_is_continuous) + { + AssertDimension(shape_info.shape_val_evenodd.size(), + (degree+1)*(degree+1)); + typedef internal::EvaluatorTensorProduct > Evaluator; + Evaluator evaluator(shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd); + perform_tensorized_op(evaluator, + n_child_cell_dofs, + n_components, + evaluation_data); + weight_dofs_on_child(&weights_on_refined[to_level][(cell/vec_size)*three_to_dim], + n_components, + &evaluation_data[2*n_child_cell_dofs]); + } + else + { + AssertDimension(shape_info.shape_val_evenodd.size(), + (degree+1)*(degree+1)); + typedef internal::EvaluatorTensorProduct > Evaluator; + Evaluator evaluator(shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd); + perform_tensorized_op(evaluator, + n_child_cell_dofs, + n_components, + evaluation_data); + } + + // write into dst vector + const unsigned int *indices = &level_dof_indices[to_level][cell* + n_child_cell_dofs]; + for (unsigned int v=0; v +template +void MGTransferMatrixFree +::do_restrict_add (const unsigned int from_level, + parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const +{ + const unsigned int vec_size = VectorizedArray::n_array_elements; + const unsigned int n_child_dofs_1d = 2*(fe_degree+1) - element_is_continuous; + const unsigned int n_scalar_cell_dofs = Utilities::fixed_power(n_child_dofs_1d); + const unsigned int three_to_dim = Utilities::fixed_int_power<3,dim>::value; + + for (unsigned int cell=0; cell < n_owned_level_cells[from_level-1]; + cell += vec_size) + { + const unsigned int n_chunks = cell+vec_size > n_owned_level_cells[from_level-1] ? 
+ n_owned_level_cells[from_level-1] - cell : vec_size; + + // read from source vector + { + const unsigned int *indices = &level_dof_indices[from_level][cell* + n_child_cell_dofs]; + for (unsigned int v=0; v > Evaluator; + Evaluator evaluator(shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd); + weight_dofs_on_child(&weights_on_refined[from_level][(cell/vec_size)*three_to_dim], + n_components, + &evaluation_data[0]); + perform_tensorized_op(evaluator, + n_child_cell_dofs, + n_components, + evaluation_data); + } + else + { + AssertDimension(shape_info.shape_val_evenodd.size(), + (degree+1)*(degree+1)); + typedef internal::EvaluatorTensorProduct > Evaluator; + Evaluator evaluator(shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd, + shape_info.shape_val_evenodd); + perform_tensorized_op(evaluator, + n_child_cell_dofs, + n_components, + evaluation_data); + } + + // write into dst vector + for (unsigned int v=0; v + (parent_child_connect[from_level-1][cell+v].second, + degree+1-element_is_continuous, degree); + AssertIndexRange(parent_child_connect[from_level-1][cell+v].first* + n_child_cell_dofs+n_child_cell_dofs-1, + level_dof_indices[from_level-1].size()); + const unsigned int *indices = &level_dof_indices[from_level-1][parent_child_connect[from_level-1][cell+v].first*n_child_cell_dofs+shift]; + for (unsigned int c=0, m=0; c::const_iterator i=dirichlet_indices[from_level-1][cell+v].begin(); i!=dirichlet_indices[from_level-1][cell+v].end(); ++i) + evaluation_data[2*n_child_cell_dofs+(*i)][v] = 0.; + + for (unsigned int k=0; k<(dim>2 ? (degree+1) : 1); ++k) + for (unsigned int j=0; j<(dim>1 ? (degree+1) : 1); ++j) + for (unsigned int i=0; i<(degree+1); ++i, ++m) + dst.local_element(indices[c*n_scalar_cell_dofs + + k*n_child_dofs_1d*n_child_dofs_1d+ + j*n_child_dofs_1d+i]) + += evaluation_data[2*n_child_cell_dofs+m][v]; + } + } + } +} + + + +template +std::size_t +MGTransferMatrixFree::memory_consumption() const +{ + std::size_t memory = MGLevelGlobalTransfer >::memory_consumption(); + memory += MemoryConsumption::memory_consumption(level_dof_indices); + memory += MemoryConsumption::memory_consumption(parent_child_connect); + memory += MemoryConsumption::memory_consumption(n_owned_level_cells); + memory += shape_info.memory_consumption(); + memory += MemoryConsumption::memory_consumption(evaluation_data); + memory += MemoryConsumption::memory_consumption(weights_on_refined); + memory += MemoryConsumption::memory_consumption(dirichlet_indices); + return memory; +} + + +// explicit instantiation +#include "mg_transfer_matrix_free.inst" + + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/multigrid/mg_transfer_matrix_free.inst.in b/source/multigrid/mg_transfer_matrix_free.inst.in new file mode 100644 index 0000000000..25a523b46a --- /dev/null +++ b/source/multigrid/mg_transfer_matrix_free.inst.in @@ -0,0 +1,21 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 1998 - 2014 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + + +for (deal_II_dimension : DIMENSIONS; S1 : REAL_SCALARS) + { + template class MGTransferMatrixFree< deal_II_dimension, S1 >; + }
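For orientation, the following fragment sketches how the new transfer class is intended to slot into a geometric multigrid preconditioner. It is not part of the patch and not a complete program: the multilevel DoFHandler dof_handler (with level DoFs distributed), the wrapped level matrices mg_matrix, the smoother mg_smoother, the coarse-grid solver mg_coarse, an initialized mg_constrained_dofs object, and the space dimension constant dim are all assumed to be set up elsewhere.

// Sketch only: dof_handler, mg_matrix, mg_coarse, mg_smoother,
// mg_constrained_dofs and dim are assumed to exist in the surrounding code.
typedef parallel::distributed::Vector<double> VectorType;

// Build the matrix-free level transfer from the multilevel DoFHandler,
// respecting the level constraints (Dirichlet boundary, refinement edges).
MGTransferMatrixFree<dim, double> mg_transfer(mg_constrained_dofs);
mg_transfer.build(dof_handler);

// Plug the transfer into the usual Multigrid/PreconditionMG combination;
// prolongate() and restrict_and_add() are then called by the V-cycle.
Multigrid<VectorType> mg(dof_handler, mg_matrix, mg_coarse,
                         mg_transfer, mg_smoother, mg_smoother);
PreconditionMG<dim, VectorType, MGTransferMatrixFree<dim, double> >
  preconditioner(dof_handler, mg, mg_transfer);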
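The loop body of compute_shift_within_children() did not survive this rendering, but its role is clear from how the result is consumed in add_child_indices(): it returns the offset of the first DoF of a given child inside the combined lexicographic DoF array of all 2^dim children. The standalone sketch below re-derives that offset under the assumption of the usual tensor-index decomposition of the child number; it is an illustration, not the patch's exact code.

#include <cstdio>

template <int dim>
unsigned int compute_shift_within_children(unsigned int child,
                                           const unsigned int fe_shift_1d,
                                           const unsigned int fe_degree)
{
  // Combined number of DoFs per direction on the two children of a line:
  // fe_degree+1 on the first child plus fe_shift_1d more on the second
  // (fe_shift_1d = fe_degree for continuous elements, fe_degree+1 for DG).
  const unsigned int n_child_dofs_1d = fe_degree + 1 + fe_shift_1d;

  // Decompose the child number into its tensor (lexicographic) coordinates
  // and accumulate the offset direction by direction.
  unsigned int shift = 0, stride = 1;
  for (int d = 0; d < dim; ++d)
    {
      const unsigned int c_d = child % 2; // child coordinate in direction d
      child /= 2;
      shift += c_d * fe_shift_1d * stride;
      stride *= n_child_dofs_1d;
    }
  return shift;
}

int main()
{
  // Continuous Q1 in 2D: fe_shift_1d = 1, three DoFs per direction on the
  // refined patch. Children 0..3 start at fine indices 0, 1, 3, 4.
  for (unsigned int c = 0; c < 4; ++c)
    std::printf("child %u starts at index %u\n",
                c, compute_shift_within_children<2>(c, 1, 1));
  return 0;
}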
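Finally, the kernels in do_prolongate_add()/do_restrict_add() apply the one-dimensional embedding matrix stored in shape_info one space direction at a time (sum factorization), as driven by perform_tensorized_op(). The self-contained example below uses plain C++ and hypothetical names rather than the deal.II evaluator classes; it shows that dimension-by-dimension pattern for a Q1 element in 2D, where two passes with the 3x2 one-dimensional matrix P realize the full tensor-product prolongation P (x) P from one parent cell to its four children.

#include <cstdio>

int main()
{
  // 1D embedding matrix for FE_Q(1): rows = fine nodes {0, 0.5, 1},
  // columns = coarse nodes {0, 1}.
  const double P[3][2] = {{1.0, 0.0}, {0.5, 0.5}, {0.0, 1.0}};

  // Coarse nodal values on a 2x2 tensor grid, here u(x,y) = x + 2*y.
  const double coarse[2][2] = {{0.0, 1.0}, {2.0, 3.0}}; // indexed [y][x]

  // Pass 1: interpolate along x -> 2x3 intermediate array.
  double tmp[2][3];
  for (int y = 0; y < 2; ++y)
    for (int i = 0; i < 3; ++i)
      tmp[y][i] = P[i][0] * coarse[y][0] + P[i][1] * coarse[y][1];

  // Pass 2: interpolate along y -> 3x3 fine array.
  double fine[3][3];
  for (int j = 0; j < 3; ++j)
    for (int i = 0; i < 3; ++i)
      fine[j][i] = P[j][0] * tmp[0][i] + P[j][1] * tmp[1][i];

  // For a bilinear function the prolongation reproduces it exactly:
  // fine[j][i] == 0.5*i + 2*(0.5*j).
  for (int j = 0; j < 3; ++j)
    for (int i = 0; i < 3; ++i)
      std::printf("fine(%d,%d) = %4.2f\n", j, i, fine[j][i]);
  return 0;
}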