From ec7087b88ec2f56427d623842675845f2404bf77 Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Tue, 20 Nov 2018 22:31:19 +0000
Subject: [PATCH] Add support for distributed CUDA MatrixFree

---
 .../matrix_free/cuda_hanging_nodes_internal.h |  25 +-
 .../deal.II/matrix_free/cuda_matrix_free.h    | 153 ++++-
 .../matrix_free/cuda_matrix_free.templates.h  | 527 +++++++++++++-----
 3 files changed, 568 insertions(+), 137 deletions(-)

diff --git a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
index 75a2696808..4a4df3be8e 100644
--- a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
+++ b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
@@ -58,9 +58,11 @@ namespace CUDAWrappers
        */
       template <typename CellIterator>
       void
-      setup_constraints(std::vector<types::global_dof_index> &dof_indices,
-                        const CellIterator &                  cell,
-                        unsigned int &                        mask) const;
+      setup_constraints(
+        std::vector<types::global_dof_index> &dof_indices,
+        const CellIterator &                  cell,
+        const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner,
+        unsigned int &                        mask) const;
 
     private:
       /**
@@ -272,9 +274,10 @@ namespace CUDAWrappers
   template <int dim>
   template <typename CellIterator>
   void
   HangingNodes<dim>::setup_constraints(
-    std::vector<types::global_dof_index> &dof_indices,
-    const CellIterator &                  cell,
-    unsigned int &                        mask) const
+    std::vector<types::global_dof_index> &dof_indices,
+    const CellIterator &                  cell,
+    const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner,
+    unsigned int &                        mask) const
   {
     mask                         = 0;
     const unsigned int n_dofs_1d = fe_degree + 1;
@@ -311,6 +314,11 @@ namespace CUDAWrappers
             // Get indices to read
             neighbor->face(neighbor_face)->get_dof_indices(neighbor_dofs);
+            // If the vector is distributed, we need to transform the
+            // global indices to local ones.
+            if (partitioner)
+              for (auto &index : neighbor_dofs)
+                index = partitioner->global_to_local(index);
 
             if (dim == 2)
               {
@@ -554,6 +562,11 @@ namespace CUDAWrappers
             neighbor_dofs.resize(n_dofs_1d * n_dofs_1d * n_dofs_1d);
             neighbor_cell->get_dof_indices(neighbor_dofs);
+            // If the vector is distributed, we need to transform
+            // the global indices to local ones.
+            if (partitioner)
+              for (auto &index : neighbor_dofs)
+                index = partitioner->global_to_local(index);
 
             for (unsigned int i = 0; i < n_dofs_1d; ++i)
               {
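The global_to_local() translation above is the heart of the distributed support: the device-side arrays are indexed by process-local dof numbers, so every global index read from a cell or a neighbor must be remapped. As a reading aid, here is a minimal, self-contained sketch of how Utilities::MPI::Partitioner performs that mapping; the Partitioner API is real deal.II, while the concrete index ranges are invented for illustration:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>
  #include <deal.II/base/partitioner.h>

  using namespace dealii;

  void partitioner_demo(const MPI_Comm comm)
  {
    const unsigned int rank = Utilities::MPI::this_mpi_process(comm);

    // Hypothetical layout: every rank owns 100 consecutive dofs out of 1000.
    IndexSet owned(1000);
    owned.add_range(100 * rank, 100 * rank + 100);

    // The relevant set additionally contains one dof owned by the previous
    // rank; this plays the role of locally_relevant_dofs in the patch.
    IndexSet relevant(owned);
    if (rank > 0)
      relevant.add_index(100 * rank - 1);

    const Utilities::MPI::Partitioner partitioner(owned, relevant, comm);

    // Owned dofs map to [0, local_size()); ghost dofs are appended after
    // them. For instance, global dof 100 * rank + 5 becomes local index 5,
    const unsigned int local_owned = partitioner.global_to_local(100 * rank + 5);
    // while the ghost dof (if any) maps to local_size(), the first ghost slot.
    const unsigned int local_ghost =
      rank > 0 ? partitioner.global_to_local(100 * rank - 1) : 0;
    (void)local_owned;
    (void)local_ghost;
  }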
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h
index b75ff1f9cd..75af6c004e 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.h
@@ -21,6 +21,7 @@
 
 #ifdef DEAL_II_COMPILER_CUDA_AWARE
 
+#  include <deal.II/base/partitioner.h>
 #  include
 #  include
 
@@ -32,6 +33,7 @@
 #  include
 
 #  include
+#  include <deal.II/lac/la_parallel_vector.h>
 
 DEAL_II_NAMESPACE_OPEN
 
@@ -142,6 +144,9 @@ namespace CUDAWrappers
      */
     MatrixFree();
 
+    /**
+     * Return the length of the padding.
+     */
     unsigned int
     get_padding_length() const;
 
@@ -151,7 +156,35 @@ namespace CUDAWrappers
      * degrees of freedom, the DoFHandler and the mapping describe the
      * transformation from unit to real cell, and the finite element
      * underlying the DoFHandler together with the quadrature formula
-     * describe the local operations.
+     * describe the local operations. This function supports distributed
+     * computation (MPI).
+     */
+    void
+    reinit(const Mapping<dim> &             mapping,
+           const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           const MPI_Comm &                 comm,
+           const AdditionalData additional_data = AdditionalData());
+
+    /**
+     * Initializes the data structures. Same as above but using a Q1 mapping.
+     */
+    void
+    reinit(const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           const MPI_Comm &                 comm,
+           const AdditionalData additional_data = AdditionalData());
+
+    /**
+     * Extracts the information needed to perform loops over cells. The
+     * DoFHandler and AffineConstraints objects describe the layout of
+     * degrees of freedom, the DoFHandler and the mapping describe the
+     * transformation from unit to real cell, and the finite element
+     * underlying the DoFHandler together with the quadrature formula
+     * describe the local operations. This function does not support
+     * distributed computation.
      */
     void
     reinit(const Mapping<dim> &   mapping,
@@ -185,10 +218,19 @@ namespace CUDAWrappers
               const VectorType &src,
               VectorType &      dst) const;
 
+    /**
+     * Copy the values of the constrained entries from @p src to @p dst. This
+     * is used to impose zero Dirichlet boundary conditions.
+     */
     template <typename VectorType>
     void
     copy_constrained_values(const VectorType &src, VectorType &dst) const;
 
+    /**
+     * Set the entries in @p dst corresponding to constrained values to
+     * @p val. The main purpose of this function is to set the constrained
+     * entries of the source vector used in cell_loop() to zero.
+     */
     template <typename VectorType>
     void
     set_constrained_values(const Number val, VectorType &dst) const;
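From user code, the new overloads are meant to be called like the host-side MatrixFree: set up once, then run cell_loop(). A sketch of the intended call site, assuming an already distributed DoFHandler and AffineConstraints; fe_degree is a placeholder:

  #include <deal.II/base/quadrature_lib.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/matrix_free/cuda_matrix_free.h>

  using namespace dealii;

  template <int dim>
  void setup_gpu_matrix_free(const DoFHandler<dim> &          dof_handler,
                             const AffineConstraints<double> &constraints,
                             const unsigned int               fe_degree)
  {
    CUDAWrappers::MatrixFree<dim, double> mf_data;
    typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
      additional_data;

    // The new overload: identical to the serial one except for the
    // communicator, which triggers creation of the internal partitioner.
    mf_data.reinit(dof_handler,
                   constraints,
                   QGauss<1>(fe_degree + 1),
                   MPI_COMM_WORLD,
                   additional_data);

    // mf_data.cell_loop(local_operator, src, dst) can now be called with
    // LinearAlgebra::distributed::Vector arguments (see the helpers below).
  }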
@@ -206,6 +248,107 @@ namespace CUDAWrappers
     memory_consumption() const;
 
   private:
+    /**
+     * Initializes the data structures.
+     */
+    void
+    reinit(const Mapping<dim> &             mapping,
+           const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           std::shared_ptr<const MPI_Comm>  comm,
+           const AdditionalData             additional_data);
+
+    /**
+     * Helper function. Loop over all the cells and apply the functor on each
+     * element in parallel. This function is used when MPI is not used.
+     */
+    template <typename functor, typename VectorType>
+    void
+    serial_cell_loop(const functor &   func,
+                     const VectorType &src,
+                     VectorType &      dst) const;
+
+    /**
+     * Helper function. Loop over all the cells and apply the functor on each
+     * element in parallel. This function is used when MPI is used.
+     */
+    template <typename functor>
+    void
+    distributed_cell_loop(
+      const functor &                                    func,
+      const LinearAlgebra::distributed::Vector<Number> &src,
+      LinearAlgebra::distributed::Vector<Number> &      dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because cell_loop() needs
+     * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
+     */
+    template <typename functor>
+    void
+    distributed_cell_loop(
+      const functor &                                    func,
+      const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &      dst) const;
+
+    /**
+     * Helper function. Copy the values of the constrained entries of @p src
+     * to @p dst. This function is used when MPI is not used.
+     */
+    template <typename VectorType>
+    void
+    serial_copy_constrained_values(const VectorType &src,
+                                   VectorType &      dst) const;
+
+    /**
+     * Helper function. Copy the values of the constrained entries of @p src
+     * to @p dst. This function is used when MPI is used.
+     */
+    void
+    distributed_copy_constrained_values(
+      const LinearAlgebra::distributed::Vector<Number> &src,
+      LinearAlgebra::distributed::Vector<Number> &      dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because copy_constrained_values() needs
+     * distributed_copy_constrained_values() to exist for
+     * LinearAlgebra::CUDAWrappers::Vector.
+     */
+    void
+    distributed_copy_constrained_values(
+      const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &      dst) const;
+
+    /**
+     * Helper function. Set the constrained entries of @p dst to @p val. This
+     * function is used when MPI is not used.
+     */
+    template <typename VectorType>
+    void
+    serial_set_constrained_values(const Number val, VectorType &dst) const;
+
+    /**
+     * Helper function. Set the constrained entries of @p dst to @p val. This
+     * function is used when MPI is used.
+     */
+    void
+    distributed_set_constrained_values(
+      const Number                                val,
+      LinearAlgebra::distributed::Vector<Number> &dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because set_constrained_values() needs
+     * distributed_set_constrained_values() to exist for
+     * LinearAlgebra::CUDAWrappers::Vector.
+     */
+    void
+    distributed_set_constrained_values(
+      const Number                                 val,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &dst) const;
+
     /**
      * Parallelization scheme used, parallelization over degrees of freedom or
      * over cells.
@@ -284,7 +427,13 @@ namespace CUDAWrappers
      */
     std::vector<dim3> block_dim;
 
-    // Parallelization parameter
+    /**
+     * Unique pointer to a Partitioner for distributed Vectors used in
+     * cell_loop(). When MPI is not used the pointer is null.
+     */
+    std::unique_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+    // Parallelization parameters
     unsigned int cells_per_block;
     dim3         constraint_grid_dim;
     dim3         constraint_block_dim;
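The functor handed to cell_loop() and the two loop helpers is a user-provided device operator. Its expected shape is sketched below for a plain Laplace operator, modeled on what later became the step-64 tutorial; the FEEvaluation and SharedData signatures are assumptions based on that interface and may differ slightly from the code base at the time of this patch:

  #include <deal.II/matrix_free/cuda_fe_evaluation.h>
  #include <deal.II/matrix_free/cuda_matrix_free.h>

  using namespace dealii;

  // Quadrature-point operation: submit the gradient unchanged.
  template <int dim, int fe_degree, typename Number>
  class LaplaceQuad
  {
  public:
    __device__ void operator()(
      CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number>
        *              fe_eval,
      const unsigned int q) const
    {
      fe_eval->submit_gradient(fe_eval->get_gradient(q), q);
    }
  };

  // Cell-local operator passed to cell_loop(): one invocation per cell.
  template <int dim, int fe_degree, typename Number>
  class LocalLaplaceOperator
  {
  public:
    __device__ void operator()(
      const unsigned int                                                  cell,
      const typename CUDAWrappers::MatrixFree<dim, Number>::Data *gpu_data,
      CUDAWrappers::SharedData<dim, Number> *shared_data,
      const Number *                         src,
      Number *                               dst) const
    {
      CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number>
        fe_eval(cell, gpu_data, shared_data);
      fe_eval.read_dof_values(src);
      fe_eval.evaluate(false, true); // values: no, gradients: yes
      fe_eval.apply_for_each_quad_point(LaplaceQuad<dim, fe_degree, Number>());
      fe_eval.integrate(false, true);
      fe_eval.distribute_local_to_global(dst);
    }
  };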
diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index 435b53b6a0..89625a885c 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -23,6 +23,9 @@
 #  include
 #  include
+#  include <deal.II/dofs/dof_tools.h>
+
+#  include <deal.II/lac/la_parallel_vector.h>
 
 #  include
 
@@ -103,6 +106,10 @@ namespace CUDAWrappers
       cudaError_t error_code = cudaMalloc(array_device, n * sizeof(Number1));
       AssertCuda(error_code);
 
+      // TODO: this is dangerous because we do a memcpy between different
+      // data types. It is, however, useful to move Point<dim> objects to the
+      // device, where they are stored as Tensor<1, dim>. To make this
+      // function safer, we would need a Point class usable on the device.
       error_code = cudaMemcpy(*array_device,
                               array_host.data(),
                               n * sizeof(Number1),
@@ -137,7 +144,10 @@ namespace CUDAWrappers
       template <typename CellFilter>
       void
-      get_cell_data(const CellFilter &cell, const unsigned int cell_id);
+      get_cell_data(
+        const CellFilter & cell,
+        const unsigned int cell_id,
+        const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner);
 
       void
       alloc_and_copy_arrays(const unsigned int cell);
@@ -274,17 +284,25 @@ namespace CUDAWrappers
    template <int dim, typename Number>
    template <typename CellFilter>
    void
-   ReinitHelper<dim, Number>::get_cell_data(const CellFilter & cell,
-                                            const unsigned int cell_id)
+   ReinitHelper<dim, Number>::get_cell_data(
+     const CellFilter & cell,
+     const unsigned int cell_id,
+     const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner)
    {
      cell->get_dof_indices(local_dof_indices);
-
+     // When using MPI, local_dof_indices contains global dof indices; we
+     // need to transform them into indices local to the current MPI process.
+     if (partitioner)
+       for (auto &index : local_dof_indices)
+         index = partitioner->global_to_local(index);
      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]];
 
      hanging_nodes.setup_constraints(lexicographic_dof_indices,
                                      cell,
+                                     partitioner,
                                      constraint_mask_host[cell_id]);
 
      memcpy(&local_to_global_host[cell_id * padding_length],
@@ -414,12 +432,16 @@ namespace CUDAWrappers
     copy_constrained_dofs(
       const dealii::types::global_dof_index *constrained_dofs,
       const unsigned int                     n_constrained_dofs,
+      const unsigned int                     size,
       const Number *                         src,
       Number *                               dst)
     {
       const unsigned int dof =
         threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
-      if (dof < n_constrained_dofs)
+      // When working with distributed vectors, the constrained dofs are
+      // computed on the ghosted layout, but here we copy entries between
+      // non-ghosted vectors, so ghost entries (local index >= size) must be
+      // skipped.
+      if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
         dst[constrained_dofs[dof]] = src[constrained_dofs[dof]];
     }
 
@@ -430,12 +452,16 @@ namespace CUDAWrappers
     set_constrained_dofs(
       const dealii::types::global_dof_index *constrained_dofs,
       const unsigned int                     n_constrained_dofs,
+      const unsigned int                     size,
       Number                                 val,
       Number *                               dst)
     {
       const unsigned int dof =
         threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
-      if (dof < n_constrained_dofs)
+      // When working with distributed vectors, the constrained dofs are
+      // computed on the ghosted layout, but here we set entries of
+      // non-ghosted vectors, so ghost entries (local index >= size) must be
+      // skipped.
+      if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
         dst[constrained_dofs[dof]] = val;
     }
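A host-side analogue makes the new size guard concrete. With the hypothetical partition used earlier (100 owned dofs and one ghost dof per process), constrained_dofs[] is computed on the ghosted layout, i.e. local indices 0..100, while the vectors passed to copy_constrained_values() store only the 100 owned entries; a constrained ghost dof with local index 100 must therefore be skipped:

  // Host-side sketch of the guard in copy_constrained_dofs() /
  // set_constrained_dofs(). constrained_dofs[] holds local indices computed
  // on the ghosted layout; size is the local range of the non-ghosted
  // vectors (here it would be 100).
  void apply_guarded(const unsigned int *constrained_dofs,
                     const unsigned int  n_constrained_dofs,
                     const unsigned int  size,
                     const double *      src,
                     double *            dst)
  {
    for (unsigned int dof = 0; dof < n_constrained_dofs; ++dof)
      if (constrained_dofs[dof] < size) // skip ghost entries, e.g. index 100
        dst[constrained_dofs[dof]] = src[constrained_dofs[dof]];
  }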
@@ -491,6 +517,247 @@ namespace CUDAWrappers
                                   const DoFHandler<dim> &          dof_handler,
                                   const AffineConstraints<Number> &constraints,
                                   const Quadrature<1> &            quad,
+                                  const MPI_Comm &                 comm,
+                                  const AdditionalData additional_data)
+  {
+    reinit(mapping,
+           dof_handler,
+           constraints,
+           quad,
+           std::make_shared<const MPI_Comm>(comm),
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const MPI_Comm &                 comm,
+                                  const AdditionalData additional_data)
+  {
+    reinit(StaticMappingQ1<dim>::mapping,
+           dof_handler,
+           constraints,
+           quad,
+           std::make_shared<const MPI_Comm>(comm),
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const Mapping<dim> &             mapping,
+                                  const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const AdditionalData additional_data)
+  {
+    reinit(mapping, dof_handler, constraints, quad, nullptr, additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const AdditionalData additional_data)
+  {
+    reinit(StaticMappingQ1<dim>::mapping,
+           dof_handler,
+           constraints,
+           quad,
+           nullptr,
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  typename MatrixFree<dim, Number>::Data
+  MatrixFree<dim, Number>::get_data(unsigned int color) const
+  {
+    Data data_copy;
+    data_copy.q_points        = q_points[color];
+    data_copy.local_to_global = local_to_global[color];
+    data_copy.inv_jacobian    = inv_jacobian[color];
+    data_copy.JxW             = JxW[color];
+    data_copy.constraint_mask = constraint_mask[color];
+    data_copy.n_cells         = n_cells[color];
+    data_copy.padding_length  = padding_length;
+    data_copy.row_start       = row_start[color];
+
+    return data_copy;
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::free()
+  {
+    for (unsigned int i = 0; i < q_points.size(); ++i)
+      {
+        if (q_points[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(q_points[i]);
+            AssertCuda(cuda_error);
+            q_points[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < local_to_global.size(); ++i)
+      {
+        if (local_to_global[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(local_to_global[i]);
+            AssertCuda(cuda_error);
+            local_to_global[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
+      {
+        if (inv_jacobian[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
+            AssertCuda(cuda_error);
+            inv_jacobian[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < JxW.size(); ++i)
+      {
+        if (JxW[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(JxW[i]);
+            AssertCuda(cuda_error);
+            JxW[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < constraint_mask.size(); ++i)
+      {
+        if (constraint_mask[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(constraint_mask[i]);
+            AssertCuda(cuda_error);
+            constraint_mask[i] = nullptr;
+          }
+      }
+
+    q_points.clear();
+    local_to_global.clear();
+    inv_jacobian.clear();
+    JxW.clear();
+    constraint_mask.clear();
+
+    if (constrained_dofs != nullptr)
+      {
+        cudaError_t cuda_error = cudaFree(constrained_dofs);
+        AssertCuda(cuda_error);
+        constrained_dofs = nullptr;
+      }
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename VectorType>
+  void
+  MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
+                                                   VectorType &      dst) const
+  {
+    static_assert(
+      std::is_same<typename VectorType::value_type, Number>::value,
+      "VectorType::value_type and Number should be of the same type.");
+    if (partitioner)
+      distributed_copy_constrained_values(src, dst);
+    else
+      serial_copy_constrained_values(src, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename VectorType>
+  void
+  MatrixFree<dim, Number>::set_constrained_values(Number      val,
+                                                  VectorType &dst) const
+  {
+    static_assert(
+      std::is_same<typename VectorType::value_type, Number>::value,
+      "VectorType::value_type and Number should be of the same type.");
+    if (partitioner)
+      distributed_set_constrained_values(val, dst);
+    else
+      serial_set_constrained_values(val, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  unsigned int
+  MatrixFree<dim, Number>::get_padding_length() const
+  {
+    return padding_length;
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename functor, typename VectorType>
+  void
+  MatrixFree<dim, Number>::cell_loop(const functor &   func,
+                                     const VectorType &src,
+                                     VectorType &      dst) const
+  {
+    if (partitioner)
+      distributed_cell_loop(func, src, dst);
+    else
+      serial_cell_loop(func, src, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  std::size_t
+  MatrixFree<dim, Number>::memory_consumption() const
+  {
+    // First compute the size of n_cells, row_start, kernel launch
+    // parameters, and constrained_dofs
+    std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
+                        2 * n_colors * sizeof(dim3) +
+                        n_constrained_dofs * sizeof(unsigned int);
+
+    // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+    for (unsigned int i = 0; i < n_colors; ++i)
+      {
+        bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
+                 n_cells[i] * padding_length * dim * dim * sizeof(Number) +
+                 n_cells[i] * padding_length * sizeof(Number) +
+                 n_cells[i] * padding_length * sizeof(point_type) +
+                 n_cells[i] * sizeof(unsigned int);
+      }
+
+    return bytes;
+  }
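To give a feel for the numbers returned by memory_consumption(): assuming one color with 1000 cells, padding_length 64, dim 3, Number = double, and a 24-byte point_type on a typical LP64 platform, the per-color term works out as follows (hypothetical figures, purely for illustration):

  #include <cstddef>

  constexpr std::size_t n_cells_0      = 1000;
  constexpr std::size_t padding_length = 64;
  constexpr std::size_t per_entry = sizeof(unsigned int)     // local_to_global
                                    + 3 * 3 * sizeof(double) // inv_jacobian
                                    + sizeof(double)         // JxW
                                    + 3 * sizeof(double);    // q_points
  static_assert(per_entry == 108, "4 + 72 + 8 + 24 bytes per padded entry");
  // 1000 * 64 * 108 + 1000 * 4 = 6,916,000 bytes, i.e. about 6.9 MB per
  // color, dominated by the dim x dim inverse Jacobians.
  constexpr std::size_t color_bytes =
    n_cells_0 * padding_length * per_entry + n_cells_0 * sizeof(unsigned int);
  static_assert(color_bytes == 6916000, "worked example");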
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const Mapping<dim> &             mapping,
+                                  const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  std::shared_ptr<const MPI_Comm>  comm,
                                   const AdditionalData additional_data)
   {
     if (typeid(Number) == typeid(double))
@@ -568,6 +835,15 @@ namespace CUDAWrappers
 
     n_colors = graph.size();
     helper.setup_color_arrays(n_colors);
+
+    IndexSet locally_relevant_dofs;
+    if (comm)
+      {
+        DoFTools::extract_locally_relevant_dofs(dof_handler,
+                                                locally_relevant_dofs);
+        partitioner = std_cxx14::make_unique<Utilities::MPI::Partitioner>(
+          dof_handler.locally_owned_dofs(), locally_relevant_dofs, *comm);
+      }
     for (unsigned int i = 0; i < n_colors; ++i)
       {
         n_cells[i] = graph[i].size();
@@ -575,7 +851,7 @@ namespace CUDAWrappers
         typename std::vector<CellFilter>::iterator cell     = graph[i].begin(),
                                                    end_cell = graph[i].end();
         for (unsigned int cell_id = 0; cell != end_cell; ++cell, ++cell_id)
-          helper.get_cell_data(*cell, cell_id);
+          helper.get_cell_data(*cell, cell_id, partitioner);
 
         helper.alloc_and_copy_arrays(i);
       }
@@ -606,14 +882,33 @@ namespace CUDAWrappers
         std::vector<dealii::types::global_dof_index> constrained_dofs_host(
           n_constrained_dofs);
 
-        unsigned int       i_constraint = 0;
-        const unsigned int n_dofs       = dof_handler.n_dofs();
-        for (unsigned int i = 0; i < n_dofs; ++i)
+        if (partitioner)
+          {
+            const unsigned int n_local_dofs =
+              locally_relevant_dofs.n_elements();
+            unsigned int i_constraint = 0;
+            for (unsigned int i = 0; i < n_local_dofs; ++i)
+              {
+                // is_constrained() takes a global dof index while
+                // constrained_dofs_host stores local indices, so we have to
+                // translate back and forth.
+                if (constraints.is_constrained(partitioner->local_to_global(i)))
+                  {
+                    constrained_dofs_host[i_constraint] = i;
+                    ++i_constraint;
+                  }
+              }
+          }
+        else
           {
-            if (constraints.is_constrained(i))
+            const unsigned int n_local_dofs = dof_handler.n_dofs();
+            unsigned int       i_constraint = 0;
+            for (unsigned int i = 0; i < n_local_dofs; ++i)
               {
-                constrained_dofs_host[i_constraint] = i;
-                ++i_constraint;
+                if (constraints.is_constrained(i))
+                  {
+                    constrained_dofs_host[i_constraint] = i;
+                    ++i_constraint;
+                  }
               }
           }
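The is_constrained() queries above only work if the AffineConstraints object was built on the locally relevant index set. The standard deal.II setup that satisfies this assumption (shown here for completeness, with boundary id 0 and homogeneous Dirichlet values as an example) looks like:

  #include <deal.II/base/function.h>
  #include <deal.II/base/index_set.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>
  #include <deal.II/lac/affine_constraints.h>
  #include <deal.II/numerics/vector_tools.h>

  using namespace dealii;

  template <int dim>
  void make_constraints(const DoFHandler<dim> &    dof_handler,
                        AffineConstraints<double> &constraints)
  {
    IndexSet locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs(dof_handler,
                                            locally_relevant_dofs);
    constraints.clear();
    constraints.reinit(locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    VectorTools::interpolate_boundary_values(
      dof_handler, 0, Functions::ZeroFunction<dim>(), constraints);
    constraints.close();
  }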
@@ -634,91 +929,62 @@ namespace CUDAWrappers
   template <int dim, typename Number>
-  typename MatrixFree<dim, Number>::Data
-  MatrixFree<dim, Number>::get_data(unsigned int color) const
+  template <typename functor, typename VectorType>
+  void
+  MatrixFree<dim, Number>::serial_cell_loop(const functor &   func,
+                                            const VectorType &src,
+                                            VectorType &      dst) const
   {
-    Data data_copy;
-    data_copy.q_points        = q_points[color];
-    data_copy.local_to_global = local_to_global[color];
-    data_copy.inv_jacobian    = inv_jacobian[color];
-    data_copy.JxW             = JxW[color];
-    data_copy.constraint_mask = constraint_mask[color];
-    data_copy.n_cells         = n_cells[color];
-    data_copy.padding_length  = padding_length;
-    data_copy.row_start       = row_start[color];
-
-    return data_copy;
+    // Execute the loop on the cells
+    for (unsigned int i = 0; i < n_colors; ++i)
+      internal::apply_kernel_shmem<Number, functor>
+        <<<grid_dim[i], block_dim[i]>>>(func,
+                                        get_data(i),
+                                        src.get_values(),
+                                        dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
+  template <typename functor>
   void
-  MatrixFree<dim, Number>::free()
+  MatrixFree<dim, Number>::distributed_cell_loop(
+    const functor &                                    func,
+    const LinearAlgebra::distributed::Vector<Number> &src,
+    LinearAlgebra::distributed::Vector<Number> &      dst) const
   {
-    for (unsigned int i = 0; i < q_points.size(); ++i)
-      {
-        if (q_points[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(q_points[i]);
-            AssertCuda(cuda_error);
-            q_points[i] = nullptr;
-          }
-      }
-
-    for (unsigned int i = 0; i < local_to_global.size(); ++i)
-      {
-        if (local_to_global[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(local_to_global[i]);
-            AssertCuda(cuda_error);
-            local_to_global[i] = nullptr;
-          }
-      }
-
-    for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
-      {
-        if (inv_jacobian[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
-            AssertCuda(cuda_error);
-            inv_jacobian[i] = nullptr;
-          }
-      }
+    // Create the ghosted source and the ghosted destination
+    LinearAlgebra::distributed::Vector<Number> ghosted_src(partitioner);
+    LinearAlgebra::distributed::Vector<Number> ghosted_dst(ghosted_src);
+    ghosted_src = src;
 
-    for (unsigned int i = 0; i < JxW.size(); ++i)
-      {
-        if (JxW[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(JxW[i]);
-            AssertCuda(cuda_error);
-            JxW[i] = nullptr;
-          }
-      }
+    // Execute the loop on the cells
+    for (unsigned int i = 0; i < n_colors; ++i)
+      internal::apply_kernel_shmem<Number, functor>
+        <<<grid_dim[i], block_dim[i]>>>(func,
+                                        get_data(i),
+                                        ghosted_src.get_values(),
+                                        ghosted_dst.get_values());
 
-    for (unsigned int i = 0; i < constraint_mask.size(); ++i)
-      {
-        if (constraint_mask[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(constraint_mask[i]);
-            AssertCuda(cuda_error);
-            constraint_mask[i] = nullptr;
-          }
-      }
+    // Add the ghosted values
+    ghosted_dst.compress(VectorOperation::add);
+    dst = ghosted_dst;
+  }
 
 
-    q_points.clear();
-    local_to_global.clear();
-    inv_jacobian.clear();
-    JxW.clear();
-    constraint_mask.clear();
 
-    if (constrained_dofs != nullptr)
-      {
-        cudaError_t cuda_error = cudaFree(constrained_dofs);
-        AssertCuda(cuda_error);
-        constrained_dofs = nullptr;
-      }
+  template <int dim, typename Number>
+  template <typename functor>
+  void
+  MatrixFree<dim, Number>::distributed_cell_loop(
+    const functor &,
+    const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+  {
+    Assert(false, ExcInternalError());
   }
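distributed_cell_loop() above follows the usual deal.II ghost protocol: assigning a non-ghosted vector to a ghosted one imports the owned values and makes the ghost entries consistent, and compress(VectorOperation::add) sends the sums accumulated in ghost entries back to the owning process. A host-side sketch of the same pattern, where partitioner stands for any shared Partitioner describing the layout:

  #include <deal.II/base/partitioner.h>
  #include <deal.II/lac/la_parallel_vector.h>

  #include <memory>

  using namespace dealii;

  void ghost_exchange_pattern(
    const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
    const LinearAlgebra::distributed::Vector<double> &        src,
    LinearAlgebra::distributed::Vector<double> &              dst)
  {
    LinearAlgebra::distributed::Vector<double> ghosted_src(partitioner);
    LinearAlgebra::distributed::Vector<double> ghosted_dst(partitioner);

    ghosted_src = src; // copies owned values, makes ghost entries consistent

    // ... cell-local work writing into owned and ghost entries of
    // ghosted_dst goes here ...

    ghosted_dst.compress(VectorOperation::add); // return ghost contributions
    dst = ghosted_dst;                          // keep the owned part
  }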
@@ -726,15 +992,15 @@ namespace CUDAWrappers
   template <int dim, typename Number>
   template <typename VectorType>
   void
-  MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
-                                                   VectorType &      dst) const
+  MatrixFree<dim, Number>::serial_copy_constrained_values(
+    const VectorType &src,
+    VectorType &      dst) const
   {
-    static_assert(
-      std::is_same<typename VectorType::value_type, Number>::value,
-      "VectorType::value_type and Number should be of the same type.");
+    Assert(src.size() == dst.size(),
+           ExcMessage("src and dst vectors have different size."));
     internal::copy_constrained_dofs<Number>
       <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
                                                       n_constrained_dofs,
+                                                      src.size(),
                                                       src.get_values(),
                                                       dst.get_values());
   }
 
 
 
@@ -742,70 +1008,73 @@ namespace CUDAWrappers
   template <int dim, typename Number>
-  template <typename VectorType>
   void
-  MatrixFree<dim, Number>::set_constrained_values(Number      val,
-                                                  VectorType &dst) const
+  MatrixFree<dim, Number>::distributed_copy_constrained_values(
+    const LinearAlgebra::distributed::Vector<Number> &src,
+    LinearAlgebra::distributed::Vector<Number> &      dst) const
   {
-    static_assert(
-      std::is_same<typename VectorType::value_type, Number>::value,
-      "VectorType::value_type and Number should be of the same type.");
-    internal::set_constrained_dofs
+    Assert(src.local_size() == dst.local_size(),
+           ExcMessage("src and dst vectors have different local size."));
+    internal::copy_constrained_dofs<Number>
       <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
                                                       n_constrained_dofs,
-                                                      val,
+                                                      src.local_size(),
+                                                      src.get_values(),
                                                       dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
-  unsigned int
-  MatrixFree<dim, Number>::get_padding_length() const
+  void
+  MatrixFree<dim, Number>::distributed_copy_constrained_values(
+    const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
   {
-    return padding_length;
+    Assert(false, ExcInternalError());
   }
 
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename VectorType>
   void
-  MatrixFree<dim, Number>::cell_loop(const functor &   func,
-                                     const VectorType &src,
-                                     VectorType &      dst) const
+  MatrixFree<dim, Number>::serial_set_constrained_values(const Number val,
+                                                         VectorType & dst) const
   {
-    for (unsigned int i = 0; i < n_colors; ++i)
-      internal::apply_kernel_shmem<Number, functor>
-        <<<grid_dim[i], block_dim[i]>>>(func,
-                                        get_data(i),
-                                        src.get_values(),
-                                        dst.get_values());
+    internal::set_constrained_dofs<Number>
+      <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+                                                      n_constrained_dofs,
+                                                      dst.size(),
+                                                      val,
+                                                      dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
-  std::size_t
-  MatrixFree<dim, Number>::memory_consumption() const
+  void
+  MatrixFree<dim, Number>::distributed_set_constrained_values(
+    const Number                                val,
+    LinearAlgebra::distributed::Vector<Number> &dst) const
   {
-    // First compute the size of n_cells, row_start, kernel launch
-    // parameters, and constrained_dofs
-    std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
-                        2 * n_colors * sizeof(dim3) +
-                        n_constrained_dofs * sizeof(unsigned int);
-
-    // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
-    for (unsigned int i = 0; i < n_colors; ++i)
-      {
-        bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
-                 n_cells[i] * padding_length * dim * dim * sizeof(Number) +
-                 n_cells[i] * padding_length * sizeof(Number) +
-                 n_cells[i] * padding_length * sizeof(point_type) +
-                 n_cells[i] * sizeof(unsigned int);
-      }
+    internal::set_constrained_dofs<Number>
+      <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+                                                      n_constrained_dofs,
+                                                      dst.local_size(),
+                                                      val,
+                                                      dst.get_values());
+  }
 
-    return bytes;
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::distributed_set_constrained_values(
+    const Number,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+  {
+    Assert(false, ExcInternalError());
   }
 } // namespace CUDAWrappers
-- 
2.39.5
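Putting the pieces together, a matrix-free operator built on this class wires the new entry points into a vmult(); a sketch under the same assumptions as the earlier examples, reusing the hypothetical LocalLaplaceOperator functor:

  #include <deal.II/lac/la_parallel_vector.h>
  #include <deal.II/matrix_free/cuda_matrix_free.h>

  using namespace dealii;

  template <int dim, int fe_degree, typename Number>
  class LaplaceOperator
  {
  public:
    using VectorType = LinearAlgebra::distributed::Vector<Number>;

    // Matrix-free "matrix-vector product" built from the entry points added
    // by this patch; with a communicator-based reinit(), cell_loop() and
    // copy_constrained_values() take the distributed code paths.
    void vmult(VectorType &dst, const VectorType &src) const
    {
      dst = 0.;
      LocalLaplaceOperator<dim, fe_degree, Number> local_operator;
      mf_data.cell_loop(local_operator, src, dst);
      // Constrained (e.g. boundary) entries behave like identity rows.
      mf_data.copy_constrained_values(src, dst);
    }

  private:
    CUDAWrappers::MatrixFree<dim, Number> mf_data;
  };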