From f0e6dbd516596fcf6a4ee86e6a2ce52f79d584e5 Mon Sep 17 00:00:00 2001 From: Daniel Arndt Date: Fri, 2 Aug 2024 12:52:32 +0000 Subject: [PATCH] Fix Tpetra with MemorySpace::Default --- examples/step-86/step-86.cc | 2 +- include/deal.II/base/index_set.h | 11 ++- include/deal.II/lac/read_write_vector.h | 24 ++--- .../deal.II/lac/read_write_vector.templates.h | 57 +++++++----- ...nos_tpetra_block_sparse_matrix.templates.h | 5 +- .../trilinos_tpetra_communication_pattern.h | 6 +- .../trilinos_tpetra_sparse_matrix.templates.h | 39 +++++--- include/deal.II/lac/trilinos_tpetra_vector.h | 2 +- .../lac/trilinos_tpetra_vector.templates.h | 53 +++++++---- include/deal.II/lac/vector.templates.h | 16 +++- include/deal.II/lac/vector_element_access.h | 56 ++++++------ source/base/index_set.cc | 66 ++++++++++++-- .../trilinos_tpetra_communication_pattern.cc | 67 +++++++++----- .../lac/trilinos_tpetra_sparsity_pattern.cc | 88 ++++++++++++++++--- 14 files changed, 350 insertions(+), 142 deletions(-) diff --git a/examples/step-86/step-86.cc b/examples/step-86/step-86.cc index cc54612330..3c94ac0b4f 100644 --- a/examples/step-86/step-86.cc +++ b/examples/step-86/step-86.cc @@ -591,7 +591,7 @@ namespace Step86 // the constrained dofs corresponding to hanging nodes (i.e., those for // which the lines of the `current_constraints` contain at least one other // entry), and to the difference between the input vector and the actual - // solution on those constraints that correspond to boundary conditions.  + // solution on those constraints that correspond to boundary conditions. for (const auto &c : current_constraints.get_lines()) if (locally_owned_dofs.is_element(c.index)) { diff --git a/include/deal.II/base/index_set.h b/include/deal.II/base/index_set.h index 9d143f4dd4..d444b11356 100644 --- a/include/deal.II/base/index_set.h +++ b/include/deal.II/base/index_set.h @@ -136,9 +136,10 @@ public: /** * Constructor from a Trilinos Teuchos::RCP. */ + template explicit IndexSet( - const Teuchos::RCP> - &map); + const Teuchos::RCP< + const Tpetra::Map> &map); # endif // DEAL_II_TRILINOS_WITH_TPETRA /** @@ -608,11 +609,13 @@ public: const bool overlapping = false) const; # ifdef DEAL_II_TRILINOS_WITH_TPETRA - Tpetra::Map + template + Tpetra::Map make_tpetra_map(const MPI_Comm communicator = MPI_COMM_WORLD, const bool overlapping = false) const; - Teuchos::RCP> + template + Teuchos::RCP> make_tpetra_map_rcp(const MPI_Comm communicator = MPI_COMM_WORLD, const bool overlapping = false) const; # endif diff --git a/include/deal.II/lac/read_write_vector.h b/include/deal.II/lac/read_write_vector.h index 53598dfbb0..9657dd1c06 100644 --- a/include/deal.II/lac/read_write_vector.h +++ b/include/deal.II/lac/read_write_vector.h @@ -417,25 +417,25 @@ namespace LinearAlgebra * communication pattern is used multiple times. This can be used to improve * performance. */ - template + template std::enable_if_t && dealii::is_tpetra_type::value> import_elements( - const TpetraWrappers::Vector &tpetra_vec, - VectorOperation::values operation, + const TpetraWrappers::Vector &tpetra_vec, + VectorOperation::values operation, const std::shared_ptr &communication_pattern = {}); /** * @deprecated Use import_elements() instead. 
*/ - template + template DEAL_II_DEPRECATED std::enable_if_t && dealii::is_tpetra_type::value> - import(const TpetraWrappers::Vector &V, - VectorOperation::values operation, - const std::shared_ptr - &communication_pattern = {}) + import(const TpetraWrappers::Vector &V, + VectorOperation::values operation, + const std::shared_ptr + &communication_pattern = {}) { import_elements(V, operation, communication_pattern); } @@ -741,11 +741,12 @@ namespace LinearAlgebra * vector @p tpetra_vector. This is an helper function and it should not be * used directly. */ - template + template std::enable_if_t && dealii::is_tpetra_type::value> import_elements( - const Tpetra::Vector + const Tpetra:: + Vector &tpetra_vector, const IndexSet &locally_owned_elements, VectorOperation::values operation, @@ -781,7 +782,8 @@ namespace LinearAlgebra * Return a TpetraWrappers::CommunicationPattern and store it for future * use. */ - TpetraWrappers::CommunicationPattern + template + TpetraWrappers::CommunicationPattern create_tpetra_comm_pattern(const IndexSet &source_index_set, const MPI_Comm mpi_comm); # endif diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h index 0a252f6237..ef386c0fbf 100644 --- a/include/deal.II/lac/read_write_vector.templates.h +++ b/include/deal.II/lac/read_write_vector.templates.h @@ -509,18 +509,24 @@ namespace LinearAlgebra #ifdef DEAL_II_TRILINOS_WITH_TPETRA template - template + template std::enable_if_t && dealii::is_tpetra_type::value> ReadWriteVector::import_elements( - const Tpetra::Vector &vector, + const Tpetra::Vector + &vector, const IndexSet &source_elements, VectorOperation::values operation, const MPI_Comm mpi_comm, const std::shared_ptr &communication_pattern) { - std::shared_ptr + using MemorySpace = std::conditional_t< + std::is_same_v, + dealii::MemorySpace::Host, + dealii::MemorySpace::Default>; + + std::shared_ptr> tpetra_comm_pattern; // If no communication pattern is given, create one. Otherwise, use the one @@ -534,33 +540,35 @@ namespace LinearAlgebra (source_elements == source_stored_elements)) { tpetra_comm_pattern = std::dynamic_pointer_cast< - const TpetraWrappers::CommunicationPattern>(comm_pattern); + const TpetraWrappers::CommunicationPattern>( + comm_pattern); if (tpetra_comm_pattern == nullptr) - tpetra_comm_pattern = - std::make_shared( - create_tpetra_comm_pattern(source_elements, mpi_comm)); + tpetra_comm_pattern = std::make_shared< + const TpetraWrappers::CommunicationPattern>( + create_tpetra_comm_pattern(source_elements, + mpi_comm)); } else - tpetra_comm_pattern = - std::make_shared( - create_tpetra_comm_pattern(source_elements, mpi_comm)); + tpetra_comm_pattern = std::make_shared< + const TpetraWrappers::CommunicationPattern>( + create_tpetra_comm_pattern(source_elements, mpi_comm)); } else { - tpetra_comm_pattern = - std::dynamic_pointer_cast( - communication_pattern); + tpetra_comm_pattern = std::dynamic_pointer_cast< + const TpetraWrappers::CommunicationPattern>( + communication_pattern); AssertThrow(tpetra_comm_pattern != nullptr, ExcMessage( std::string("The communication pattern is not of type ") + "LinearAlgebra::TpetraWrappers::CommunicationPattern.")); } - Tpetra::Export tpetra_export( + Tpetra::Export tpetra_export( tpetra_comm_pattern->get_tpetra_export()); - Tpetra::Vector target_vector( - tpetra_export.getSourceMap()); + Tpetra::Vector + target_vector(tpetra_export.getSourceMap()); // Communicate the vector to the correct map. 
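[Note on the hunk above] The std::conditional_t dispatch introduced here recovers a deal.II memory-space tag from the Kokkos memory space of the Tpetra node. The stand-alone sketch below shows the same trick under minimal assumptions; Host, Default, SerialNode, and DeviceNode are hypothetical stand-ins for dealii::MemorySpace::{Host,Default} and for concrete Tpetra node types:

  #include <Kokkos_Core.hpp>

  #include <type_traits>

  // Stand-ins for dealii::MemorySpace::Host / dealii::MemorySpace::Default.
  struct Host
  {};
  struct Default
  {};

  // Hypothetical node types; real Tpetra nodes expose `memory_space`
  // the same way.
  struct SerialNode
  {
    using memory_space = Kokkos::HostSpace;
  };
  struct DeviceNode
  {
    using memory_space = Kokkos::DefaultExecutionSpace::memory_space;
  };

  // The dispatch used in import_elements(): host-space nodes map to
  // Host, everything else to Default.
  template <typename NodeType>
  using memory_space_t =
    std::conditional_t<std::is_same_v<typename NodeType::memory_space,
                                      Kokkos::HostSpace>,
                       Host,
                       Default>;

  static_assert(std::is_same_v<memory_space_t<SerialNode>, Host>,
                "host nodes select the Host tag");

In a CPU-only Kokkos configuration both node types resolve to Host, which is consistent with the patch compiling Host and Default instantiations side by side.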
// Remark: We use here doImport on an Export object since we have to use @@ -807,12 +815,13 @@ namespace LinearAlgebra # ifdef DEAL_II_TRILINOS_WITH_TPETRA template - template + template std::enable_if_t && dealii::is_tpetra_type::value> ReadWriteVector::import_elements( - const LinearAlgebra::TpetraWrappers::Vector &trilinos_vec, - VectorOperation::values operation, + const LinearAlgebra::TpetraWrappers::Vector + &trilinos_vec, + VectorOperation::values operation, const std::shared_ptr &communication_pattern) { @@ -990,16 +999,18 @@ namespace LinearAlgebra #ifdef DEAL_II_WITH_TRILINOS # ifdef DEAL_II_TRILINOS_WITH_TPETRA template - TpetraWrappers::CommunicationPattern + template + TpetraWrappers::CommunicationPattern ReadWriteVector::create_tpetra_comm_pattern( const IndexSet &source_index_set, const MPI_Comm mpi_comm) { source_stored_elements = source_index_set; - TpetraWrappers::CommunicationPattern tpetra_comm_pattern( - source_stored_elements, stored_elements, mpi_comm); - comm_pattern = std::make_shared( + TpetraWrappers::CommunicationPattern tpetra_comm_pattern( source_stored_elements, stored_elements, mpi_comm); + comm_pattern = + std::make_shared>( + source_stored_elements, stored_elements, mpi_comm); return tpetra_comm_pattern; } diff --git a/include/deal.II/lac/trilinos_tpetra_block_sparse_matrix.templates.h b/include/deal.II/lac/trilinos_tpetra_block_sparse_matrix.templates.h index e80102f439..c1a32f66b5 100644 --- a/include/deal.II/lac/trilinos_tpetra_block_sparse_matrix.templates.h +++ b/include/deal.II/lac/trilinos_tpetra_block_sparse_matrix.templates.h @@ -102,7 +102,10 @@ namespace LinearAlgebra std::vector> tpetra_maps; for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i) tpetra_maps.push_back( - parallel_partitioning[i].make_tpetra_map(communicator, false)); + parallel_partitioning[i] + .template make_tpetra_map< + typename TpetraTypes::NodeType>(communicator, + false)); Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_rows(), ExcDimensionMismatch(tpetra_maps.size(), diff --git a/include/deal.II/lac/trilinos_tpetra_communication_pattern.h b/include/deal.II/lac/trilinos_tpetra_communication_pattern.h index ed276e12d9..4c0e3512d4 100644 --- a/include/deal.II/lac/trilinos_tpetra_communication_pattern.h +++ b/include/deal.II/lac/trilinos_tpetra_communication_pattern.h @@ -25,6 +25,7 @@ #ifdef DEAL_II_TRILINOS_WITH_TPETRA # include +# include # include # include @@ -40,10 +41,13 @@ namespace LinearAlgebra /** * This class implements a wrapper to Tpetra::Import and Tpetra::Export. */ + template class CommunicationPattern : public Utilities::MPI::CommunicationPatternBase { + static_assert(std::is_same_v || + std::is_same_v); + public: - using MemorySpace = dealii::MemorySpace::Host; /** * Initialize the communication pattern. 
* diff --git a/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h index 560b3b98b3..db4cf6a1f6 100644 --- a/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h +++ b/include/deal.II/lac/trilinos_tpetra_sparse_matrix.templates.h @@ -166,10 +166,14 @@ namespace LinearAlgebra // Get the Tpetra::Maps Teuchos::RCP> row_space_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); column_space_map = - column_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + column_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); if (column_space_map->getComm()->getRank() == 0) { @@ -280,10 +284,14 @@ namespace LinearAlgebra // Get the Tpetra::Maps Teuchos::RCP> row_space_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); column_space_map = - column_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + column_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); if (column_space_map->getComm()->getRank() == 0) { @@ -579,7 +587,8 @@ namespace LinearAlgebra const MPI_Comm communicator, const unsigned int n_max_entries_per_row) : column_space_map( - parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + parallel_partitioning.template make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, false)) , matrix(Utilities::Trilinos::internal::make_rcp< TpetraTypes::MatrixType>( column_space_map, @@ -595,7 +604,8 @@ namespace LinearAlgebra const MPI_Comm communicator, const std::vector &n_entries_per_row) : column_space_map( - parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + parallel_partitioning.template make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, false)) , compressed(false) { Teuchos::Array n_entries_per_row_array(n_entries_per_row.begin(), @@ -620,10 +630,12 @@ namespace LinearAlgebra const MPI_Comm communicator, const size_type n_max_entries_per_row) : column_space_map( - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + col_parallel_partitioning.template make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, false)) , matrix(Utilities::Trilinos::internal::make_rcp< TpetraTypes::MatrixType>( - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false), + row_parallel_partitioning.template make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, false), n_max_entries_per_row)) , compressed(false) {} @@ -637,7 +649,8 @@ namespace LinearAlgebra const MPI_Comm communicator, const std::vector &n_entries_per_row) : column_space_map( - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false)) + col_parallel_partitioning.template make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, false)) , compressed(false) { Teuchos::Array n_entries_per_row_array(n_entries_per_row.begin(), @@ -645,12 +658,16 @@ namespace LinearAlgebra # if DEAL_II_TRILINOS_VERSION_GTE(12, 16, 0) matrix = Utilities::Trilinos::internal::make_rcp< TpetraTypes::MatrixType>( - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false), + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false), n_entries_per_row_array); # else matrix = Utilities::Trilinos::internal::make_rcp< TpetraTypes::MatrixType>( - 
row_parallel_partitioning.make_tpetra_map_rcp(communicator, false), + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false), Teuchos::arcpFromArray(n_entries_per_row_array)); # endif } diff --git a/include/deal.II/lac/trilinos_tpetra_vector.h b/include/deal.II/lac/trilinos_tpetra_vector.h index ec5f4b2a21..1c1e668264 100644 --- a/include/deal.II/lac/trilinos_tpetra_vector.h +++ b/include/deal.II/lac/trilinos_tpetra_vector.h @@ -1063,7 +1063,7 @@ namespace LinearAlgebra * CommunicationPattern for the communication between the * source_stored_elements IndexSet and the current vector. */ - Teuchos::RCP + Teuchos::RCP> tpetra_comm_pattern; // Make the reference class a friend. diff --git a/include/deal.II/lac/trilinos_tpetra_vector.templates.h b/include/deal.II/lac/trilinos_tpetra_vector.templates.h index 4650e8e28a..98f3a318b8 100644 --- a/include/deal.II/lac/trilinos_tpetra_vector.templates.h +++ b/include/deal.II/lac/trilinos_tpetra_vector.templates.h @@ -125,7 +125,8 @@ namespace LinearAlgebra , has_ghost(false) , vector(Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - parallel_partitioner.make_tpetra_map_rcp(communicator, true))) + parallel_partitioner.make_tpetra_map_rcp< + TpetraTypes::NodeType>(communicator, true))) {} @@ -144,14 +145,18 @@ namespace LinearAlgebra vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - parallel_partitioner.make_tpetra_map_rcp(communicator, true)); + parallel_partitioner + .template make_tpetra_map_rcp>( + communicator, true)); compressed = true; } else { Teuchos::RCP> map = - locally_owned_entries.make_tpetra_map_rcp(communicator, false); + locally_owned_entries + .template make_tpetra_map_rcp>( + communicator, false); vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>(map); @@ -159,7 +164,9 @@ namespace LinearAlgebra nonlocal_entries.subtract_set(locally_owned_entries); nonlocal_vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - nonlocal_entries.make_tpetra_map_rcp(communicator, true)); + nonlocal_entries + .template make_tpetra_map_rcp>( + communicator, true)); compressed = false; } @@ -206,7 +213,9 @@ namespace LinearAlgebra has_ghost = false; vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - parallel_partitioner.make_tpetra_map_rcp(communicator, true)); + parallel_partitioner + .template make_tpetra_map_rcp>( + communicator, true)); } @@ -230,14 +239,18 @@ namespace LinearAlgebra vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - parallel_partitioner.make_tpetra_map_rcp(communicator, true)); + parallel_partitioner + .template make_tpetra_map_rcp>( + communicator, true)); compressed = true; } else { Teuchos::RCP> map = - locally_owned_entries.make_tpetra_map_rcp(communicator, false); + locally_owned_entries + .template make_tpetra_map_rcp>( + communicator, false); if (!vector->getMap()->isSameAs(*map)) { @@ -253,7 +266,9 @@ namespace LinearAlgebra nonlocal_vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - nonlocal_entries.make_tpetra_map_rcp(communicator, true)); + nonlocal_entries + .template make_tpetra_map_rcp>( + communicator, true)); compressed = false; } @@ -451,7 +466,9 @@ namespace LinearAlgebra Teuchos::Array vector_data(V.begin(), V.end()); vector = Utilities::Trilinos::internal::make_rcp< TpetraTypes::VectorType>( - V.locally_owned_elements().make_tpetra_map_rcp(), vector_data); + V.locally_owned_elements() + .template 
make_tpetra_map_rcp>(), + vector_data); has_ghost = false; compressed = true; @@ -511,7 +528,8 @@ namespace LinearAlgebra else { tpetra_comm_pattern = Teuchos::rcp_dynamic_cast< - const TpetraWrappers::CommunicationPattern>(communication_pattern); + const TpetraWrappers::CommunicationPattern>( + communication_pattern); AssertThrow( !tpetra_comm_pattern.is_null(), @@ -543,9 +561,7 @@ namespace LinearAlgebra for (size_t k = 0; k < localLength; ++k) x_1d(k) = *values_it++; # if !DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0) - source_vector.template sync< - typename Tpetra::Vector:: - device_type::memory_space>(); + source_vector.template sync(); # endif } Tpetra::CombineMode tpetra_operation = Tpetra::ZERO; @@ -630,8 +646,11 @@ namespace LinearAlgebra // TODO: Tpetra doesn't have a combine mode that also updates local // elements, maybe there is a better workaround. - Tpetra::Vector dummy( - vector->getMap(), false); + Tpetra::Vector> + dummy(vector->getMap(), false); TpetraTypes::ImportType data_exchange( V.trilinos_vector().getMap(), dummy.getMap()); dummy.doImport(V.trilinos_vector(), data_exchange, Tpetra::INSERT); @@ -738,9 +757,7 @@ namespace LinearAlgebra vector_1d(k) += a; } # if !DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0) - vector->template sync< - typename Tpetra::Vector:: - device_type::memory_space>(); + vector->template sync(); # endif } diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h index 42388b3eed..481245c874 100644 --- a/include/deal.II/lac/vector.templates.h +++ b/include/deal.II/lac/vector.templates.h @@ -200,8 +200,12 @@ Vector::Vector( // that know about the original vector. LinearAlgebra::TpetraWrappers::TpetraTypes::VectorType - localized_vector(complete_index_set(size()).make_tpetra_map_rcp(), - v.get_mpi_communicator()); + localized_vector( + complete_index_set(size()) + .template make_tpetra_map_rcp< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType< + MemorySpace>>(), + v.get_mpi_communicator()); Teuchos::RCP> @@ -890,8 +894,12 @@ Vector::operator=( // that know about the original vector. LinearAlgebra::TpetraWrappers::TpetraTypes::VectorType - localized_vector(complete_index_set(size()).make_tpetra_map_rcp(), - v.get_mpi_communicator()); + localized_vector( + complete_index_set(size()) + .template make_tpetra_map_rcp< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType< + MemorySpace>>(), + v.get_mpi_communicator()); Teuchos::RCP> diff --git a/include/deal.II/lac/vector_element_access.h b/include/deal.II/lac/vector_element_access.h index 6790a15dca..07b142e2bb 100644 --- a/include/deal.II/lac/vector_element_access.h +++ b/include/deal.II/lac/vector_element_access.h @@ -127,11 +127,13 @@ namespace internal # ifdef DEAL_II_TRILINOS_WITH_TPETRA - template - struct ElementAccess> + template + struct ElementAccess< + LinearAlgebra::TpetraWrappers::Vector> { public: - using VectorType = LinearAlgebra::TpetraWrappers::Vector; + using VectorType = + LinearAlgebra::TpetraWrappers::Vector; static void add(const typename VectorType::value_type value, const types::global_dof_index i, @@ -148,16 +150,16 @@ namespace internal - template + template inline void - ElementAccess>::add( - const typename VectorType::value_type value, - const types::global_dof_index i, - LinearAlgebra::TpetraWrappers::Vector &V) + ElementAccess< + LinearAlgebra::TpetraWrappers::Vector>:: + add(const typename VectorType::value_type value, + const types::global_dof_index i, + LinearAlgebra::TpetraWrappers::Vector &V) { // Extract local indices in the vector. 
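[Note on the accessor hunks in this file] The ElementAccess specializations all follow one pattern: translate the global index with getLocalElement() and touch the entry through a host-side local view. A minimal sketch of the read path, assuming Trilinos >= 13.2 (for the Tpetra::Access overload of getLocalView) and a locally owned index; the helper name get_local_entry is illustrative:

  #include <Tpetra_Access.hpp>
  #include <Tpetra_Vector.hpp>

  // Read one locally owned entry of a Tpetra vector through a host view.
  template <typename VectorType>
  auto
  get_local_entry(const VectorType                             &vector,
                  const typename VectorType::global_ordinal_type i)
  {
    // Global -> process-local index; assumes i is owned by this rank.
    const auto local_i = vector.getMap()->getLocalElement(i);

    // ReadOnly access keeps Tpetra from marking the device copy as
    // modified; the view is two-dimensional because a Vector is a
    // single-column MultiVector.
    auto view_2d = vector.template getLocalView<Kokkos::HostSpace>(
      Tpetra::Access::ReadOnly);
    return view_2d(local_i, 0);
  }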
- Tpetra::Vector vector = - V.trilinos_vector(); + auto vector = V.trilinos_vector(); TrilinosWrappers::types::int_type trilinos_i = vector.getMap()->getLocalElement( static_cast(i)); @@ -184,16 +186,16 @@ namespace internal - template + template inline void - ElementAccess>::set( - const typename VectorType::value_type value, - const types::global_dof_index i, - LinearAlgebra::TpetraWrappers::Vector &V) + ElementAccess< + LinearAlgebra::TpetraWrappers::Vector>:: + set(const typename VectorType::value_type value, + const types::global_dof_index i, + LinearAlgebra::TpetraWrappers::Vector &V) { // Extract local indices in the vector. - Tpetra::Vector vector = - V.trilinos_vector(); + auto vector = V.trilinos_vector(); TrilinosWrappers::types::int_type trilinos_i = vector.getMap()->getLocalElement( static_cast(i)); @@ -220,21 +222,21 @@ namespace internal - template - inline typename LinearAlgebra::TpetraWrappers::Vector::value_type - ElementAccess>::get( - const LinearAlgebra::TpetraWrappers::Vector &V, - const types::global_dof_index i) + template + inline typename LinearAlgebra::TpetraWrappers::Vector::value_type + ElementAccess< + LinearAlgebra::TpetraWrappers::Vector>:: + get(const LinearAlgebra::TpetraWrappers::Vector &V, + const types::global_dof_index i) { // Extract local indices in the vector. # if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0) - const Tpetra::Vector - &vector = V.trilinos_vector(); - auto vector_2d = + const auto &vector = V.trilinos_vector(); + auto vector_2d = vector.template getLocalView(Tpetra::Access::ReadOnly); # else - Tpetra::Vector vector = - V.trilinos_vector(); + auto vector = V.trilinos_vector(); vector.template sync(); auto vector_2d = vector.template getLocalView(); # endif diff --git a/source/base/index_set.cc b/source/base/index_set.cc index c9fff8ec56..9d8a7c801c 100644 --- a/source/base/index_set.cc +++ b/source/base/index_set.cc @@ -17,6 +17,7 @@ #include #include +#include #include @@ -39,9 +40,10 @@ DEAL_II_NAMESPACE_OPEN # ifdef DEAL_II_TRILINOS_WITH_TPETRA +template IndexSet::IndexSet( - const Teuchos::RCP> - &map) + const Teuchos::RCP< + const Tpetra::Map> &map) : is_compressed(true) , index_space_size(1 + map->getMaxAllGlobalIndex()) , largest_range(numbers::invalid_unsigned_int) @@ -950,16 +952,18 @@ IndexSet::fill_index_vector(std::vector &indices) const #ifdef DEAL_II_WITH_TRILINOS # ifdef DEAL_II_TRILINOS_WITH_TPETRA -Tpetra::Map +template +Tpetra::Map IndexSet::make_tpetra_map(const MPI_Comm communicator, const bool overlapping) const { - return *make_tpetra_map_rcp(communicator, overlapping); + return *make_tpetra_map_rcp(communicator, overlapping); } -Teuchos::RCP> +template +Teuchos::RCP> IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator, const bool overlapping) const { @@ -996,7 +1000,7 @@ IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator, overlapping ? 
false : is_ascending_and_one_to_one(communicator); if (linear) return Utilities::Trilinos::internal::make_rcp< - Tpetra::Map>( + Tpetra::Map>( size(), n_elements(), 0, @@ -1016,7 +1020,7 @@ IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator, int_indices); return Utilities::Trilinos::internal::make_rcp< - Tpetra::Map>( + Tpetra::Map>( size(), arr_view, 0, @@ -1197,6 +1201,54 @@ IndexSet::memory_consumption() const sizeof(compress_mutex)); } +// explicit template instantiations +#ifdef DEAL_II_WITH_TRILINOS + +# ifdef DEAL_II_TRILINOS_WITH_TPETRA + +template IndexSet::IndexSet( + const Teuchos::RCP>> + &); +template IndexSet::IndexSet( + const Teuchos::RCP>> + &); + +template LinearAlgebra::TpetraWrappers::TpetraTypes::MapType +dealii::IndexSet::make_tpetra_map< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType>( + int, + bool) const; +template LinearAlgebra::TpetraWrappers::TpetraTypes::MapType< + MemorySpace::Default> +dealii::IndexSet::make_tpetra_map< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType>( + int, + bool) const; + +template Teuchos::RCP< + LinearAlgebra::TpetraWrappers::TpetraTypes::MapType> +dealii::IndexSet::make_tpetra_map_rcp< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType>( + int, + bool) const; +template Teuchos::RCP< + LinearAlgebra::TpetraWrappers::TpetraTypes::MapType> +dealii::IndexSet::make_tpetra_map_rcp< + LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType>( + int, + bool) const; + + +# endif + +#endif + DEAL_II_NAMESPACE_CLOSE diff --git a/source/lac/trilinos_tpetra_communication_pattern.cc b/source/lac/trilinos_tpetra_communication_pattern.cc index a4dd925139..897d650656 100644 --- a/source/lac/trilinos_tpetra_communication_pattern.cc +++ b/source/lac/trilinos_tpetra_communication_pattern.cc @@ -31,7 +31,8 @@ namespace LinearAlgebra { namespace TpetraWrappers { - CommunicationPattern::CommunicationPattern( + template + CommunicationPattern::CommunicationPattern( const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, const MPI_Comm communicator) @@ -39,78 +40,102 @@ namespace LinearAlgebra // virtual functions called in constructors and destructors never use the // override in a derived class // for clarity be explicit on which function is called - CommunicationPattern::reinit(locally_owned_indices, - ghost_indices, - communicator); + CommunicationPattern::reinit(locally_owned_indices, + ghost_indices, + communicator); } + template void - CommunicationPattern::reinit(const IndexSet &locally_owned_indices, - const IndexSet &ghost_indices, - const MPI_Comm communicator) + CommunicationPattern::reinit( + const IndexSet &locally_owned_indices, + const IndexSet &ghost_indices, + const MPI_Comm communicator) { comm = Teuchos::rcpFromUndefRef(communicator); auto vector_space_vector_map = - locally_owned_indices.make_tpetra_map_rcp(*comm, false); + locally_owned_indices + .template make_tpetra_map_rcp>( + *comm, false); auto read_write_vector_map = - ghost_indices.make_tpetra_map_rcp(*comm, true); + ghost_indices + .template make_tpetra_map_rcp>( + *comm, true); // Target map is read_write_vector_map // Source map is vector_space_vector_map. This map must have uniquely // owned GID. 
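[Note on the reinit() hunk above] For readers new to the Import/Export pair being built here: the source map owns every GID uniquely, the target map may overlap, and the resulting object moves data between vectors laid out on the two maps. Below is a self-contained sketch using Tpetra's default template parameters (the patch instead templates everything on the NodeType); the layout, one owned entry per rank with entry 0 ghosted everywhere, is made up for illustration:

  #include <Tpetra_Core.hpp>
  #include <Tpetra_Import.hpp>
  #include <Tpetra_Map.hpp>
  #include <Tpetra_Vector.hpp>

  int
  main(int argc, char *argv[])
  {
    Tpetra::ScopeGuard tpetra_scope(&argc, &argv);
    {
      using Map = Tpetra::Map<>;
      using GO  = Map::global_ordinal_type;

      const auto comm = Tpetra::getDefaultComm();
      const GO   rank = comm->getRank();

      // Uniquely owned map: entry r lives on rank r. The ghosted map
      // additionally holds entry 0 on every rank.
      Teuchos::Array<GO> owned(1, rank);
      Teuchos::Array<GO> ghosted(1, rank);
      if (rank != 0)
        ghosted.push_back(0);

      const auto invalid =
        Teuchos::OrdinalTraits<Tpetra::global_size_t>::invalid();
      const auto owned_map =
        Teuchos::rcp(new Map(invalid, owned(), 0, comm));
      const auto ghosted_map =
        Teuchos::rcp(new Map(invalid, ghosted(), 0, comm));

      // Source = uniquely owned, target = possibly overlapping: the
      // same convention as in reinit() above.
      const Tpetra::Import<> importer(owned_map, ghosted_map);

      Tpetra::Vector<> src(owned_map);
      Tpetra::Vector<> dst(ghosted_map);
      src.putScalar(1.0);

      // Owner-to-ghost communication; INSERT overwrites target entries.
      dst.doImport(src, importer, Tpetra::INSERT);
    }
    return 0;
  }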
tpetra_import = - Teuchos::rcp(new Tpetra::Import( + Teuchos::rcp(new Tpetra::Import>( read_write_vector_map, vector_space_vector_map)); tpetra_export = - Teuchos::rcp(new Tpetra::Export( + Teuchos::rcp(new Tpetra::Export>( read_write_vector_map, vector_space_vector_map)); } + template MPI_Comm - CommunicationPattern::get_mpi_communicator() const + CommunicationPattern::get_mpi_communicator() const { return *comm; } - const Tpetra::Import & - CommunicationPattern::get_tpetra_import() const + template + const Tpetra::Import> & + CommunicationPattern::get_tpetra_import() const { return *tpetra_import; } - - Teuchos::RCP> - CommunicationPattern::get_tpetra_import_rcp() const + template + Teuchos::RCP> + CommunicationPattern::get_tpetra_import_rcp() const { return tpetra_import; } - const Tpetra::Export & - CommunicationPattern::get_tpetra_export() const + template + const Tpetra::Export> & + CommunicationPattern::get_tpetra_export() const { return *tpetra_export; } - Teuchos::RCP> - CommunicationPattern::get_tpetra_export_rcp() const + template + Teuchos::RCP> + CommunicationPattern::get_tpetra_export_rcp() const { return tpetra_export; } } // namespace TpetraWrappers } // namespace LinearAlgebra + +template class LinearAlgebra::TpetraWrappers::CommunicationPattern< + MemorySpace::Default>; +template class LinearAlgebra::TpetraWrappers::CommunicationPattern< + MemorySpace::Host>; + DEAL_II_NAMESPACE_CLOSE #endif diff --git a/source/lac/trilinos_tpetra_sparsity_pattern.cc b/source/lac/trilinos_tpetra_sparsity_pattern.cc index 0337c3e65b..d27cba16f0 100644 --- a/source/lac/trilinos_tpetra_sparsity_pattern.cc +++ b/source/lac/trilinos_tpetra_sparsity_pattern.cc @@ -471,7 +471,9 @@ namespace LinearAlgebra SparsityPatternBase::resize(parallel_partitioning.size(), parallel_partitioning.size()); Teuchos::RCP> map = - parallel_partitioning.make_tpetra_map_rcp(communicator, false); + parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp( map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph); } @@ -488,7 +490,9 @@ namespace LinearAlgebra SparsityPatternBase::resize(parallel_partitioning.size(), parallel_partitioning.size()); Teuchos::RCP> map = - parallel_partitioning.make_tpetra_map_rcp(communicator, false); + parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp( map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph); } @@ -506,9 +510,13 @@ namespace LinearAlgebra SparsityPatternBase::resize(row_parallel_partitioning.size(), col_parallel_partitioning.size()); Teuchos::RCP> row_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); Teuchos::RCP> col_map = - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + col_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp(row_map, col_map, n_entries_per_row, @@ -530,9 +538,13 @@ namespace LinearAlgebra SparsityPatternBase::resize(row_parallel_partitioning.size(), col_parallel_partitioning.size()); Teuchos::RCP> row_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); Teuchos::RCP> col_map = - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + col_parallel_partitioning + .template make_tpetra_map_rcp>( + 
communicator, false); SparsityPatternImpl::reinit_sp(row_map, col_map, n_entries_per_row, @@ -555,9 +567,13 @@ namespace LinearAlgebra SparsityPatternBase::resize(row_parallel_partitioning.size(), col_parallel_partitioning.size()); Teuchos::RCP> row_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); Teuchos::RCP> col_map = - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + col_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp(row_map, col_map, n_entries_per_row, @@ -581,7 +597,9 @@ namespace LinearAlgebra if (Utilities::MPI::n_mpi_processes(communicator) > 1) { Teuchos::RCP> nonlocal_map = - nonlocal_partitioner.make_tpetra_map_rcp(communicator, true); + nonlocal_partitioner + .template make_tpetra_map_rcp>( + communicator, true); nonlocal_graph = Utilities::Trilinos::internal::make_rcp< TpetraTypes::GraphType>(nonlocal_map, col_map, 0); } @@ -604,9 +622,13 @@ namespace LinearAlgebra SparsityPatternBase::resize(row_parallel_partitioning.size(), col_parallel_partitioning.size()); Teuchos::RCP> row_map = - row_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + row_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); Teuchos::RCP> col_map = - col_parallel_partitioning.make_tpetra_map_rcp(communicator, false); + col_parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp( row_map, col_map, @@ -635,7 +657,9 @@ namespace LinearAlgebra SparsityPatternBase::resize(parallel_partitioning.size(), parallel_partitioning.size()); Teuchos::RCP> map = - parallel_partitioning.make_tpetra_map_rcp(communicator, false); + parallel_partitioning + .template make_tpetra_map_rcp>( + communicator, false); SparsityPatternImpl::reinit_sp( map, map, @@ -1113,6 +1137,46 @@ namespace LinearAlgebra const dealii::DynamicSparsityPattern &, const MPI_Comm, bool); + + + template class SparsityPattern; + + template void + SparsityPattern::copy_from( + const dealii::SparsityPattern &); + template void + SparsityPattern::copy_from( + const dealii::DynamicSparsityPattern &); + + template void + SparsityPattern::reinit( + const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm, + bool); + template void + SparsityPattern::reinit( + const IndexSet &, + const dealii::DynamicSparsityPattern &, + const MPI_Comm, + bool); + + + template void + SparsityPattern::reinit( + const IndexSet &, + const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm, + bool); + template void + SparsityPattern::reinit( + const IndexSet &, + const IndexSet &, + const dealii::DynamicSparsityPattern &, + const MPI_Comm, + bool); + # endif } // namespace TpetraWrappers -- 2.39.5
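[General notes on the patch] Two idioms recur throughout and are worth spelling out. First, the `.template` keyword: once make_tpetra_map_rcp() became a member function template called in dependent contexts, the call site must disambiguate the `<` that follows the member name. A reduced example, with all names hypothetical:

  // `Partitioning` is a template parameter, so `p` has dependent type
  // and the parser needs the extra `template` keyword to treat
  // make_map as a member function template rather than a comparison.
  template <typename NodeType, typename Partitioning>
  auto
  make_map_for(const Partitioning &p)
  {
    return p.template make_map<NodeType>();
  }

Second, the explicit-instantiation blocks added at the bottom of index_set.cc, trilinos_tpetra_communication_pattern.cc, and trilinos_tpetra_sparsity_pattern.cc: the templated definitions stay in the .cc files, and one instantiation per supported memory space is emitted so that other translation units, which only see the declarations, still link. A minimal analogue of that idiom, assuming deal.II's memory-space tags and a hypothetical class name:

  #include <deal.II/base/memory_space.h>

  #include <type_traits>

  template <typename MemorySpace>
  class CommunicationPatternLike // hypothetical stand-in
  {
    // Same guard the patch adds to the real CommunicationPattern.
    static_assert(
      std::is_same_v<MemorySpace, dealii::MemorySpace::Host> ||
      std::is_same_v<MemorySpace, dealii::MemorySpace::Default>);
  };

  // Emit both variants here so the linker finds them everywhere.
  template class CommunicationPatternLike<dealii::MemorySpace::Host>;
  template class CommunicationPatternLike<dealii::MemorySpace::Default>;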