/**
* Constructor from a Trilinos Teuchos::RCP<Tpetra::Map>.
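+ *
+ * A minimal usage sketch, assuming @p index_set is an existing IndexSet
+ * (a hypothetical name); the node type must match the third template
+ * argument of the map:
+ * @code
+ *   using HostNode =
+ *     LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<
+ *       MemorySpace::Host>;
+ *   const auto map =
+ *     index_set.make_tpetra_map_rcp<HostNode>(MPI_COMM_WORLD, false);
+ *   const IndexSet copy(map);
+ * @endcode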
*/
+ template <typename NodeType>
explicit IndexSet(
- const Teuchos::RCP<const Tpetra::Map<int, types::signed_global_dof_index>>
- &map);
+ const Teuchos::RCP<
+ const Tpetra::Map<int, types::signed_global_dof_index, NodeType>> &map);
# endif // DEAL_II_TRILINOS_WITH_TPETRA
/**
const bool overlapping = false) const;
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
- Tpetra::Map<int, types::signed_global_dof_index>
+ template <typename NodeType>
+ Tpetra::Map<int, types::signed_global_dof_index, NodeType>
make_tpetra_map(const MPI_Comm communicator = MPI_COMM_WORLD,
const bool overlapping = false) const;
- Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index>>
+ template <typename NodeType>
+ Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index, NodeType>>
make_tpetra_map_rcp(const MPI_Comm communicator = MPI_COMM_WORLD,
const bool overlapping = false) const;
# endif
 * communication pattern is used multiple times. Reusing the stored pattern
 * avoids rebuilding the underlying Tpetra::Import and Tpetra::Export
 * objects and can improve performance.
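+ *
+ * A minimal sketch of reusing one pattern across repeated imports; the
+ * names source_index_set, tpetra_vec, rw_vector, n_steps, and mpi_comm
+ * are assumed to exist:
+ * @code
+ *   const auto pattern = std::make_shared<
+ *     const TpetraWrappers::CommunicationPattern<MemorySpace::Host>>(
+ *     source_index_set, rw_vector.get_stored_elements(), mpi_comm);
+ *   for (unsigned int step = 0; step < n_steps; ++step)
+ *     rw_vector.import_elements(tpetra_vec,
+ *                               VectorOperation::insert,
+ *                               pattern);
+ * @endcode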
*/
- template <typename Dummy = Number>
+ template <typename MemorySpace, typename Dummy = Number>
std::enable_if_t<std::is_same_v<Dummy, Number> &&
dealii::is_tpetra_type<Number>::value>
import_elements(
- const TpetraWrappers::Vector<Number> &tpetra_vec,
- VectorOperation::values operation,
+ const TpetraWrappers::Vector<Number, MemorySpace> &tpetra_vec,
+ VectorOperation::values operation,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern = {});
/**
* @deprecated Use import_elements() instead.
*/
- template <typename Dummy = Number>
+ template <typename MemorySpace, typename Dummy = Number>
DEAL_II_DEPRECATED std::enable_if_t<std::is_same_v<Dummy, Number> &&
dealii::is_tpetra_type<Number>::value>
- import(const TpetraWrappers::Vector<Number> &V,
- VectorOperation::values operation,
- const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
- &communication_pattern = {})
+ import(const TpetraWrappers::Vector<Number, MemorySpace> &V,
+ VectorOperation::values operation,
+ const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
+ &communication_pattern = {})
{
import_elements(V, operation, communication_pattern);
}
 * vector @p tpetra_vector. This is a helper function and should not be
* used directly.
*/
- template <typename Dummy = Number>
+ template <typename NodeType, typename Dummy = Number>
std::enable_if_t<std::is_same_v<Dummy, Number> &&
dealii::is_tpetra_type<Number>::value>
import_elements(
- const Tpetra::Vector<Number, int, types::signed_global_dof_index>
+ const Tpetra::
+ Vector<Number, int, types::signed_global_dof_index, NodeType>
&tpetra_vector,
const IndexSet &locally_owned_elements,
VectorOperation::values operation,
* Return a TpetraWrappers::CommunicationPattern and store it for future
* use.
*/
- TpetraWrappers::CommunicationPattern
+ template <typename MemorySpace = dealii::MemorySpace::Host>
+ TpetraWrappers::CommunicationPattern<MemorySpace>
create_tpetra_comm_pattern(const IndexSet &source_index_set,
const MPI_Comm mpi_comm);
# endif
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
- template <typename Dummy>
+ template <typename NodeType, typename Dummy>
std::enable_if_t<std::is_same_v<Dummy, Number> &&
dealii::is_tpetra_type<Number>::value>
ReadWriteVector<Number>::import_elements(
- const Tpetra::Vector<Number, int, types::signed_global_dof_index> &vector,
+ const Tpetra::Vector<Number, int, types::signed_global_dof_index, NodeType>
+ &vector,
const IndexSet &source_elements,
VectorOperation::values operation,
const MPI_Comm mpi_comm,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern)
{
- std::shared_ptr<const TpetraWrappers::CommunicationPattern>
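+ // Deduce the deal.II memory space that corresponds to the node type:
+ // Host if the node lives in Kokkos::HostSpace, Default otherwise.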
+ using MemorySpace = std::conditional_t<
+ std::is_same_v<typename NodeType::memory_space, Kokkos::HostSpace>,
+ dealii::MemorySpace::Host,
+ dealii::MemorySpace::Default>;
+
+ std::shared_ptr<const TpetraWrappers::CommunicationPattern<MemorySpace>>
tpetra_comm_pattern;
// If no communication pattern is given, create one. Otherwise, use the one
(source_elements == source_stored_elements))
{
tpetra_comm_pattern = std::dynamic_pointer_cast<
- const TpetraWrappers::CommunicationPattern>(comm_pattern);
+ const TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ comm_pattern);
if (tpetra_comm_pattern == nullptr)
- tpetra_comm_pattern =
- std::make_shared<const TpetraWrappers::CommunicationPattern>(
- create_tpetra_comm_pattern(source_elements, mpi_comm));
+ tpetra_comm_pattern = std::make_shared<
+ const TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ create_tpetra_comm_pattern<MemorySpace>(source_elements,
+ mpi_comm));
}
else
- tpetra_comm_pattern =
- std::make_shared<const TpetraWrappers::CommunicationPattern>(
- create_tpetra_comm_pattern(source_elements, mpi_comm));
+ tpetra_comm_pattern = std::make_shared<
+ const TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ create_tpetra_comm_pattern<MemorySpace>(source_elements, mpi_comm));
}
else
{
- tpetra_comm_pattern =
- std::dynamic_pointer_cast<const TpetraWrappers::CommunicationPattern>(
- communication_pattern);
+ tpetra_comm_pattern = std::dynamic_pointer_cast<
+ const TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ communication_pattern);
AssertThrow(tpetra_comm_pattern != nullptr,
ExcMessage(
std::string("The communication pattern is not of type ") +
"LinearAlgebra::TpetraWrappers::CommunicationPattern."));
}
- Tpetra::Export<int, types::signed_global_dof_index> tpetra_export(
+ Tpetra::Export<int, types::signed_global_dof_index, NodeType> tpetra_export(
tpetra_comm_pattern->get_tpetra_export());
- Tpetra::Vector<Number, int, types::signed_global_dof_index> target_vector(
- tpetra_export.getSourceMap());
+ Tpetra::Vector<Number, int, types::signed_global_dof_index, NodeType>
+ target_vector(tpetra_export.getSourceMap());
// Communicate the vector to the correct map.
  // Remark: We use doImport here on an Export object since we have to use
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
- template <typename Dummy>
+ template <typename MemorySpace, typename Dummy>
std::enable_if_t<std::is_same_v<Dummy, Number> &&
dealii::is_tpetra_type<Number>::value>
ReadWriteVector<Number>::import_elements(
- const LinearAlgebra::TpetraWrappers::Vector<Number> &trilinos_vec,
- VectorOperation::values operation,
+ const LinearAlgebra::TpetraWrappers::Vector<Number, MemorySpace>
+ &trilinos_vec,
+ VectorOperation::values operation,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern)
{
#ifdef DEAL_II_WITH_TRILINOS
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
- TpetraWrappers::CommunicationPattern
+ template <typename MemorySpace>
+ TpetraWrappers::CommunicationPattern<MemorySpace>
ReadWriteVector<Number>::create_tpetra_comm_pattern(
const IndexSet &source_index_set,
const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
- TpetraWrappers::CommunicationPattern tpetra_comm_pattern(
- source_stored_elements, stored_elements, mpi_comm);
- comm_pattern = std::make_shared<TpetraWrappers::CommunicationPattern>(
+ TpetraWrappers::CommunicationPattern<MemorySpace> tpetra_comm_pattern(
source_stored_elements, stored_elements, mpi_comm);
+ comm_pattern =
+ std::make_shared<TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ source_stored_elements, stored_elements, mpi_comm);
return tpetra_comm_pattern;
}
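+ // Build one Tpetra map for each block row of the block sparsity pattern.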
std::vector<typename TpetraTypes::MapType<MemorySpace>> tpetra_maps;
for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
tpetra_maps.push_back(
- parallel_partitioning[i].make_tpetra_map(communicator, false));
+ parallel_partitioning[i]
+ .template make_tpetra_map<
+ typename TpetraTypes::NodeType<MemorySpace>>(communicator,
+ false));
Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_rows(),
ExcDimensionMismatch(tpetra_maps.size(),
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
# include <deal.II/base/communication_pattern_base.h>
+# include <deal.II/base/memory_space.h>
# include <Tpetra_Export.hpp>
# include <Tpetra_Import.hpp>
/**
 * This class implements a wrapper around Tpetra::Import and Tpetra::Export.
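+ *
+ * The MemorySpace template parameter selects the Kokkos node type of the
+ * wrapped Tpetra objects; it must be either dealii::MemorySpace::Host or
+ * dealii::MemorySpace::Default. A minimal construction sketch, assuming
+ * the two index sets and the communicator already exist:
+ * @code
+ *   CommunicationPattern<dealii::MemorySpace::Host> pattern(
+ *     locally_owned_indices, ghost_indices, mpi_communicator);
+ *   const auto &tpetra_import = pattern.get_tpetra_import();
+ * @endcode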
*/
+ template <typename MemorySpace = dealii::MemorySpace::Host>
class CommunicationPattern : public Utilities::MPI::CommunicationPatternBase
{
+ static_assert(std::is_same_v<MemorySpace, dealii::MemorySpace::Default> ||
+                 std::is_same_v<MemorySpace, dealii::MemorySpace::Host>,
+               "The MemorySpace template parameter must be either "
+               "dealii::MemorySpace::Host or dealii::MemorySpace::Default.");
+
public:
- using MemorySpace = dealii::MemorySpace::Host;
/**
* Initialize the communication pattern.
*
// Get the Tpetra::Maps
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_space_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
column_space_map =
- column_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ column_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
if (column_space_map->getComm()->getRank() == 0)
{
// Get the Tpetra::Maps
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_space_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
column_space_map =
- column_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ column_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
if (column_space_map->getComm()->getRank() == 0)
{
const MPI_Comm communicator,
const unsigned int n_max_entries_per_row)
: column_space_map(
- parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ parallel_partitioning.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, false))
, matrix(Utilities::Trilinos::internal::make_rcp<
TpetraTypes::MatrixType<Number, MemorySpace>>(
column_space_map,
const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row)
: column_space_map(
- parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ parallel_partitioning.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, false))
, compressed(false)
{
Teuchos::Array<size_t> n_entries_per_row_array(n_entries_per_row.begin(),
const MPI_Comm communicator,
const size_type n_max_entries_per_row)
: column_space_map(
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ col_parallel_partitioning.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, false))
, matrix(Utilities::Trilinos::internal::make_rcp<
TpetraTypes::MatrixType<Number, MemorySpace>>(
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false),
+ row_parallel_partitioning.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, false),
n_max_entries_per_row))
, compressed(false)
{}
const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row)
: column_space_map(
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false))
+ col_parallel_partitioning.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, false))
, compressed(false)
{
Teuchos::Array<size_t> n_entries_per_row_array(n_entries_per_row.begin(),
# if DEAL_II_TRILINOS_VERSION_GTE(12, 16, 0)
matrix = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::MatrixType<Number, MemorySpace>>(
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false),
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false),
n_entries_per_row_array);
# else
matrix = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::MatrixType<Number, MemorySpace>>(
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false),
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false),
Teuchos::arcpFromArray(n_entries_per_row_array));
# endif
}
* CommunicationPattern for the communication between the
* source_stored_elements IndexSet and the current vector.
*/
- Teuchos::RCP<const TpetraWrappers::CommunicationPattern>
+ Teuchos::RCP<const TpetraWrappers::CommunicationPattern<MemorySpace>>
tpetra_comm_pattern;
// Make the reference class a friend.
, has_ghost(false)
, vector(Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- parallel_partitioner.make_tpetra_map_rcp(communicator, true)))
+ parallel_partitioner.template make_tpetra_map_rcp<
+ TpetraTypes::NodeType<MemorySpace>>(communicator, true)))
{}
vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- parallel_partitioner.make_tpetra_map_rcp(communicator, true));
+ parallel_partitioner
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true));
compressed = true;
}
else
{
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
- locally_owned_entries.make_tpetra_map_rcp(communicator, false);
+ locally_owned_entries
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(map);
nonlocal_entries.subtract_set(locally_owned_entries);
nonlocal_vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- nonlocal_entries.make_tpetra_map_rcp(communicator, true));
+ nonlocal_entries
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true));
compressed = false;
}
has_ghost = false;
vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- parallel_partitioner.make_tpetra_map_rcp(communicator, true));
+ parallel_partitioner
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true));
}
vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- parallel_partitioner.make_tpetra_map_rcp(communicator, true));
+ parallel_partitioner
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true));
compressed = true;
}
else
{
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
- locally_owned_entries.make_tpetra_map_rcp(communicator, false);
+ locally_owned_entries
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
if (!vector->getMap()->isSameAs(*map))
{
nonlocal_vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- nonlocal_entries.make_tpetra_map_rcp(communicator, true));
+ nonlocal_entries
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true));
compressed = false;
}
Teuchos::Array<OtherNumber> vector_data(V.begin(), V.end());
vector = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::VectorType<Number, MemorySpace>>(
- V.locally_owned_elements().make_tpetra_map_rcp(), vector_data);
+ V.locally_owned_elements()
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(),
+ vector_data);
has_ghost = false;
compressed = true;
else
{
tpetra_comm_pattern = Teuchos::rcp_dynamic_cast<
- const TpetraWrappers::CommunicationPattern>(communication_pattern);
+ const TpetraWrappers::CommunicationPattern<MemorySpace>>(
+ communication_pattern);
AssertThrow(
!tpetra_comm_pattern.is_null(),
for (size_t k = 0; k < localLength; ++k)
x_1d(k) = *values_it++;
# if !DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
- source_vector.template sync<
- typename Tpetra::Vector<Number, int, types::signed_global_dof_index>::
- device_type::memory_space>();
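+ // Tpetra before 13.2 uses DualView semantics, so pending modifications
+ // must be synchronized to the vector's memory space explicitly.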
+ source_vector.template sync<typename MemorySpace::kokkos_space>();
# endif
}
Tpetra::CombineMode tpetra_operation = Tpetra::ZERO;
// TODO: Tpetra doesn't have a combine mode that also updates local
  // elements; maybe there is a better workaround.
- Tpetra::Vector<Number, int, types::signed_global_dof_index> dummy(
- vector->getMap(), false);
+ Tpetra::Vector<Number,
+ int,
+ types::signed_global_dof_index,
+ TpetraTypes::NodeType<MemorySpace>>
+ dummy(vector->getMap(), false);
TpetraTypes::ImportType<MemorySpace> data_exchange(
V.trilinos_vector().getMap(), dummy.getMap());
dummy.doImport(V.trilinos_vector(), data_exchange, Tpetra::INSERT);
vector_1d(k) += a;
}
# if !DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
- vector->template sync<
- typename Tpetra::Vector<Number, int, types::signed_global_dof_index>::
- device_type::memory_space>();
+ vector->template sync<typename MemorySpace::kokkos_space>();
# endif
}
// that know about the original vector.
LinearAlgebra::TpetraWrappers::TpetraTypes::VectorType<OtherNumber,
MemorySpace>
- localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
- v.get_mpi_communicator());
+ localized_vector(
+ complete_index_set(size())
+ .template make_tpetra_map_rcp<
+ LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<
+ MemorySpace>>(),
+ v.get_mpi_communicator());
Teuchos::RCP<const LinearAlgebra::TpetraWrappers::TpetraTypes::ImportType<
MemorySpace>>
// that know about the original vector.
LinearAlgebra::TpetraWrappers::TpetraTypes::VectorType<OtherNumber,
MemorySpace>
- localized_vector(complete_index_set(size()).make_tpetra_map_rcp(),
- v.get_mpi_communicator());
+ localized_vector(
+ complete_index_set(size())
+ .template make_tpetra_map_rcp<
+ LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<
+ MemorySpace>>(),
+ v.get_mpi_communicator());
Teuchos::RCP<const LinearAlgebra::TpetraWrappers::TpetraTypes::ImportType<
MemorySpace>>
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
- template <typename NumberType>
- struct ElementAccess<LinearAlgebra::TpetraWrappers::Vector<NumberType>>
+ template <typename NumberType, typename MemorySpace>
+ struct ElementAccess<
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace>>
{
public:
- using VectorType = LinearAlgebra::TpetraWrappers::Vector<NumberType>;
+ using VectorType =
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace>;
static void
add(const typename VectorType::value_type value,
const types::global_dof_index i,
- template <typename NumberType>
+ template <typename NumberType, typename MemorySpace>
inline void
- ElementAccess<LinearAlgebra::TpetraWrappers::Vector<NumberType>>::add(
- const typename VectorType::value_type value,
- const types::global_dof_index i,
- LinearAlgebra::TpetraWrappers::Vector<NumberType> &V)
+ ElementAccess<
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace>>::
+ add(const typename VectorType::value_type value,
+ const types::global_dof_index i,
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace> &V)
{
// Extract local indices in the vector.
- Tpetra::Vector<NumberType, int, types::signed_global_dof_index> vector =
- V.trilinos_vector();
+ auto vector = V.trilinos_vector();
TrilinosWrappers::types::int_type trilinos_i =
vector.getMap()->getLocalElement(
static_cast<TrilinosWrappers::types::int_type>(i));
- template <typename NumberType>
+ template <typename NumberType, typename MemorySpace>
inline void
- ElementAccess<LinearAlgebra::TpetraWrappers::Vector<NumberType>>::set(
- const typename VectorType::value_type value,
- const types::global_dof_index i,
- LinearAlgebra::TpetraWrappers::Vector<NumberType> &V)
+ ElementAccess<
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace>>::
+ set(const typename VectorType::value_type value,
+ const types::global_dof_index i,
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace> &V)
{
// Extract local indices in the vector.
- Tpetra::Vector<NumberType, int, types::signed_global_dof_index> vector =
- V.trilinos_vector();
+ auto vector = V.trilinos_vector();
TrilinosWrappers::types::int_type trilinos_i =
vector.getMap()->getLocalElement(
static_cast<TrilinosWrappers::types::int_type>(i));
- template <typename NumberType>
- inline typename LinearAlgebra::TpetraWrappers::Vector<NumberType>::value_type
- ElementAccess<LinearAlgebra::TpetraWrappers::Vector<NumberType>>::get(
- const LinearAlgebra::TpetraWrappers::Vector<NumberType> &V,
- const types::global_dof_index i)
+ template <typename NumberType, typename MemorySpace>
+ inline typename LinearAlgebra::TpetraWrappers::Vector<NumberType,
+ MemorySpace>::value_type
+ ElementAccess<
+ LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace>>::
+ get(const LinearAlgebra::TpetraWrappers::Vector<NumberType, MemorySpace> &V,
+ const types::global_dof_index i)
{
// Extract local indices in the vector.
# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
- const Tpetra::Vector<NumberType, int, types::signed_global_dof_index>
- &vector = V.trilinos_vector();
- auto vector_2d =
+ const auto &vector = V.trilinos_vector();
+ auto vector_2d =
vector.template getLocalView<Kokkos::HostSpace>(Tpetra::Access::ReadOnly);
# else
- Tpetra::Vector<NumberType, int, types::signed_global_dof_index> vector =
- V.trilinos_vector();
+ auto vector = V.trilinos_vector();
vector.template sync<Kokkos::HostSpace>();
auto vector_2d = vector.template getLocalView<Kokkos::HostSpace>();
# endif
#include <deal.II/base/mpi.h>
#include <deal.II/lac/exceptions.h>
+#include <deal.II/lac/trilinos_tpetra_types.h>
#include <vector>
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+template <typename NodeType>
IndexSet::IndexSet(
- const Teuchos::RCP<const Tpetra::Map<int, types::signed_global_dof_index>>
- &map)
+ const Teuchos::RCP<
+ const Tpetra::Map<int, types::signed_global_dof_index, NodeType>> &map)
: is_compressed(true)
, index_space_size(1 + map->getMaxAllGlobalIndex())
, largest_range(numbers::invalid_unsigned_int)
#ifdef DEAL_II_WITH_TRILINOS
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
-Tpetra::Map<int, types::signed_global_dof_index>
+template <typename NodeType>
+Tpetra::Map<int, types::signed_global_dof_index, NodeType>
IndexSet::make_tpetra_map(const MPI_Comm communicator,
const bool overlapping) const
{
- return *make_tpetra_map_rcp(communicator, overlapping);
+ return *make_tpetra_map_rcp<NodeType>(communicator, overlapping);
}
-Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index>>
+template <typename NodeType>
+Teuchos::RCP<Tpetra::Map<int, types::signed_global_dof_index, NodeType>>
IndexSet::make_tpetra_map_rcp(const MPI_Comm communicator,
const bool overlapping) const
{
overlapping ? false : is_ascending_and_one_to_one(communicator);
if (linear)
return Utilities::Trilinos::internal::make_rcp<
- Tpetra::Map<int, types::signed_global_dof_index>>(
+ Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
size(),
n_elements(),
0,
int_indices);
return Utilities::Trilinos::internal::make_rcp<
- Tpetra::Map<int, types::signed_global_dof_index>>(
+ Tpetra::Map<int, types::signed_global_dof_index, NodeType>>(
size(),
arr_view,
0,
sizeof(compress_mutex));
}
+// explicit template instantiations
+#ifdef DEAL_II_WITH_TRILINOS
+
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+
+template IndexSet::IndexSet(
+ const Teuchos::RCP<const Tpetra::Map<
+ int,
+ types::signed_global_dof_index,
+ LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>>
+ &);
+template IndexSet::IndexSet(
+ const Teuchos::RCP<const Tpetra::Map<
+ int,
+ types::signed_global_dof_index,
+ LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>>
+ &);
+
+template LinearAlgebra::TpetraWrappers::TpetraTypes::MapType<MemorySpace::Host>
+IndexSet::make_tpetra_map<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>(
+  const MPI_Comm,
+  bool) const;
+template LinearAlgebra::TpetraWrappers::TpetraTypes::MapType<
+  MemorySpace::Default>
+IndexSet::make_tpetra_map<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>(
+  const MPI_Comm,
+  bool) const;
+
+template Teuchos::RCP<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::MapType<MemorySpace::Host>>
+IndexSet::make_tpetra_map_rcp<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Host>>(
+  const MPI_Comm,
+  bool) const;
+template Teuchos::RCP<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::MapType<MemorySpace::Default>>
+IndexSet::make_tpetra_map_rcp<
+  LinearAlgebra::TpetraWrappers::TpetraTypes::NodeType<MemorySpace::Default>>(
+  const MPI_Comm,
+  bool) const;
+
+
+# endif
+
+#endif
+
DEAL_II_NAMESPACE_CLOSE
{
namespace TpetraWrappers
{
- CommunicationPattern::CommunicationPattern(
+ template <typename MemorySpace>
+ CommunicationPattern<MemorySpace>::CommunicationPattern(
const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
const MPI_Comm communicator)
// Virtual functions called in constructors and destructors never
// dispatch to the override in a derived class, so for clarity we are
// explicit about which function is called.
- CommunicationPattern::reinit(locally_owned_indices,
- ghost_indices,
- communicator);
+ CommunicationPattern<MemorySpace>::reinit(locally_owned_indices,
+ ghost_indices,
+ communicator);
}
+ template <typename MemorySpace>
void
- CommunicationPattern::reinit(const IndexSet &locally_owned_indices,
- const IndexSet &ghost_indices,
- const MPI_Comm communicator)
+ CommunicationPattern<MemorySpace>::reinit(
+ const IndexSet &locally_owned_indices,
+ const IndexSet &ghost_indices,
+ const MPI_Comm communicator)
{
comm = Teuchos::rcpFromUndefRef(communicator);
auto vector_space_vector_map =
- locally_owned_indices.make_tpetra_map_rcp(*comm, false);
+ locally_owned_indices
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ *comm, false);
auto read_write_vector_map =
- ghost_indices.make_tpetra_map_rcp(*comm, true);
+ ghost_indices
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ *comm, true);
// The target map is read_write_vector_map and the source map is
// vector_space_vector_map. The source map must have uniquely owned
// GIDs.
tpetra_import =
- Teuchos::rcp(new Tpetra::Import<int, types::signed_global_dof_index>(
+ Teuchos::rcp(new Tpetra::Import<int,
+ types::signed_global_dof_index,
+ TpetraTypes::NodeType<MemorySpace>>(
read_write_vector_map, vector_space_vector_map));
tpetra_export =
- Teuchos::rcp(new Tpetra::Export<int, types::signed_global_dof_index>(
+ Teuchos::rcp(new Tpetra::Export<int,
+ types::signed_global_dof_index,
+ TpetraTypes::NodeType<MemorySpace>>(
read_write_vector_map, vector_space_vector_map));
}
+ template <typename MemorySpace>
MPI_Comm
- CommunicationPattern::get_mpi_communicator() const
+ CommunicationPattern<MemorySpace>::get_mpi_communicator() const
{
return *comm;
}
- const Tpetra::Import<int, types::signed_global_dof_index> &
- CommunicationPattern::get_tpetra_import() const
+ template <typename MemorySpace>
+ const Tpetra::Import<int,
+ types::signed_global_dof_index,
+ TpetraTypes::NodeType<MemorySpace>> &
+ CommunicationPattern<MemorySpace>::get_tpetra_import() const
{
return *tpetra_import;
}
-
- Teuchos::RCP<TpetraTypes::ImportType<dealii::MemorySpace::Host>>
- CommunicationPattern::get_tpetra_import_rcp() const
+ template <typename MemorySpace>
+ Teuchos::RCP<TpetraTypes::ImportType<MemorySpace>>
+ CommunicationPattern<MemorySpace>::get_tpetra_import_rcp() const
{
return tpetra_import;
}
- const Tpetra::Export<int, types::signed_global_dof_index> &
- CommunicationPattern::get_tpetra_export() const
+ template <typename MemorySpace>
+ const Tpetra::Export<int,
+ types::signed_global_dof_index,
+ TpetraTypes::NodeType<MemorySpace>> &
+ CommunicationPattern<MemorySpace>::get_tpetra_export() const
{
return *tpetra_export;
}
- Teuchos::RCP<TpetraTypes::ExportType<dealii::MemorySpace::Host>>
- CommunicationPattern::get_tpetra_export_rcp() const
+ template <typename MemorySpace>
+ Teuchos::RCP<TpetraTypes::ExportType<MemorySpace>>
+ CommunicationPattern<MemorySpace>::get_tpetra_export_rcp() const
{
return tpetra_export;
}
} // namespace TpetraWrappers
} // namespace LinearAlgebra
+
+template class LinearAlgebra::TpetraWrappers::CommunicationPattern<
+ MemorySpace::Default>;
+template class LinearAlgebra::TpetraWrappers::CommunicationPattern<
+ MemorySpace::Host>;
+
DEAL_II_NAMESPACE_CLOSE
#endif
SparsityPatternBase::resize(parallel_partitioning.size(),
parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
- parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<MemorySpace>(
map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
}
SparsityPatternBase::resize(parallel_partitioning.size(),
parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
- parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<MemorySpace>(
map, map, n_entries_per_row, column_space_map, graph, nonlocal_graph);
}
SparsityPatternBase::resize(row_parallel_partitioning.size(),
col_parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ col_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
col_map,
n_entries_per_row,
SparsityPatternBase::resize(row_parallel_partitioning.size(),
col_parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ col_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
col_map,
n_entries_per_row,
SparsityPatternBase::resize(row_parallel_partitioning.size(),
col_parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ col_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<MemorySpace>(row_map,
col_map,
n_entries_per_row,
if (Utilities::MPI::n_mpi_processes(communicator) > 1)
{
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> nonlocal_map =
- nonlocal_partitioner.make_tpetra_map_rcp(communicator, true);
+ nonlocal_partitioner
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, true);
nonlocal_graph = Utilities::Trilinos::internal::make_rcp<
TpetraTypes::GraphType<MemorySpace>>(nonlocal_map, col_map, 0);
}
SparsityPatternBase::resize(row_parallel_partitioning.size(),
col_parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> row_map =
- row_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ row_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> col_map =
- col_parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ col_parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
row_map,
col_map,
SparsityPatternBase::resize(parallel_partitioning.size(),
parallel_partitioning.size());
Teuchos::RCP<TpetraTypes::MapType<MemorySpace>> map =
- parallel_partitioning.make_tpetra_map_rcp(communicator, false);
+ parallel_partitioning
+ .template make_tpetra_map_rcp<TpetraTypes::NodeType<MemorySpace>>(
+ communicator, false);
SparsityPatternImpl::reinit_sp<SparsityPatternType, MemorySpace>(
map,
map,
const dealii::DynamicSparsityPattern &,
const MPI_Comm,
bool);
+
+
+ template class SparsityPattern<dealii::MemorySpace::Default>;
+
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::copy_from(
+ const dealii::SparsityPattern &);
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::copy_from(
+ const dealii::DynamicSparsityPattern &);
+
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::reinit(
+ const IndexSet &,
+ const dealii::SparsityPattern &,
+ const MPI_Comm,
+ bool);
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::reinit(
+ const IndexSet &,
+ const dealii::DynamicSparsityPattern &,
+ const MPI_Comm,
+ bool);
+
+
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::reinit(
+ const IndexSet &,
+ const IndexSet &,
+ const dealii::SparsityPattern &,
+ const MPI_Comm,
+ bool);
+ template void
+ SparsityPattern<dealii::MemorySpace::Default>::reinit(
+ const IndexSet &,
+ const IndexSet &,
+ const dealii::DynamicSparsityPattern &,
+ const MPI_Comm,
+ bool);
+
# endif
} // namespace TpetraWrappers