* Pointer to data on the device.
*/
std::unique_ptr<Number[]> values_dev;
+
+ /**
+ * Pointers to the beginning of the values of the other processes in the
+ * same shared-memory domain.
+ */
+ std::vector<ArrayView<const Number>> values_sm;
};
// This is not used, but it allows us to simplify the code until we start using
// CUDA-aware MPI.
std::unique_ptr<Number[]> values_dev;
+
+ std::vector<ArrayView<const Number>> values_sm;
};
std::unique_ptr<Number[], std::function<void(Number *)>> values;
std::unique_ptr<Number[], void (*)(Number *)> values_dev;
+
+ /**
+ * This is currently not used.
+ */
+ std::vector<ArrayView<const Number>> values_sm;
};
void
set_ghost_state(const bool ghosted) const;
+ /**
+ * Return pointers to the beginning of the values of the other
+ * processes in the same shared-memory domain.
+ */
+ const std::vector<ArrayView<const Number>> &
+ shared_vector_data() const;
+
//@}
/**
+ template <typename Number, typename MemorySpace>
+ const std::vector<ArrayView<const Number>> &
+ Vector<Number, MemorySpace>::shared_vector_data() const
+ {
+ return data.values_sm;
+ }
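+
+ // A minimal usage sketch (not part of this change; `vec` and
+ // `do_something_with()` are hypothetical): assuming the vector has been
+ // set up such that the shared-memory views are populated, they can be
+ // read directly, without any MPI communication:
+ //
+ //   const std::vector<ArrayView<const Number>> &sm = vec.shared_vector_data();
+ //   for (unsigned int rank = 0; rank < sm.size(); ++rank)
+ //     for (const Number entry : sm[rank])
+ //       do_something_with(rank, entry);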
+
+
+
template <typename Number, typename MemorySpace>
inline Number
Vector<Number, MemorySpace>::operator()(const size_type global_index) const
#include <deal.II/matrix_free/mapping_info.h>
#include <deal.II/matrix_free/shape_info.h>
#include <deal.II/matrix_free/task_info.h>
+#include <deal.II/matrix_free/vector_data_exchange.h>
#include <array>
#include <memory>
std::shared_ptr<const Utilities::MPI::Partitioner> vector_partitioner;
/**
- * This partitioning selects a subset of ghost indices to the full
+ * Vector exchanger compatible with vector_partitioner.
+ */
+ std::shared_ptr<
+ const internal::MatrixFreeFunctions::VectorDataExchange::Base>
+ vector_exchanger;
+
+ /**
+ * Vector exchangers compatible with partitioners that select a subset
+ * of the ghost indices relative to the full
* vector partitioner stored in @p vector_partitioner. These
* partitioners are used in specialized loops that only import parts of
* the ghosted region for reducing the amount of communication. There
* values and the gradients on all faces adjacent to the locally owned
* cells.
*/
- std::array<std::shared_ptr<const Utilities::MPI::Partitioner>, 5>
- vector_partitioner_face_variants;
+ std::array<
+ std::shared_ptr<
+ const internal::MatrixFreeFunctions::VectorDataExchange::Base>,
+ 5>
+ vector_exchanger_face_variants;
/**
* This stores a (sorted) list of all locally owned degrees of freedom
Utilities::MPI::Partitioner *vec_part =
const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
vec_part->set_ghost_indices(ghost_indices);
+
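+ // For now, the exchanger simply wraps the MPI partitioner (see
+ // VectorDataExchange::PartitionerWrapper); shared-memory-aware
+ // implementations can be provided through the same Base interface.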
+ vector_exchanger = std::make_shared<
+ internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+ vector_partitioner);
}
Utilities::MPI::min<int>(compressed_set.n_elements() ==
part.ghost_indices().n_elements(),
part.get_mpi_communicator()) != 0;
+
+ std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;
+
if (all_ghosts_equal)
- vector_partitioner_face_variants[0] = vector_partitioner;
+ temp_0 = vector_partitioner;
else
{
- vector_partitioner_face_variants[0] =
- std::make_shared<Utilities::MPI::Partitioner>(
- part.locally_owned_range(), part.get_mpi_communicator());
- const_cast<Utilities::MPI::Partitioner *>(
- vector_partitioner_face_variants[0].get())
+ temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
+ part.locally_owned_range(), part.get_mpi_communicator());
+ const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
->set_ghost_indices(compressed_set, part.ghost_indices());
}
+
+ vector_exchanger_face_variants[0] =
+ std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+ PartitionerWrapper>(temp_0);
}
// construct a numbering of faces
}
};
+ std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
+ temp_4;
+
// partitioner 1: values on faces
- process_values(vector_partitioner_face_variants[1], loop_over_faces);
+ process_values(temp_1, loop_over_faces);
// partitioner 2: values and gradients on faces
- process_gradients(vector_partitioner_face_variants[1],
- vector_partitioner_face_variants[2],
- loop_over_faces);
+ process_gradients(temp_1, temp_2, loop_over_faces);
if (fill_cell_centric)
{
ghost_indices.clear();
// partitioner 3: values on all faces
- process_values(vector_partitioner_face_variants[3],
- loop_over_all_faces);
+ process_values(temp_3, loop_over_all_faces);
// partitioner 4: values and gradients on all faces
- process_gradients(vector_partitioner_face_variants[3],
- vector_partitioner_face_variants[4],
- loop_over_all_faces);
+ process_gradients(temp_3, temp_4, loop_over_all_faces);
}
else
{
- vector_partitioner_face_variants[3] =
- std::make_shared<Utilities::MPI::Partitioner>(
- part.locally_owned_range(), part.get_mpi_communicator());
- vector_partitioner_face_variants[4] =
- std::make_shared<Utilities::MPI::Partitioner>(
- part.locally_owned_range(), part.get_mpi_communicator());
+ temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
+ part.locally_owned_range(), part.get_mpi_communicator());
+ temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
+ part.locally_owned_range(), part.get_mpi_communicator());
}
+
+ vector_exchanger_face_variants[1] = std::make_shared<
+ internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+ temp_1);
+ vector_exchanger_face_variants[2] = std::make_shared<
+ internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+ temp_2);
+ vector_exchanger_face_variants[3] = std::make_shared<
+ internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+ temp_3);
+ vector_exchanger_face_variants[4] = std::make_shared<
+ internal::MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+ temp_4);
}
DataAccessOnFaces::unspecified)
for (unsigned int c = 0; c < matrix_free.n_components(); ++c)
AssertDimension(
- matrix_free.get_dof_info(c).vector_partitioner_face_variants.size(),
+ matrix_free.get_dof_info(c).vector_exchanger_face_variants.size(),
5);
}
* Get the vector data exchanger for the given @p mf_component, taking
* into account the vector_face_access mode set in the constructor.
*/
- const Utilities::MPI::Partitioner &
+ const internal::MatrixFreeFunctions::VectorDataExchange::Base &
get_partitioner(const unsigned int mf_component) const
{
AssertDimension(matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants.size(),
+ .vector_exchanger_face_variants.size(),
5);
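+ // Index convention of the face variants: [0] cell access only
+ // (DataAccessOnFaces::none), [1] values on faces, [2] values and
+ // gradients on faces, [3] values on all faces, [4] values and
+ // gradients on all faces; any other access mode falls back to the
+ // full exchanger.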
if (vector_face_access ==
dealii::MatrixFree<dim, Number, VectorizedArrayType>::
DataAccessOnFaces::none)
return *matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants[0];
+ .vector_exchanger_face_variants[0];
else if (vector_face_access ==
dealii::MatrixFree<dim, Number, VectorizedArrayType>::
DataAccessOnFaces::values)
return *matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants[1];
+ .vector_exchanger_face_variants[1];
else if (vector_face_access ==
dealii::MatrixFree<dim, Number, VectorizedArrayType>::
DataAccessOnFaces::gradients)
return *matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants[2];
+ .vector_exchanger_face_variants[2];
else if (vector_face_access ==
dealii::MatrixFree<dim, Number, VectorizedArrayType>::
DataAccessOnFaces::values_all_faces)
return *matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants[3];
+ .vector_exchanger_face_variants[3];
else if (vector_face_access ==
dealii::MatrixFree<dim, Number, VectorizedArrayType>::
DataAccessOnFaces::gradients_all_faces)
return *matrix_free.get_dof_info(mf_component)
- .vector_partitioner_face_variants[4];
+ .vector_exchanger_face_variants[4];
else
- return *matrix_free.get_dof_info(mf_component).vector_partitioner.get();
+ return *matrix_free.get_dof_info(mf_component).vector_exchanger.get();
}
part.export_to_ghosted_array_start(
component_in_block_vector + channel_shift,
ArrayView<const Number>(vec.begin(), part.local_size()),
- ArrayView<Number>(tmp_data[component_in_block_vector]->begin(),
- part.n_import_indices()),
+ vec.shared_vector_data(),
ArrayView<Number>(const_cast<Number *>(vec.begin()) +
part.local_size(),
matrix_free.get_dof_info(mf_component)
.vector_partitioner->n_ghost_indices()),
+ ArrayView<Number>(tmp_data[component_in_block_vector]->begin(),
+ part.n_import_indices()),
this->requests[component_in_block_vector]);
# endif
}
const auto &part = get_partitioner(mf_component);
- if (part.n_ghost_indices() != 0 || part.n_import_indices() != 0)
+ if (part.n_ghost_indices() != 0 || part.n_import_indices() != 0 ||
+ part.n_import_sm_procs() != 0)
{
part.export_to_ghosted_array_finish(
+ ArrayView<const Number>(vec.begin(), part.local_size()),
+ vec.shared_vector_data(),
ArrayView<Number>(const_cast<Number *>(vec.begin()) +
part.local_size(),
matrix_free.get_dof_info(mf_component)
const auto &part = get_partitioner(mf_component);
- if (part.n_ghost_indices() == 0 && part.n_import_indices() == 0)
+ if (part.n_ghost_indices() == 0 && part.n_import_indices() == 0 &&
+ part.n_import_sm_procs() == 0)
return;
tmp_data[component_in_block_vector] =
part.import_from_ghosted_array_start(
dealii::VectorOperation::add,
component_in_block_vector + channel_shift,
+ ArrayView<Number>(vec.begin(), part.local_size()),
+ vec.shared_vector_data(),
ArrayView<Number>(vec.begin() + part.local_size(),
matrix_free.get_dof_info(mf_component)
.vector_partitioner->n_ghost_indices()),
std::is_same<Number, typename VectorType::value_type>::value,
"Type mismatch between VectorType and VectorDataExchange");
(void)component_in_block_vector;
-
if (vec.size() != 0)
{
# ifdef DEAL_II_WITH_MPI
const auto &part = get_partitioner(mf_component);
- if (part.n_ghost_indices() == 0 && part.n_import_indices() == 0)
- return;
-
- part.import_from_ghosted_array_finish(
- VectorOperation::add,
- ArrayView<const Number>(
- tmp_data[component_in_block_vector]->begin(),
- part.n_import_indices()),
- ArrayView<Number>(vec.begin(), part.local_size()),
- ArrayView<Number>(vec.begin() + part.local_size(),
- matrix_free.get_dof_info(mf_component)
- .vector_partitioner->n_ghost_indices()),
- this->requests[component_in_block_vector]);
+ if (part.n_ghost_indices() != 0 || part.n_import_indices() != 0 ||
+ part.n_import_sm_procs() != 0)
+ {
+ part.import_from_ghosted_array_finish(
+ VectorOperation::add,
+ ArrayView<Number>(vec.begin(), part.local_size()),
+ vec.shared_vector_data(),
+ ArrayView<Number>(vec.begin() + part.local_size(),
+ matrix_free.get_dof_info(mf_component)
+ .vector_partitioner->n_ghost_indices()),
+ ArrayView<const Number>(
+ tmp_data[component_in_block_vector]->begin(),
+ part.n_import_indices()),
+ this->requests[component_in_block_vector]);
- matrix_free.release_scratch_data_non_threadsafe(
- tmp_data[component_in_block_vector]);
- tmp_data[component_in_block_vector] = nullptr;
+ matrix_free.release_scratch_data_non_threadsafe(
+ tmp_data[component_in_block_vector]);
+ tmp_data[component_in_block_vector] = nullptr;
+ }
# endif
}
}
if (ghosts_were_set == true)
return;
- if (vector_face_access ==
- dealii::MatrixFree<dim, Number, VectorizedArrayType>::
- DataAccessOnFaces::unspecified ||
- vec.size() == 0)
- vec.zero_out_ghosts();
- else
+ if (vec.size() != 0)
{
# ifdef DEAL_II_WITH_MPI
AssertDimension(requests.size(), tmp_data.size());
const unsigned int mf_component = find_vector_in_mf(vec);
- const Utilities::MPI::Partitioner &part =
- get_partitioner(mf_component);
- if (&part ==
- matrix_free.get_dof_info(mf_component).vector_partitioner.get())
- vec.zero_out_ghosts();
- else if (part.n_ghost_indices() > 0)
+
+ const auto &part = get_partitioner(mf_component);
+
+ if (part.n_ghost_indices() > 0)
{
- for (std::vector<std::pair<unsigned int, unsigned int>>::
- const_iterator my_ghosts =
- part.ghost_indices_within_larger_ghost_set().begin();
- my_ghosts !=
- part.ghost_indices_within_larger_ghost_set().end();
- ++my_ghosts)
- for (unsigned int j = my_ghosts->first; j < my_ghosts->second;
- j++)
- {
- const_cast<LinearAlgebra::distributed::Vector<Number> &>(
- vec)
- .local_element(j + part.local_size()) = 0.;
- }
+ part.reset_ghost_values(ArrayView<Number>(
+ const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec)
+ .begin() +
+ part.local_size(),
+ matrix_free.get_dof_info(mf_component)
+ .vector_partitioner->n_ghost_indices()));
}
- // let vector know that it's not ghosted anymore
- vec.set_ghost_state(false);
# endif
}
+ // let vector know that it's not ghosted anymore
+ vec.set_ghost_state(false);
}
std::make_shared<Utilities::MPI::Partitioner>(locally_owned_dofs[no],
task_info.communicator);
+ dof_info[no].vector_exchanger =
+ std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
+ PartitionerWrapper>(
+ dof_info[no].vector_partitioner);
+
// initialize the arrays for indices
const unsigned int n_components_total =
dof_info[no].start_components.back();
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_matrix_free_vector_data_exchange_h
+#define dealii_matrix_free_vector_data_exchange_h
+
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/partitioner.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ namespace MatrixFreeFunctions
+ {
+ /**
+ * Namespace containing classes for inter-process data exchange (i.e.,
+ * for update_ghost_values and compress) in MatrixFree.
+ */
+ namespace VectorDataExchange
+ {
+ /**
+ * Interface needed by MatrixFree.
+ */
+ class Base
+ {
+ public:
+ virtual ~Base() = default;
+
+ virtual unsigned int
+ local_size() const = 0;
+
+ virtual unsigned int
+ n_ghost_indices() const = 0;
+
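+ /**
+ * Return the number of entries imported from other ranks during
+ * compress(), i.e., the size of the temporary storage used by the
+ * export/import functions below.
+ */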
+ virtual unsigned int
+ n_import_indices() const = 0;
+
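+ /**
+ * Return the number of processes in the same shared-memory domain with
+ * which data is exchanged; implementations without shared-memory
+ * support (such as PartitionerWrapper below) return zero.
+ */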
+ virtual unsigned int
+ n_import_sm_procs() const = 0;
+
+ virtual types::global_dof_index
+ size() const = 0;
+
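+ /**
+ * The following functions mirror the export/import interface of
+ * Utilities::MPI::Partitioner, extended by @p shared_arrays, i.e.,
+ * views into the data of the processes of the same shared-memory
+ * domain. The @p double overloads come first; the @p float overloads
+ * below repeat the same interface.
+ */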
+ virtual void
+ export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ export_to_ghosted_array_finish(
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<double> & locally_owned_storage,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<const double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ reset_ghost_values(const ArrayView<double> &ghost_array) const = 0;
+
+ virtual void
+ export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ export_to_ghosted_array_finish(
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<float> & locally_owned_storage,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<const float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const = 0;
+
+ virtual void
+ reset_ghost_values(const ArrayView<float> &ghost_array) const = 0;
+ };
+
+
+ /**
+ * Class that simply delegates the task to a Utilities::MPI::Partitioner.
+ */
+ class PartitionerWrapper : public Base
+ {
+ public:
+ PartitionerWrapper(
+ const std::shared_ptr<const Utilities::MPI::Partitioner>
+ &partitioner);
+
+ virtual ~PartitionerWrapper() = default;
+
+ unsigned int
+ local_size() const override;
+
+ unsigned int
+ n_ghost_indices() const override;
+
+ unsigned int
+ n_import_indices() const override;
+
+ unsigned int
+ n_import_sm_procs() const override;
+
+ types::global_dof_index
+ size() const override;
+
+ void
+ export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ export_to_ghosted_array_finish(
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<double> & locally_owned_storage,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<const double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ reset_ghost_values(const ArrayView<double> &ghost_array) const override;
+
+ void
+ export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ export_to_ghosted_array_finish(
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<float> & locally_owned_storage,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<const float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const override;
+
+ void
+ reset_ghost_values(const ArrayView<float> &ghost_array) const override;
+
+ private:
+ template <typename Number>
+ void
+ reset_ghost_values_impl(const ArrayView<Number> &ghost_array) const;
+
+ const std::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+ };
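+
+ // A minimal construction sketch (not part of this header; `owned`,
+ // `ghosts`, and `comm` are hypothetical index sets and an MPI
+ // communicator):
+ //
+ //   const auto partitioner =
+ //     std::make_shared<Utilities::MPI::Partitioner>(owned, ghosts, comm);
+ //   const PartitionerWrapper exchanger(partitioner);
+ //   // exchanger.local_size() == owned.n_elements(),
+ //   // exchanger.n_import_sm_procs() == 0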
+
+ } // namespace VectorDataExchange
+ } // namespace MatrixFreeFunctions
+} // namespace internal
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
matrix_free.cc
shape_info.cc
task_info.cc
+ vector_data_exchange.cc
)
SET(_inst
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi.templates.h>
+#include <deal.II/base/mpi_compute_index_owner_internal.h>
+#include <deal.II/base/mpi_consensus_algorithms.h>
+#include <deal.II/base/timer.h>
+
+#include <deal.II/matrix_free/vector_data_exchange.h>
+
+#ifdef DEAL_II_WITH_64BIT_INDICES
+# include <deal.II/base/mpi_consensus_algorithms.templates.h>
+#endif
+
+#include <map>
+#include <vector>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ namespace MatrixFreeFunctions
+ {
+ namespace VectorDataExchange
+ {
+ PartitionerWrapper::PartitionerWrapper(
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+ : partitioner(partitioner)
+ {}
+
+
+
+ unsigned int
+ PartitionerWrapper::local_size() const
+ {
+ return partitioner->local_size();
+ }
+
+
+
+ unsigned int
+ PartitionerWrapper::n_ghost_indices() const
+ {
+ return partitioner->n_ghost_indices();
+ }
+
+
+
+ unsigned int
+ PartitionerWrapper::n_import_indices() const
+ {
+ return partitioner->n_import_indices();
+ }
+
+
+
+ unsigned int
+ PartitionerWrapper::n_import_sm_procs() const
+ {
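+ // The plain MPI partitioner has no notion of shared memory, so there
+ // are no shared-memory processes to import from.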
+ return 0;
+ }
+
+
+
+ types::global_dof_index
+ PartitionerWrapper::size() const
+ {
+ return partitioner->size();
+ }
+
+
+
+ void
+ PartitionerWrapper::export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)communication_channel;
+ (void)locally_owned_array;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->export_to_ghosted_array_start(communication_channel,
+ locally_owned_array,
+ temporary_storage,
+ ghost_array,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::export_to_ghosted_array_finish(
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)locally_owned_array;
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)ghost_array;
+ (void)requests;
+#else
+ partitioner->export_to_ghosted_array_finish(ghost_array, requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const double> & locally_owned_array,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)locally_owned_array;
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)communication_channel;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->import_from_ghosted_array_start(vector_operation,
+ communication_channel,
+ ghost_array,
+ temporary_storage,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<double> & locally_owned_storage,
+ const std::vector<ArrayView<const double>> &shared_arrays,
+ const ArrayView<double> & ghost_array,
+ const ArrayView<const double> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)locally_owned_storage;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->import_from_ghosted_array_finish(vector_operation,
+ temporary_storage,
+ locally_owned_storage,
+ ghost_array,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::reset_ghost_values(
+ const ArrayView<double> &ghost_array) const
+ {
+ reset_ghost_values_impl(ghost_array);
+ }
+
+
+
+ void
+ PartitionerWrapper::export_to_ghosted_array_start(
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)communication_channel;
+ (void)locally_owned_array;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->export_to_ghosted_array_start(communication_channel,
+ locally_owned_array,
+ temporary_storage,
+ ghost_array,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::export_to_ghosted_array_finish(
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)locally_owned_array;
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)ghost_array;
+ (void)requests;
+#else
+ partitioner->export_to_ghosted_array_finish(ghost_array, requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::import_from_ghosted_array_start(
+ const VectorOperation::values vector_operation,
+ const unsigned int communication_channel,
+ const ArrayView<const float> & locally_owned_array,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)locally_owned_array;
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)communication_channel;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->import_from_ghosted_array_start(vector_operation,
+ communication_channel,
+ ghost_array,
+ temporary_storage,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::import_from_ghosted_array_finish(
+ const VectorOperation::values vector_operation,
+ const ArrayView<float> & locally_owned_storage,
+ const std::vector<ArrayView<const float>> &shared_arrays,
+ const ArrayView<float> & ghost_array,
+ const ArrayView<const float> & temporary_storage,
+ std::vector<MPI_Request> & requests) const
+ {
+ (void)shared_arrays;
+#ifndef DEAL_II_WITH_MPI
+ (void)vector_operation;
+ (void)locally_owned_storage;
+ (void)ghost_array;
+ (void)temporary_storage;
+ (void)requests;
+#else
+ partitioner->import_from_ghosted_array_finish(vector_operation,
+ temporary_storage,
+ locally_owned_storage,
+ ghost_array,
+ requests);
+#endif
+ }
+
+
+
+ void
+ PartitionerWrapper::reset_ghost_values(
+ const ArrayView<float> &ghost_array) const
+ {
+ reset_ghost_values_impl(ghost_array);
+ }
+
+
+
+ template <typename Number>
+ void
+ PartitionerWrapper::reset_ghost_values_impl(
+ const ArrayView<Number> &ghost_array) const
+ {
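+ // Zero only the ghost entries that belong to this partitioner; the
+ // ranges are expressed relative to the (possibly larger) ghost set
+ // for which the ghost array is laid out.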
+ for (const auto &my_ghosts :
+ partitioner->ghost_indices_within_larger_ghost_set())
+ for (unsigned int j = my_ghosts.first; j < my_ghosts.second; ++j)
+ ghost_array[j] = 0.;
+ }
+
+ } // namespace VectorDataExchange
+ } // namespace MatrixFreeFunctions
+} // namespace internal
+
+
+DEAL_II_NAMESPACE_CLOSE