#define dealii_sundials_arkode_h
#include <deal.II/base/config.h>
+#include <deal.II/base/mpi.h>
#ifdef DEAL_II_WITH_SUNDIALS
#include <deal.II/base/logstream.h>
#include <arkode/arkode.h>
#include <arkode/arkode_impl.h>
#include <nvector/nvector_serial.h>
+#ifdef DEAL_II_WITH_MPI
+#include <nvector/nvector_parallel.h>
+#endif
#include <sundials/sundials_math.h>
#include <sundials/sundials_types.h>
maximum_non_linear_iterations(maximum_non_linear_iterations),
implicit_function_is_linear(implicit_function_is_linear),
implicit_function_is_time_independent(implicit_function_is_time_independent)
- {};
+ {}
/**
* Constructor. It is possible to fine tune the SUNDIALS ARKode solver by
* passing an AdditionalData() object that sets all of the solver
* parameters.
*
+ * The MPI communicator is simply ignored in the serial case.
+ *
* @param data ARKode configuration data
* @param mpi_comm MPI communicator
*/
*/
N_Vector abs_tolls;
-#ifdef DEAL_II_WITH_MPI
/**
- * MPI communicator. SUNDIALS solver runs happily in parallel.
+ * MPI communicator. The SUNDIALS solver runs happily in
+ * parallel. Note that if the library is compiled without MPI
+ * support, MPI_Comm is typedef'd to int.
*/
MPI_Comm communicator;
-#endif
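// For illustration: the comment above relies on deal.II's convention that, in
// a build without MPI, <deal.II/base/mpi.h> provides a minimal stand-in for
// MPI_Comm, which is why the member can now be declared unconditionally. A
// simplified sketch of that fallback (the exact declarations live in the
// deal.II headers) looks along these lines:
#ifndef DEAL_II_WITH_MPI
typedef int MPI_Comm;                     // MPI_Comm degenerates to a plain int
static const MPI_Comm MPI_COMM_SELF  = 0; // predefined communicators become
static const MPI_Comm MPI_COMM_WORLD = 0; // harmless integer constants
#endif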
/**
* Memory pool of vectors.
}
template <typename VectorType>
- ARKode<VectorType>::ARKode(const AdditionalData &data, const MPI_Comm mpi_comm) :
+ ARKode<VectorType>::ARKode(const AdditionalData &data,
+ const MPI_Comm mpi_comm) :
data(data),
arkode_mem(nullptr),
communicator(Utilities::MPI::duplicate_communicator(mpi_comm))
{
if (arkode_mem)
ARKodeFree(&arkode_mem);
+#ifdef DEAL_II_WITH_MPI
MPI_Comm_free(&communicator);
+#endif
}
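// For illustration: a minimal usage sketch of the constructor and communicator
// handling shown above. The AdditionalData defaults, the explicit_function
// callback, and solve_ode() are assumed from the rest of this class; the
// function name and the right-hand side y' = y are made up for the example,
// and the communicator is simply ignored in a serial build.
void example_arkode_usage()
{
  SUNDIALS::ARKode<Vector<double>>::AdditionalData data;
  SUNDIALS::ARKode<Vector<double>>                 ode(data, MPI_COMM_WORLD);

  // Explicit part of the ODE: here simply y' = y.
  ode.explicit_function =
    [](const double, const Vector<double> &y, Vector<double> &ydot) -> int
  {
    ydot = y;
    return 0;
  };

  Vector<double> y(1);
  y[0] = 1.0;
  ode.solve_ode(y); // integrate from initial_time to final_time
}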
{
unsigned int system_size = solution.size();
- unsigned int local_system_size = system_size;
double t = data.initial_time;
double h = data.initial_step_size;
#ifdef DEAL_II_WITH_MPI
if (is_serial_vector<VectorType>::value == false)
{
- IndexSet is = solution.locally_owned_elements();
- local_system_size = is.n_elements();
+ const IndexSet is = solution.locally_owned_elements();
+ const size_t local_system_size = is.n_elements();
yy = N_VNew_Parallel(communicator,
local_system_size,
{
unsigned int system_size;
- unsigned int local_system_size;
if (arkode_mem)
ARKodeFree(&arkode_mem);
#ifdef DEAL_II_WITH_MPI
if (is_serial_vector<VectorType>::value == false)
{
- IndexSet is = solution.locally_owned_elements();
- local_system_size = is.n_elements();
+ const IndexSet is = solution.locally_owned_elements();
+ const size_t local_system_size = is.n_elements();
yy = N_VNew_Parallel(communicator,
local_system_size,
length = NV_LENGTH_S(vec);
break;
}
+#ifdef DEAL_II_WITH_MPI
case SUNDIALS_NVEC_PARALLEL:
{
length = NV_LOCLENGTH_P(vec);
break;
}
+#endif
default:
Assert(false, ExcNotImplemented());
}
void copy(TrilinosWrappers::MPI::Vector &dst, const N_Vector &src)
{
- IndexSet is = dst.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(src));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = dst.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(src));
+ for (size_t i=0; i<N; ++i)
{
dst[is.nth_index_in_set(i)] = NV_Ith_P(src, i);
}
void copy(N_Vector &dst, const TrilinosWrappers::MPI::Vector &src)
{
- IndexSet is = src.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(dst));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = src.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(dst));
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_P(dst, i) = src[is.nth_index_in_set(i)];
}
void copy(TrilinosWrappers::MPI::BlockVector &dst, const N_Vector &src)
{
- IndexSet is = dst.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(src));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = dst.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(src));
+ for (size_t i=0; i<N; ++i)
{
dst[is.nth_index_in_set(i)] = NV_Ith_P(src, i);
}
void copy(N_Vector &dst, const TrilinosWrappers::MPI::BlockVector &src)
{
- IndexSet is = src.locally_owned_elements();
+ const IndexSet is = src.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(dst));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(dst));
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_P(dst, i) = src[is.nth_index_in_set(i)];
}
void copy(PETScWrappers::MPI::Vector &dst, const N_Vector &src)
{
- IndexSet is = dst.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(src));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = dst.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(src));
+ for (size_t i=0; i<N; ++i)
{
dst[is.nth_index_in_set(i)] = NV_Ith_P(src, i);
}
void copy(N_Vector &dst, const PETScWrappers::MPI::Vector &src)
{
- IndexSet is = src.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(dst));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = src.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(dst));
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_P(dst, i) = src[is.nth_index_in_set(i)];
}
void copy(PETScWrappers::MPI::BlockVector &dst, const N_Vector &src)
{
- IndexSet is = dst.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(src));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = dst.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(src));
+ for (size_t i=0; i<N; ++i)
{
dst[is.nth_index_in_set(i)] = NV_Ith_P(src, i);
}
void copy(N_Vector &dst, const PETScWrappers::MPI::BlockVector &src)
{
- IndexSet is = src.locally_owned_elements();
- AssertDimension(is.n_elements(), N_Vector_length(dst));
- for (unsigned int i=0; i<is.n_elements(); ++i)
+ const IndexSet is = src.locally_owned_elements();
+ const size_t N = is.n_elements();
+ AssertDimension(N, N_Vector_length(dst));
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_P(dst, i) = src[is.nth_index_in_set(i)];
}
void copy(BlockVector<double> &dst, const N_Vector &src)
{
- AssertDimension(N_Vector_length(src), dst.size());
- for (unsigned int i=0; i<dst.size(); ++i)
+ const size_t N = dst.size();
+ AssertDimension(N_Vector_length(src), N);
+ for (size_t i=0; i<N; ++i)
{
dst[i] = NV_Ith_S(src, i);
}
void copy(N_Vector &dst, const BlockVector<double> &src)
{
- AssertDimension(N_Vector_length(dst), src.size());
- for (unsigned int i=0; i<src.size(); ++i)
+ const size_t N = src.size();
+ AssertDimension(N_Vector_length(dst), N);
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_S(dst, i) = src[i];
}
void copy(Vector<double> &dst, const N_Vector &src)
{
- AssertDimension(N_Vector_length(src), dst.size());
- for (unsigned int i=0; i<dst.size(); ++i)
+ const size_t N = dst.size();
+ AssertDimension(N_Vector_length(src), N);
+ for (size_t i=0; i<N; ++i)
{
dst[i] = NV_Ith_S(src, i);
}
void copy(N_Vector &dst, const Vector<double> &src)
{
- AssertDimension(N_Vector_length(dst), src.size());
- for (unsigned int i=0; i<src.size(); ++i)
+ const size_t N = src.size();
+ AssertDimension(N_Vector_length(dst), N);
+ for (size_t i=0; i<N; ++i)
{
NV_Ith_S(dst, i) = src[i];
}
}
template <typename VectorType>
- IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm mpi_comm) :
+ IDA<VectorType>::IDA(const AdditionalData &data,
+ const MPI_Comm mpi_comm) :
data(data),
ida_mem(nullptr),
communicator(Utilities::MPI::duplicate_communicator(mpi_comm))
{
if (ida_mem)
IDAFree(&ida_mem);
+#ifdef DEAL_II_WITH_MPI
MPI_Comm_free(&communicator);
+#endif
}
{
unsigned int system_size = solution.size();
- unsigned int local_system_size = system_size;
double t = data.initial_time;
double h = data.initial_step_size;
#ifdef DEAL_II_WITH_MPI
if (is_serial_vector<VectorType>::value == false)
{
- IndexSet is = solution.locally_owned_elements();
- local_system_size = is.n_elements();
+ const IndexSet is = solution.locally_owned_elements();
+ const size_t local_system_size = is.n_elements();
yy = N_VNew_Parallel(communicator,
local_system_size,
{
unsigned int system_size;
- unsigned int local_system_size;
bool first_step = (current_time == data.initial_time);
if (ida_mem)
#ifdef DEAL_II_WITH_MPI
if (is_serial_vector<VectorType>::value == false)
{
- IndexSet is = solution.locally_owned_elements();
- local_system_size = is.n_elements();
+ const IndexSet is = solution.locally_owned_elements();
+ const size_t local_system_size = is.n_elements();
yy = N_VNew_Parallel(communicator,
local_system_size,