// by changing the constant dimension below to 3.
int main (int argc, char *argv[])
{
- using namespace Step32;
- using namespace dealii;
-
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
- numbers::invalid_unsigned_int);
-
try
{
+ using namespace Step32;
+ using namespace dealii;
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
+ numbers::invalid_unsigned_int);
+
std::string parameter_filename;
if (argc >= 2)
parameter_filename = argv[1];
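
Constructing the MPI_InitFinalize object inside the try block means that a failure during MPI startup is reported through the program's normal exception handlers instead of escaping main(). A minimal sketch of the resulting pattern; the catch handlers here are abbreviated stand-ins for the fuller ones the tutorial programs use:

#include <deal.II/base/mpi.h>
#include <deal.II/base/numbers.h>
#include <exception>
#include <iostream>

int main (int argc, char *argv[])
{
  try
    {
      using namespace dealii;

      // If MPI startup fails, the exception lands in the handlers
      // below instead of propagating out of main().
      Utilities::MPI::MPI_InitFinalize mpi_initialization(
        argc, argv, numbers::invalid_unsigned_int);

      // ... set up and run the program ...
    }
  catch (std::exception &exc)
    {
      std::cerr << "Exception: " << exc.what() << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << "Unknown exception!" << std::endl;
      return 1;
    }

  return 0;
}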
// in step-6:
int main (int argc, char *argv[])
{
- dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
-
try
{
using namespace dealii;
using namespace Step50;
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
LaplaceProblem<2> laplace_problem(1/*degree*/);
laplace_problem.run ();
}
inline
Vector<Number>::~Vector ()
{
- clear_mpi_requests();
+ try
+ {
+ clear_mpi_requests();
+ }
+ catch (...)
+ {}
}
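
Since C++11, destructors are implicitly noexcept, so an exception escaping one calls std::terminate(). Wrapping cleanup that may throw in try/catch, as above, swallows the error instead. A generic, self-contained sketch of the idiom; the class and its release() member are illustrative, not deal.II API:

#include <stdexcept>

class ResourceHolder  // illustrative only
{
public:
  ~ResourceHolder()
  {
    try
      {
        release();  // cleanup that may throw, e.g. a failing MPI call
      }
    catch (...)
      {
        // Swallow the error: letting it escape an implicitly noexcept
        // destructor would call std::terminate().
      }
  }

private:
  void release()
  {
    throw std::runtime_error("cleanup failed");
  }
};

int main()
{
  ResourceHolder holder;  // destructor absorbs the failure
}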
// virtual functions called in constructors and destructors never use the
// override in a derived class; for clarity, be explicit about which
// function is called:
- Triangulation<dim, spacedim>::clear ();
+ try
+ {
+ Triangulation<dim, spacedim>::clear ();
+ }
+ catch (...)
+ {}
- Assert (triangulation_has_content == false,
- ExcInternalError());
- Assert (connectivity == nullptr, ExcInternalError());
- Assert (parallel_forest == nullptr, ExcInternalError());
+ AssertNothrow (triangulation_has_content == false,
+ ExcInternalError());
+ AssertNothrow (connectivity == nullptr, ExcInternalError());
+ AssertNothrow (parallel_forest == nullptr, ExcInternalError());
}
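
The comment above the clear() call is worth a standalone illustration: while a destructor runs, the object's dynamic type has already reverted to the class being destroyed, so a virtual call cannot reach a derived override anyway; the explicit qualification simply makes that visible in the code. A small self-contained example:

#include <iostream>

struct Base
{
  virtual ~Base()
  {
    clear();        // dispatches to Base::clear(), not Derived::clear()
    Base::clear();  // explicit qualification says the same thing clearly
  }
  virtual void clear() { std::cout << "Base::clear\n"; }
};

struct Derived : Base
{
  void clear() override { std::cout << "Derived::clear\n"; }
};

int main()
{
  Base *p = new Derived;
  delete p;  // prints "Base::clear" twice
}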
template <int spacedim>
Triangulation<1,spacedim>::~Triangulation ()
{
- Assert (false, ExcNotImplemented());
+ AssertNothrow (false, ExcNotImplemented());
}
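
Assert can be configured to throw rather than abort (the test suite does this via deal_II_exceptions::disable_abort_on_exception()), and a throw escaping a destructor terminates the program; AssertNothrow reports the failure without ever throwing, which is why these destructor-side checks are downgraded. A compilable sketch, assuming a hypothetical wrapper class:

#include <deal.II/base/exceptions.h>

using namespace dealii;

struct PForestWrapper  // hypothetical wrapper, for illustration only
{
  void *forest = nullptr;

  ~PForestWrapper()
  {
    // A plain Assert could throw here when abort-on-exception is
    // disabled; AssertNothrow reports the violated invariant without
    // throwing.
    AssertNothrow(forest == nullptr, ExcInternalError());
  }
};

int main()
{
  PForestWrapper w;  // destructor runs the nothrow check
}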
// different tags for phase 1 and 2, but the cost of a
// barrier is negligible compared to everything else we do
// here
- const parallel::distributed::Triangulation< dim, spacedim > *triangulation
- = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
- (&dof_handler.get_triangulation()));
- const int ierr = MPI_Barrier(triangulation->get_communicator());
- AssertThrowMPI(ierr);
+ if (const auto *triangulation =
+       dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim> *>(
+         &dof_handler.get_triangulation()))
+ {
+ const int ierr = MPI_Barrier(triangulation->get_communicator());
+ AssertThrowMPI(ierr);
+ }
+ else
+ {
+ Assert(false, ExcMessage("The function communicate_dof_indices_on_marked_cells() "
+ "only works with parallel distributed triangulations."));
+ }
#endif
}
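
Declaring the pointer in the if condition both scopes it to the branch and guarantees it is only dereferenced when the dynamic_cast succeeded; the old code would have dereferenced a null pointer whenever the DoFHandler was built on a non-distributed triangulation. A self-contained illustration of the pattern, with stand-in types instead of the deal.II classes:

#include <cassert>

struct Triangulation  // stand-in, not the deal.II class
{
  virtual ~Triangulation() = default;
};

struct DistributedTriangulation : Triangulation
{
  int get_communicator() const { return 42; }  // stand-in for an MPI_Comm
};

void barrier_if_distributed(const Triangulation &tria)
{
  // The pointer exists only inside the branch and is non-null there.
  if (const auto *dist =
        dynamic_cast<const DistributedTriangulation *>(&tria))
    {
      const int comm = dist->get_communicator();  // safe to dereference
      (void)comm;  // the real code calls MPI_Barrier(comm) here
    }
  else
    {
      assert(false && "only meaningful for distributed triangulations");
    }
}

int main()
{
  DistributedTriangulation tria;
  barrier_if_distributed(tria);
}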
const MPI_Comm mpi_comm) :
data(data),
arkode_mem(nullptr),
+ yy(nullptr),
+ abs_tolls(nullptr),
communicator(is_serial_vector<VectorType>::value ?
MPI_COMM_SELF :
Utilities::MPI::duplicate_communicator(mpi_comm))
if (is_serial_vector<VectorType>::value == false)
{
const int ierr = MPI_Comm_free(&communicator);
- AssertThrowMPI(ierr);
+ AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
}
#endif
}
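
Initializing the solver-side pointers to nullptr in the member-initializer list gives the destructor a known state to test against; without it, destroying a wrapper whose setup never ran would hand indeterminate pointers to the deallocation calls. A generic sketch of the idiom; SolverWrapper and its members are illustrative, not the SUNDIALS interface:

#include <cstdlib>

class SolverWrapper  // illustrative only
{
public:
  SolverWrapper()
    : mem(nullptr),   // without these initializers the members would
      work(nullptr)   // hold indeterminate values ...
  {}

  ~SolverWrapper()
  {
    // ... and these calls would free garbage pointers whenever setup()
    // was never run.  std::free(nullptr) is a harmless no-op.
    std::free(mem);
    std::free(work);
  }

  void setup()
  {
    mem  = std::malloc(1024);
    work = std::malloc(1024);
  }

private:
  void *mem;
  void *work;
};

int main()
{
  SolverWrapper w;  // destroyed without setup(): safe thanks to nullptr init
}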
const MPI_Comm mpi_comm) :
data(data),
ida_mem(nullptr),
+ yy(nullptr),
+ yp(nullptr),
+ abs_tolls(nullptr),
+ diff_id(nullptr),
communicator(is_serial_vector<VectorType>::value ?
MPI_COMM_SELF :
Utilities::MPI::duplicate_communicator(mpi_comm))
if (is_serial_vector<VectorType>::value == false)
{
const int ierr = MPI_Comm_free(&communicator);
- AssertThrowMPI(ierr);
+ AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
}
#endif
}
KINSOL<VectorType>::KINSOL(const AdditionalData &data, const MPI_Comm mpi_comm) :
data(data),
kinsol_mem(nullptr),
+ solution(nullptr),
+ u_scale(nullptr),
+ f_scale(nullptr),
communicator(is_serial_vector<VectorType>::value ?
MPI_COMM_SELF :
Utilities::MPI::duplicate_communicator(mpi_comm))
if (is_serial_vector<VectorType>::value == false)
{
const int ierr = MPI_Comm_free(&communicator);
- AssertThrowMPI(ierr);
+ AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
}
#endif
}
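
The same reasoning applies to the duplicated communicator in all three wrappers: the destructor has to free it, but an AssertThrowMPI there could throw out of a noexcept destructor, hence the downgrade to AssertNothrow. A condensed, runnable sketch of the constructor/destructor pairing; CommOwner is illustrative only:

#include <mpi.h>

class CommOwner  // condensed illustration of the wrappers' communicator handling
{
public:
  explicit CommOwner(const MPI_Comm mpi_comm)
  {
    // Work on a private duplicate so internal messages cannot collide
    // with user communication on the original communicator.
    MPI_Comm_dup(mpi_comm, &communicator);
  }

  ~CommOwner()
  {
    const int ierr = MPI_Comm_free(&communicator);
    // Destructors must not throw, so only report failure; in deal.II
    // this is AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr)).
    (void)ierr;
  }

private:
  MPI_Comm communicator;
};

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  {
    CommOwner owner(MPI_COMM_WORLD);
  }  // duplicate released here
  MPI_Finalize();
}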