<h3>General</h3>
<ol>
+<li> New: There is now a namespace Utilities::MPI that holds some of the MPI-related
+functions that were previously part of Utilities::System. Specifically, the following
+functions were moved and, in some cases, renamed: Utilities::System::get_n_mpi_processes
+is now Utilities::MPI::n_mpi_processes; Utilities::System::get_this_mpi_process
+is now Utilities::MPI::this_mpi_process; Utilities::System::compute_point_to_point_communication_pattern
+is now Utilities::MPI::compute_point_to_point_communication_pattern;
+Utilities::System::duplicate_communicator
+is now Utilities::MPI::duplicate_communicator.
+<br>
+(Wolfgang Bangerth, 2011/09/26)
+
<li> New: When using a new enough version of GCC, debug sections in
object files are now compressed using the <code>-Wa,--compress-debug-sections</code>
flag, resulting in savings in disk space on the order of 230 MB.
dof_handler (triangulation),
fe (FE_Q<dim>(1), dim),
mpi_communicator (MPI_COMM_WORLD),
- n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)),
- this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator))
+ n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
+ this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator))
{
pcout.set_condition(this_mpi_process == 0);
}
dof_handler (triangulation),
quadrature_formula (2),
mpi_communicator (MPI_COMM_WORLD),
- n_mpi_processes (Utilities::System::get_n_mpi_processes(mpi_communicator)),
- this_mpi_process (Utilities::System::get_this_mpi_process(mpi_communicator)),
+ n_mpi_processes (Utilities::MPI::n_mpi_processes(mpi_communicator)),
+ this_mpi_process (Utilities::MPI::this_mpi_process(mpi_communicator)),
pcout (std::cout, this_mpi_process == 0)
{}
void solve_S(TrilinosWrappers::MPI::Vector &dst,
const TrilinosWrappers::MPI::Vector &src) const
{
+//TODO: shouldn't this be a *relative* tolerance
SolverControl cn(5000, 1e-5);
TrilinosWrappers::SolverCG solver(cn);
endc = stokes_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values[velocities].get_function_values (stokes_solution,
endc = stokes_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values[velocities].get_function_values (stokes_solution,
endc = temperature_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values.get_function_values (old_temperature_solution,
endc = temperature_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values.get_function_values (old_temperature_solution,
endc = temperature_dof_handler.end();
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
fe_values.reinit (cell);
fe_values.get_function_values (old_temperature_solution,
for (; cell!=endc; ++cell)
if (cell->subdomain_id() ==
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD))
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD))
{
cell->get_dof_indices (local_dof_indices);
fe_values.reinit (cell);
IndexSet stokes_relevant_set;
{
const unsigned int my_id =
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
stokes_partitioning.push_back(stokes_index_set.get_view(0,n_u));
stokes_partitioning.push_back(stokes_index_set.get_view(n_u,n_u+n_p));
const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree+1);
+//TODO: define a new filter for locally owned cells
typedef
FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
SubdomainFilter;
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_stokes_preconditioner,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
stokes_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_stokes_system,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_temperature_matrix,
WorkStream::
run (SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.begin_active()),
SubdomainFilter (IteratorFilters::SubdomainEqualTo
- (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)),
temperature_dof_handler.end()),
std_cxx1x::bind (&BoussinesqFlowProblem<dim>::
local_assemble_temperature_rhs,
std::ofstream output (filename.c_str());
data_out.write_vtu (output);
- if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
{
std::vector<std::string> filenames;
- for (unsigned int i=0; i<Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD); ++i)
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
filenames.push_back (std::string("solution-") +
Utilities::int_to_string (out_index, 5) +
"." +
csp,
dof_handler.n_locally_owned_dofs_per_processor(),
dof_handler.n_locally_owned_dofs_per_processor(),
- Utilities::System::get_this_mpi_process(mpi_communicator));
+ Utilities::MPI::this_mpi_process(mpi_communicator));
}
// file; we arbitrarily choose
// processor zero to take over this
// job.
- if (Utilities::System::get_this_mpi_process(mpi_communicator) == 0)
+ if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
{
std::vector<std::string> filenames;
for (unsigned int i=0;
- i<Utilities::System::get_n_mpi_processes(mpi_communicator);
+ i<Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
filenames.push_back ("solution-" +
Utilities::int_to_string (cycle, 2) +
assemble_system ();
solve ();
- if (Utilities::System::get_n_mpi_processes(mpi_communicator) <= 32)
+ if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
output_results (cycle);
pcout << std::endl;
void
Timer::print_data(STREAM & stream) const
{
- unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+ unsigned int my_id = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
if (my_id==0)
stream << mpi_data.max << " wall,"
<< " max @" << mpi_data.max_index
template <typename T>
T sum (const T &t,
const MPI_Comm &mpi_communicator);
+
+ /**
+ * Return the number of MPI processes
+   * that exist in the given communicator
+ * object. If this is a sequential job,
+ * it returns 1.
+ */
+ unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Return the number of the present MPI
+ * process in the space of processes
+ * described by the given
+ * communicator. This will be a unique
+ * value for each process between zero
+ * and (less than) the number of all
+ * processes (given by
+   * n_mpi_processes()).
+ */
+ unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);
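  // A minimal usage sketch for the two functions above, assuming MPI has
  // already been initialized (for instance through
  // Utilities::System::MPI_InitFinalize) and that MPI_COMM_WORLD is the
  // communicator of interest:
  //
  //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
  //   const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
  //   if (my_rank == 0)
  //     std::cout << "Running on " << n_ranks << " process(es)." << std::endl;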
+
+ /**
+ * Consider an unstructured
+ * communication pattern where
+ * every process in an MPI
+ * universe wants to send some
+ * data to a subset of the other
+ * processors. To do that, the
+ * other processors need to know
+ * who to expect messages
+ * from. This function computes
+ * this information.
+ *
+ * @param mpi_comm A communicator
+ * that describes the processors
+ * that are going to communicate
+ * with each other.
+ *
+ * @param destinations The list
+ * of processors the current
+ * process wants to send
+ * information to. This list need
+ * not be sorted in any way. If
+ * it contains duplicate entries
+ * that means that multiple
+ * messages are intended for a
+ * given destination.
+ *
+ * @return A list of processors
+ * that have indicated that they
+ * want to send something to the
+ * current processor. The
+ * resulting list is not
+ * sorted. It may contain
+ * duplicate entries if
+ * processors enter the same
+ * destination more than once in
+ * their destinations list.
+ */
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations);
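  // A minimal usage sketch, assuming each process has filled a
  // std::vector<unsigned int> named send_to with the ranks it wants to send
  // a message to:
  //
  //   const std::vector<unsigned int> origins
  //     = Utilities::MPI::compute_point_to_point_communication_pattern (mpi_communicator,
  //                                                                     send_to);
  //
  //   // post one receive for each entry of 'origins', then issue one send
  //   // for each entry of 'send_to'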
+
+ /**
+ * Given a communicator, generate a new
+ * communicator that contains the same
+ * set of processors but that has a
+ * different, unique identifier.
+ *
+ * This functionality can be used to
+ * ensure that different objects, such as
+ * distributed matrices, each have unique
+ * communicators over which they can
+ * interact without interfering with each
+ * other.
+ *
+ * When no longer needed, the
+ * communicator created here needs to
+ * be destroyed using
+ * <code>MPI_Comm_free</code>.
+ */
+ MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
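  // A minimal usage sketch, assuming 'communicator' is an MPI_Comm handed to
  // the current object from the outside:
  //
  //   MPI_Comm my_communicator
  //     = Utilities::MPI::duplicate_communicator (communicator);
  //   // ... exchange messages on my_communicator without interfering with
  //   // other users of 'communicator' ...
  //   MPI_Comm_free (&my_communicator);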
}
/**
* A namespace for utility functions that
bool job_supports_mpi ();
/**
- * Return the number of MPI processes
- * there exist in the given communicator
- * object. If this is a sequential job,
- * it returns 1.
+ * This function is an alias for
+ * Utilities::MPI::n_mpi_processes.
+ *
+ * @deprecated
*/
unsigned int get_n_mpi_processes (const MPI_Comm &mpi_communicator);
/**
- * Return the number of the present MPI
- * process in the space of processes
- * described by the given
- * communicator. This will be a unique
- * value for each process between zero
- * and (less than) the number of all
- * processes (given by
- * get_n_mpi_processes()).
+ * This function is an alias for
+ * Utilities::MPI::this_mpi_process.
+ *
+ * @deprecated
*/
unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator);
/**
- * Consider an unstructured
- * communication pattern where
- * every process in an MPI
- * universe wants to send some
- * data to a subset of the other
- * processors. To do that, the
- * other processors need to know
- * who to expect messages
- * from. This function computes
- * this information.
- *
- * @param mpi_comm A communicator
- * that describes the processors
- * that are going to communicate
- * with each other.
- *
- * @param destinations The list
- * of processors the current
- * process wants to send
- * information to. This list need
- * not be sorted in any way. If
- * it contains duplicate entries
- * that means that multiple
- * messages are intended for a
- * given destination.
- *
- * @return A list of processors
- * that have indicated that they
- * want to send something to the
- * current processor. The
- * resulting list is not
- * sorted. It may contain
- * duplicate entries if
- * processors enter the same
- * destination more than once in
- * their destinations list.
+ * This function is an alias for
+ * Utilities::MPI::compute_point_to_point_communication_pattern.
+ *
+ * @deprecated
*/
- std::vector<unsigned int>
- compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
- const std::vector<unsigned int> & destinations);
-
+ using
+ Utilities::MPI::compute_point_to_point_communication_pattern;
/**
- * Given a communicator, generate a new
- * communicator that contains the same
- * set of processors but that has a
- * different, unique identifier.
- *
- * This functionality can be used to
- * ensure that different objects, such as
- * distributed matrices, each have unique
- * communicators over which they can
- * interact without interfering with each
- * other.
- *
- * When no longer needed, the
- * communicator created here needs to
- * be destroyed using
- * <code>MPI_Comm_free</code>.
+ * This function is an alias for
+ * Utilities::MPI::duplicate_communicator.
+ *
+ * @deprecated
*/
- MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+ using Utilities::MPI::duplicate_communicator;
/**
* Data structure to store the result of
}
+ namespace MPI
+ {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ // Unfortunately, we have to work
+ // around an oddity in the way PETSc
+ // and some gcc versions interact. If
+ // we use PETSc's MPI dummy
+ // implementation, it expands the
+ // calls to the two MPI functions
+ // basically as ``(n_jobs=1, 0)'',
+ // i.e. it assigns the number one to
+ // the variable holding the number of
+ // jobs, and then uses the comma
+ // operator to let the entire
+ // expression have the value zero. The
+ // latter is important, since
+ // ``MPI_Comm_size'' returns an error
+ // code that we may want to check (we
+ // don't here, but one could in
+ // principle), and the trick with the
+ // comma operator makes sure that both
+ // the number of jobs is correctly
+ // assigned, and the return value is
+ // zero. Unfortunately, if some recent
+ // versions of gcc detect that the
+ // comma expression just stands by
+ // itself, i.e. the result is not
+ // assigned to another variable, then
+ // they warn ``right-hand operand of
+ // comma has no effect''. This
+ // unwanted side effect can be
+ // suppressed by casting the result of
+ // the entire expression to type
+ // ``void'' -- not beautiful, but
+ // helps calming down unwarranted
+ // compiler warnings...
+ unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
+ {
+ int n_jobs=1;
+ (void) MPI_Comm_size (mpi_communicator, &n_jobs);
+
+ return n_jobs;
+ }
+
+
+ unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
+ {
+ int rank=0;
+ (void) MPI_Comm_rank (mpi_communicator, &rank);
+
+ return rank;
+ }
+
+
+ MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
+ {
+ MPI_Comm new_communicator;
+ MPI_Comm_dup (mpi_communicator, &new_communicator);
+ return new_communicator;
+ }
+
+
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations)
+ {
+ unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
+ unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
+
+ for (unsigned int i=0; i<destinations.size(); ++i)
+ {
+ Assert (destinations[i] < n_procs,
+ ExcIndexRange (destinations[i], 0, n_procs));
+ Assert (destinations[i] != myid,
+ ExcMessage ("There is no point in communicating with ourselves."));
+ }
+
+
+ // let all processors
+ // communicate the maximal
+ // number of destinations they
+ // have
+ unsigned int my_n_destinations = destinations.size();
+ unsigned int max_n_destinations = 0;
+
+ MPI_Allreduce (&my_n_destinations, &max_n_destinations, 1, MPI_UNSIGNED,
+ MPI_MAX, mpi_comm);
+
+ // now that we know the number
+ // of data packets every
+ // processor wants to send, set
+ // up a buffer with the maximal
+ // size and copy our
+ // destinations in there,
+ // padded with -1's
+ std::vector<unsigned int> my_destinations(max_n_destinations,
+ numbers::invalid_unsigned_int);
+ std::copy (destinations.begin(), destinations.end(),
+ my_destinations.begin());
+
+ // now exchange these (we could
+ // communicate less data if we
+ // used MPI_Allgatherv, but
+ // we'd have to communicate
+ // my_n_destinations to all
+ // processors in this case,
+ // which is more expensive than
+ // the reduction operation
+ // above in MPI_Allreduce)
+ std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
+ MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ mpi_comm);
+
+ // now we know who is going to
+ // communicate with
+ // whom. collect who is going
+ // to communicate with us!
+ std::vector<unsigned int> origins;
+ for (unsigned int i=0; i<n_procs; ++i)
+ for (unsigned int j=0; j<max_n_destinations; ++j)
+ if (all_destinations[i*max_n_destinations + j] == myid)
+ origins.push_back (i);
+ else if (all_destinations[i*max_n_destinations + j] ==
+ numbers::invalid_unsigned_int)
+ break;
+
+ return origins;
+ }
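  // A self-contained sketch (with made-up numbers) of the padded layout and
  // the scan performed above: three processes, where process 0 sends to
  // {1,2}, process 1 sends to {2}, and process 2 sends to nobody. Seen from
  // process 2, the scan must report origins = {0, 1}:
  //
  //   #include <iostream>
  //   #include <vector>
  //
  //   int main ()
  //   {
  //     const unsigned int invalid = static_cast<unsigned int>(-1);
  //     const unsigned int myid = 2, n_procs = 3, max_n_destinations = 2;
  //     const unsigned int all_destinations[]
  //       = { 1, 2,   2, invalid,   invalid, invalid };
  //
  //     std::vector<unsigned int> origins;
  //     for (unsigned int i=0; i<n_procs; ++i)
  //       for (unsigned int j=0; j<max_n_destinations; ++j)
  //         if (all_destinations[i*max_n_destinations + j] == myid)
  //           origins.push_back (i);
  //         else if (all_destinations[i*max_n_destinations + j] == invalid)
  //           break;
  //
  //     for (unsigned int i=0; i<origins.size(); ++i)
  //       std::cout << origins[i] << ' ';   // prints: 0 1
  //   }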
+
+#else
+
+ unsigned int n_mpi_processes (const MPI_Comm &)
+ {
+ return 1;
+ }
+
+
+
+ unsigned int this_mpi_process (const MPI_Comm &)
+ {
+ return 0;
+ }
+
+
+ MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
+ {
+ return mpi_communicator;
+ }
+#endif
+ }
namespace System
return (MPI_has_been_started > 0);
}
- // Unfortunately, we have to work
- // around an oddity in the way PETSc
- // and some gcc versions interact. If
- // we use PETSc's MPI dummy
- // implementation, it expands the
- // calls to the two MPI functions
- // basically as ``(n_jobs=1, 0)'',
- // i.e. it assigns the number one to
- // the variable holding the number of
- // jobs, and then uses the comma
- // operator to let the entire
- // expression have the value zero. The
- // latter is important, since
- // ``MPI_Comm_size'' returns an error
- // code that we may want to check (we
- // don't here, but one could in
- // principle), and the trick with the
- // comma operator makes sure that both
- // the number of jobs is correctly
- // assigned, and the return value is
- // zero. Unfortunately, if some recent
- // versions of gcc detect that the
- // comma expression just stands by
- // itself, i.e. the result is not
- // assigned to another variable, then
- // they warn ``right-hand operand of
- // comma has no effect''. This
- // unwanted side effect can be
- // suppressed by casting the result of
- // the entire expression to type
- // ``void'' -- not beautiful, but
- // helps calming down unwarranted
- // compiler warnings...
- unsigned int get_n_mpi_processes (const MPI_Comm &mpi_communicator)
- {
- int n_jobs=1;
- (void) MPI_Comm_size (mpi_communicator, &n_jobs);
-
- return n_jobs;
- }
-
-
- unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator)
- {
- int rank=0;
- (void) MPI_Comm_rank (mpi_communicator, &rank);
-
- return rank;
- }
-
-
- MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
- {
- MPI_Comm new_communicator;
- MPI_Comm_dup (mpi_communicator, &new_communicator);
- return new_communicator;
- }
-
namespace
{
MinMaxAvg & result)
{
const unsigned int my_id
- = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+ = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
const unsigned int numproc
- = dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+ = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
MPI_Op op;
int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
}
- std::vector<unsigned int>
- compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
- const std::vector<unsigned int> & destinations)
- {
- unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
- unsigned int n_procs = Utilities::System::get_n_mpi_processes(mpi_comm);
-
- for (unsigned int i=0; i<destinations.size(); ++i)
- {
- Assert (destinations[i] < n_procs,
- ExcIndexRange (destinations[i], 0, n_procs));
- Assert (destinations[i] != myid,
- ExcMessage ("There is no point in communicating with ourselves."));
- }
-
-
- // let all processors
- // communicate the maximal
- // number of destinations they
- // have
- unsigned int my_n_destinations = destinations.size();
- unsigned int max_n_destinations = 0;
-
- MPI_Allreduce (&my_n_destinations, &max_n_destinations, 1, MPI_UNSIGNED,
- MPI_MAX, mpi_comm);
-
- // now that we know the number
- // of data packets every
- // processor wants to send, set
- // up a buffer with the maximal
- // size and copy our
- // destinations in there,
- // padded with -1's
- std::vector<unsigned int> my_destinations(max_n_destinations,
- numbers::invalid_unsigned_int);
- std::copy (destinations.begin(), destinations.end(),
- my_destinations.begin());
-
- // now exchange these (we could
- // communicate less data if we
- // used MPI_Allgatherv, but
- // we'd have to communicate
- // my_n_destinations to all
- // processors in this case,
- // which is more expensive than
- // the reduction operation
- // above in MPI_Allreduce)
- std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
- MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
- &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
- mpi_comm);
-
- // now we know who is going to
- // communicate with
- // whom. collect who is going
- // to communicate with us!
- std::vector<unsigned int> origins;
- for (unsigned int i=0; i<n_procs; ++i)
- for (unsigned int j=0; j<max_n_destinations; ++j)
- if (all_destinations[i*max_n_destinations + j] == myid)
- origins.push_back (i);
- else if (all_destinations[i*max_n_destinations + j] ==
- numbers::invalid_unsigned_int)
- break;
-
- return origins;
- }
#else
- unsigned int get_n_mpi_processes (const MPI_Comm &)
- {
- return 1;
- }
-
-
-
- unsigned int get_this_mpi_process (const MPI_Comm &)
- {
- return 0;
- }
void calculate_collective_mpi_min_max_avg(const MPI_Comm &,
}
- MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
- {
- return mpi_communicator;
- }
#endif
return false;
#endif
}
+
+
+ unsigned int get_n_mpi_processes (const MPI_Comm &mpi_communicator)
+ {
+ return MPI::n_mpi_processes (mpi_communicator);
+ }
+
+ unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator)
+ {
+ return MPI::this_mpi_process (mpi_communicator);
+ }
}
// make sure only processor zero
// got something
- if (Utilities::System::get_this_mpi_process (mpi_communicator) != 0)
+ if (Utilities::MPI::this_mpi_process (mpi_communicator) != 0)
Assert ((result[0] == 0) && (result[1] == 0),
ExcInternalError());
// make sure only processor zero
// got something
- if (Utilities::System::get_this_mpi_process (mpi_communicator) != 0)
+ if (Utilities::MPI::this_mpi_process (mpi_communicator) != 0)
Assert (result == 0, ExcInternalError());
return result;
// from here on designate a
// master and slaves
double top_threshold, bottom_threshold;
- if (Utilities::System::get_this_mpi_process (mpi_communicator) == 0)
+ if (Utilities::MPI::this_mpi_process (mpi_communicator) == 0)
{
// this is the master
// processor
// from here on designate a
// master and slaves
double top_threshold, bottom_threshold;
- if (Utilities::System::get_this_mpi_process (mpi_communicator) == 0)
+ if (Utilities::MPI::this_mpi_process (mpi_communicator) == 0)
{
// this is the master
// processor
dealii::Triangulation<dim,spacedim>
(smooth_grid,
false),
- mpi_communicator (Utilities::System::
+ mpi_communicator (Utilities::MPI::
duplicate_communicator(mpi_communicator)),
- my_subdomain (Utilities::System::get_this_mpi_process (this->mpi_communicator)),
+ my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
triangulation_has_content (false),
connectivity (0),
parallel_forest (0),
dealii::internal::p4est::InitFinalize::do_initialize ();
number_cache.n_locally_owned_active_cells
- .resize (Utilities::System::get_n_mpi_processes (mpi_communicator));
+ .resize (Utilities::MPI::n_mpi_processes (mpi_communicator));
}
{
const unsigned int total_local_cells = this->n_active_cells();
- if (Utilities::System::get_n_mpi_processes (mpi_communicator) == 1)
+ if (Utilities::MPI::n_mpi_processes (mpi_communicator) == 1)
Assert (static_cast<unsigned int>(parallel_forest->local_num_quadrants) ==
total_local_cells,
ExcInternalError())
{
Assert (number_cache.n_locally_owned_active_cells.size()
==
- Utilities::System::get_n_mpi_processes (mpi_communicator),
+ Utilities::MPI::n_mpi_processes (mpi_communicator),
ExcInternalError());
std::fill (number_cache.n_locally_owned_active_cells.begin(),
Assert (tr != 0, ExcInternalError());
const unsigned int
- n_cpus = Utilities::System::get_n_mpi_processes (tr->get_communicator());
+ n_cpus = Utilities::MPI::n_mpi_processes (tr->get_communicator());
//* 1. distribute on own
//* subdomain
// gather information from all CPUs
std::vector<unsigned int>
all_dof_counts(fe_collection.n_components() *
- Utilities::System::get_n_mpi_processes (tria->get_communicator()));
+ Utilities::MPI::n_mpi_processes (tria->get_communicator()));
MPI_Allgather ( &local_dof_count[0], n_buckets, MPI_UNSIGNED, &all_dof_counts[0],
n_buckets, MPI_UNSIGNED, tria->get_communicator());
shifts[c]=cumulated;
for (types::subdomain_id_t i=0; i<tria->locally_owned_subdomain(); ++i)
shifts[c] += all_dof_counts[c+n_buckets*i];
- for (unsigned int i=0; i<Utilities::System::get_n_mpi_processes (tria->get_communicator()); ++i)
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes (tria->get_communicator()); ++i)
cumulated += all_dof_counts[c+n_buckets*i];
}
#else
dmumps_c (&id);
// Hand over matrix and right-hand side
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
// Objects denoting a MUMPS data structure:
dmumps_c (&id);
// Copy solution into the given vector
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
for (unsigned int i=0; i<n; ++i)
vector(i) = rhs[i];
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008, 2009, 2010 by the deal.II authors
+// Copyright (C) 2008, 2009, 2010, 2011 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
const MPI_Comm & mpi_comm,
const IndexSet & myrange)
{
- unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+ unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
std::vector<unsigned int> start_index(rows_per_cpu.size()+1);
start_index[0]=0;
for (unsigned int i=0;i<rows_per_cpu.size();++i)
send_to.push_back(it->first);
num_receive =
- Utilities::System::
+ Utilities::MPI::
compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
}
== dof_handler.n_dofs(),
ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile("count_dofs_per_block_01/output");
== dof_handler.n_dofs(),
ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile("count_dofs_per_component_01/output");
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile("dof_handler_number_cache/output");
{
Assert( Utilities::System::job_supports_mpi(), ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("collective_01").c_str());
deallog.attach(logfile);
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
int int_sum, uint_sum, double_sum, float_sum;
int_sum
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("collective_02").c_str());
deallog.attach(logfile);
const double mean
= VectorTools::compute_mean_value (dofh, QGauss<dim>(2), x_rel, 0);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "mean=" << mean
<< std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
std::vector<unsigned int> dofs_per_block (fe.n_blocks());
DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (unsigned int i=0; i<fe.n_blocks(); ++i)
deallog << "Block " << i << " has " << dofs_per_block[i] << " dofs" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("count_dofs_per_block_01").c_str());
== dof_handler.n_dofs(),
ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("count_dofs_per_block_02").c_str());
== dof_handler.n_dofs(),
ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
deallog << "Total number of dofs: " << dof_handler.n_dofs() << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("count_dofs_per_component_01").c_str());
dof_handler.distribute_dofs (fe);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("crash_01").c_str());
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("crash_02").c_str());
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("crash_03").c_str());
}
}
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
deallog << "id=" << triangulation.locally_owned_subdomain()
<< " n_coarsen=" << n_coarse << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("crash_04").c_str());
Assert( Utilities::System::job_supports_mpi(), ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("distribute_sp_01").c_str());
deallog.attach(logfile);
dof_handler.distribute_dofs (fe);
const unsigned int N = dof_handler.n_dofs();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << N << std::endl;
Assert (dof_handler.n_locally_owned_dofs() <= N,
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
std::ofstream logfile(output_file_for_mpi("dof_handler_number_cache").c_str());
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<dim>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
DoFHandler<dim> dofh(tr);
dofh.distribute_dofs (fe);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "Total dofs=" << dofh.n_dofs() << std::endl;
// extract constant modes and print
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
// tr.locally_owned_subdomain()),
// ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
deallog << locally_active.size() << ' ' << locally_active.n_elements()
<< std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "numproc=" << numproc << std::endl;
// check local values
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
deallog << myid*2 << ":" << v(myid*2) << std::endl;
deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
// check ghost values
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "ghost: " << v(1) << std::endl;
Assert(v(1) == 2.0, ExcInternalError());
int main (int argc, char **argv)
{
PetscInitialize(&argc,&argv,0,0);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
MPI_SUM,
tr.get_communicator());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "difference = " << std::sqrt(global)
<< std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
// create a mesh so that all but one
// processor are empty
compile_time_error;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
// create a mesh so that all but one
// processor are empty
{
PetscInitialize(&argc,&argv,0,0);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
Assert (dim == 3, ExcNotImplemented());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
{
{
Utilities::System::MPI_InitFinalize mpi_init (argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
Assert (dim == 3, ExcNotImplemented());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
{
{
Utilities::System::MPI_InitFinalize mpi_init (argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
Assert (dim == 3, ExcNotImplemented());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
{
{
Utilities::System::MPI_InitFinalize mpi_init (argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
DoFTools::make_hanging_node_constraints (dofh, cm2);
{
- std::ofstream file((std::string("p4est_2d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
+ std::ofstream file((std::string("p4est_2d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
file << "**** proc " << myid << std::endl;
cm.print(file);
file << "****" << std::endl;
{
for (unsigned int i=0;i<numproc;++i)
{
- cat_file((std::string("p4est_2d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
+ cat_file((std::string("p4est_2d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
}
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
for (int i=0;i<12;++i)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "step " << i << std::endl;
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
if (myid==0)
{
- std::ofstream file((std::string("p4est_2d_constraintmatrix_03/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
+ std::ofstream file((std::string("p4est_2d_constraintmatrix_03/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
file << "**** proc " << myid << std::endl;
x_dub.print(file);
}
if (myid==0)
{
- cat_file((std::string("p4est_2d_constraintmatrix_03/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(0)).c_str());
+ cat_file((std::string("p4est_2d_constraintmatrix_03/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(0)).c_str());
}
tr.set_boundary (0);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
x_ref -= x;
double err = x_ref.linfty_norm();
if (err>1.0e-12)
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "err:" << err << std::endl;
// x_rel=x_ref; //uncomment to output error
tr.set_boundary (0);
tr.set_boundary (1);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
x1 -= x_ref;
double err = x1.linfty_norm();
if (err>1.0e-12)
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "err:" << err << std::endl;
// now test the same thing with a fresh vector
x2 -= x_ref;
err = x2.linfty_norm();
if (err>1.0e-12)
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "err:" << err << std::endl;
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
}
tr.execute_coarsening_and_refinement ();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "#local cells:" << tr.n_locally_owned_active_cells() << std::endl;
DoFHandler<dim> dofh(tr);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
}
tr.execute_coarsening_and_refinement ();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "#local cells:" << tr.n_locally_owned_active_cells() << std::endl;
DoFHandler<dim> dofh(tr);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
}
tr.execute_coarsening_and_refinement ();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "#local cells:" << tr.n_locally_owned_active_cells() << std::endl;
DoFHandler<dim> dofh(tr);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
dofh.distribute_dofs (fe);
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
for (int i=0;i<5;++i)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "refine loop:" << i << std::endl;
tr.refine_global(1);
}
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
std::cout << myid << ":" << getpid() << std::endl;
//system("sleep 20");
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
cell_subd.resize(tr.n_active_cells());
GridTools::get_subdomain_association(tr, cell_subd);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
for (unsigned int i=0;i<tr.n_active_cells();++i)
deallog << cell_subd[i] << " ";
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
GridGenerator::hyper_cube(tr);
tr.refine_global(1);
- while (tr.n_global_active_cells() < 20000/Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD))
+ while (tr.n_global_active_cells() < 20000/Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD))
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "refine_loop..." << std::endl;
std::vector<bool> flags (tr.n_active_cells(), false);
}
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
GridGenerator::hyper_cube(tr);
typename DoFHandler<dim>::active_cell_iterator
cell, endc = dofh.end();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (cell = dofh.begin_active(); cell != endc; ++cell)
if (!cell->is_artificial() && !cell->is_ghost())
{
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<dim>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
DoFHandler<dim> dofh(tr);
dofh.distribute_dofs (fe);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "Total dofs=" << dofh.n_dofs() << std::endl;
{
IndexSet dof_set;
DoFTools::extract_locally_active_dofs (dofh, dof_set);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
dof_set.print(deallog);
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "****" << std::endl;
DoFRenumbering::component_wise(dofh);
IndexSet dof_set;
DoFTools::extract_locally_active_dofs (dofh, dof_set);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
dof_set.print(deallog);
- for (unsigned int i=0; i<Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD); ++i)
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD); ++i)
{
deallog << "Dofs owned by processor " << i << ": ";
dofh.locally_owned_dofs_per_processor()[i].print(deallog);
typename DoFHandler<dim>::active_cell_iterator
cell, endc = dofh.end();
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
for (cell = dofh.begin_active(); cell != endc; ++cell)
if (!cell->is_artificial() && !cell->is_ghost())
{
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
Assert(tr.n_active_cells()==1, ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "subdomainid = " << tr.begin_active()->subdomain_id() << std::endl;
if (myid == numproc-1)
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (myid == 0)
{
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
DoFTools::make_hanging_node_constraints (dofh, cm2);
{
- std::ofstream file((std::string("p4est_3d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
+ std::ofstream file((std::string("p4est_3d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
file << "**** proc " << myid << std::endl;
cm.print(file);
file << "****" << std::endl;
{
for (unsigned int i=0;i<numproc;++i)
{
- cat_file((std::string("p4est_3d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
+ cat_file((std::string("p4est_3d_constraintmatrix_01/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
}
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
x_ref -= x;
double err = x_ref.linfty_norm();
if (err>1.0e-12)
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "err:" << err << std::endl;
// x_rel=x_ref; //uncomment to output error
tr.set_boundary (0);
tr.set_boundary (1);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
// print out constraints for each
// processor.
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
IndexSet locally_active (dof.n_dofs());
DoFTools::extract_locally_active_dofs (dof, locally_active);
- std::ofstream file((std::string("p4est_3d_constraintmatrix_04/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
+ std::ofstream file((std::string("p4est_3d_constraintmatrix_04/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
file << "**** proc " << myid << ": \n\n";
file << "Constraints:\n";
constraints.print(file);
{
for (unsigned int i=0;i<numproc;++i)
{
- cat_file((std::string("p4est_3d_constraintmatrix_04/ncpu_") + Utilities::int_to_string(Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
+ cat_file((std::string("p4est_3d_constraintmatrix_04/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
}
}
}
int main(int argc, char** argv)
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
std::cout << myid << ":" << getpid() << std::endl;
//system("sleep 20");
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
GridGenerator::hyper_cube(tr);
tr.refine_global(1);
- while (tr.n_active_cells() < 20000/Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD))
+ while (tr.n_active_cells() < 20000/Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD))
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "refine_loop..." << std::endl;
std::vector<bool> flags (tr.n_active_cells(), false);
}
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
GridGenerator::hyper_shell (tr,
<< std::endl;
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
GridGenerator::hyper_cube(tr);
<< std::endl;
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
<< std::endl;
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 1)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 1)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
if (true)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "hyper_cube" << std::endl;
parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
for (int i=0;i<29;++i)
{
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "refine_loop... level=" << level << std::endl;
if (myid==0)
}
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
{
PetscInitialize(&argc,&argv,0,0);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("petsc_01").c_str());
deallog.attach(logfile);
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
{
PetscInitialize(&argc,&argv,0,0);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("petsc_02").c_str());
deallog.attach(logfile);
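
(For orientation, not part of the patch: the pattern that recurs in the test drivers above, written out as a standalone program against the renamed interface. The header path, the use of plain MPI_Init/MPI_Finalize instead of the test suite's PetscInitialize/MPI_InitFinalize wrappers, and the std::cout output are assumptions made for this sketch only.)

#include <deal.II/base/mpi.h>   // older releases declared these functions in <deal.II/base/utilities.h>
#include <mpi.h>
#include <iostream>

using namespace dealii;

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // The two renamed queries: rank of this process and size of the communicator.
  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // Only rank 0 writes, mirroring the "if (myid == 0)" guards in the tests.
  if (myid == 0)
    std::cout << "Running on " << numproc << " CPU(s)." << std::endl;

  MPI_Finalize();
  return 0;
}
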
// $Id$
// Version: $Name$
//
-// Copyright (C) 2009, 2010 by the deal.II authors
+// Copyright (C) 2009, 2010, 2011 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
{
Assert( Utilities::System::job_supports_mpi(), ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
// select a few destinations
std::vector<unsigned int> destinations;
deallog << "Exchanging data..." << std::endl;
std::vector<unsigned int> origins
- = Utilities::System::compute_point_to_point_communication_pattern (MPI_COMM_WORLD,
- destinations);
+ = Utilities::MPI::compute_point_to_point_communication_pattern (MPI_COMM_WORLD,
+ destinations);
if (myid == 0)
{
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("point_to_point_pattern_01").c_str());
deallog.attach(logfile);
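
(A sketch, not part of the patch, of what the renamed Utilities::MPI::compute_point_to_point_communication_pattern does in the test above: each rank passes the list of ranks it intends to send to, and the call returns the ranks that will send to it, so matching receives can be posted without a hand-rolled all-to-all handshake. The ring-shaped send pattern, the header path, and the plain MPI_Init/MPI_Finalize setup are assumptions of this sketch.)

#include <deal.II/base/mpi.h>   // older releases declared these functions in <deal.II/base/utilities.h>
#include <mpi.h>
#include <iostream>
#include <vector>

using namespace dealii;

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // Every rank wants to send one message to its right neighbor (if it has one).
  std::vector<unsigned int> destinations;
  if (numproc > 1)
    destinations.push_back((myid + 1) % numproc);

  // Returns the ranks that listed *us* among their destinations.
  const std::vector<unsigned int> origins
    = Utilities::MPI::compute_point_to_point_communication_pattern(MPI_COMM_WORLD,
                                                                   destinations);

  // With the ring pattern above, each rank should hear from its left neighbor.
  for (unsigned int i = 0; i < origins.size(); ++i)
    std::cout << "rank " << myid << " receives from rank " << origins[i] << std::endl;

  MPI_Finalize();
  return 0;
}
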
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
// since that would make 'make' delete the output file without us
// ever seeing what we want to see if things go wrong
// Assert (n_refined ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
// Assert (n_coarsened ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
// since that would make 'make' delete the output file without us
// ever seeing what we want to see if things go wrong
// Assert (n_refined ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
// Assert (n_coarsened ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
// since that would make 'make' delete the output file without us
// ever seeing what we want to see if things go wrong
// Assert (n_refined ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
// Assert (n_coarsened ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
// since that would make 'make' delete the output file without us
// ever seeing what we want to see if things go wrong
// Assert (n_refined ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
// Assert (n_coarsened ==
-// 4*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD),
+// 4*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD),
// ExcInternalError());
}
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
std::vector<unsigned int> sub(2);
- sub[0] = 5*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ sub[0] = 5*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
sub[1] = 1;
GridGenerator::subdivided_hyper_rectangle(static_cast<Triangulation<2>&>(tr),
sub, Point<2>(0,0), Point<2>(1,1));
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test()
{
- const unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<2> tr(MPI_COMM_WORLD);
(void)argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
template <int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
GridGenerator::hyper_cube (triangulation);
(void)argv;
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("refine_and_coarsen_fixed_number_06").c_str());
deallog.attach(logfile);
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr (MPI_COMM_WORLD);
(void) argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push (Utilities::int_to_string (myid));
template<int dim>
void test()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
parallel::distributed::Triangulation<dim> tr (MPI_COMM_WORLD);
(void) argv;
#endif
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push (Utilities::int_to_string (myid));
Assert( Utilities::System::job_supports_mpi(), ExcInternalError());
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
#endif
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
std::ofstream logfile(output_file_for_mpi("simple_mpi_01").c_str());
deallog.attach(logfile);
// hanging
MPI_Barrier (MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
PetscInitialize(&argc,&argv,0,0);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
Assert(test(locally_owned.nth_index_in_set(5)) == 7, ExcInternalError());
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
v.compress();
v*=2.0;
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
deallog << myid*2 << ":" << v(myid*2) << std::endl;
deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
v_tmp.reinit(v,false,true);
// check ghost values
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "ghost: " << v_tmp(1) << std::endl;
Assert(v_tmp(1) == 2.0, ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
// assignment with transfer to ghost
v_tmp = v;
// check ghost values
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "ghost: " << v_tmp(1) << std::endl;
Assert(v_tmp(1) == 1.0, ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
{
}
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
}
Assert (exc == false, ExcInternalError());
- Assert (norm == 2.*Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD)-1,
+ Assert (norm == 2.*Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)-1,
ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
{
deallog << norm << std::endl;
deallog << "OK" << std::endl;
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0)
deallog << "numproc=" << numproc << std::endl;
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- const unsigned int n_procs = Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD);
- const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_rows = 2;
const unsigned int n_cols = 2;
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)
void test ()
{
- const unsigned int n_procs = Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD);
- const unsigned int my_id = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const unsigned int n_rows = 3;
const unsigned int n_cols = 4;
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- const unsigned int n_procs = Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
// let processor 1 speak if we run
void test ()
{
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
- unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
if (myid==0) deallog << "numproc=" << numproc << std::endl;
Assert (test1.vector_partitioner().SameAs(test2.vector_partitioner()),
ExcInternalError());
- if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
deallog << "OK" << std::endl;
}
{
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
- unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
deallog.push(Utilities::int_to_string(myid));
if (myid == 0)