From: Timo Heister
Date: Mon, 1 Oct 2018 17:10:11 +0000 (-0400)
Subject: use const cast for old OpenMPI version
X-Git-Tag: v9.1.0-rc1~662^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3c3ea55ffce7128e4be4f5db0cf07e7107e82d99;p=dealii.git

use const cast for old OpenMPI version

Old versions of OpenMPI are missing const specifiers for input arguments
in functions like MPI_Allgather. Introduce a macro to cast away const in
these cases.
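For illustration, a call site then looks like the following sketch (the
receive-side arguments are only schematic, not copied from the patch):

    // With MPI >= 3.0 the macro passes the pointer through unchanged; with
    // e.g. OpenMPI 1.6.5 the first argument expands to roughly
    //   const_cast<unsigned int *>(&size_on_proc)
    // so the const-correct caller still matches the non-const prototype.
    ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
                         1,
                         MPI_UNSIGNED,
                         sizes_on_all_procs.data(),
                         1,
                         MPI_UNSIGNED,
                         mpi_communicator);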
---

diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index d2482be792..82c643d1c6 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -58,6 +58,33 @@ class SymmetricTensor;
 template <typename Number>
 class SparseMatrix;
 
+
+// Helper macro to remove const from the pointer arguments to some MPI_*
+// functions.
+//
+// This is needed as the input arguments of functions like MPI_Allgather() are
+// not marked as const in OpenMPI 1.6.5.
+#ifdef DEAL_II_WITH_MPI
+#  if DEAL_II_MPI_VERSION_GTE(3, 0)
+// We are good, no casts are needed.
+#    define DEAL_II_MPI_CONST_CAST(expr) (expr)
+#  else
+
+#    include <type_traits>
+
+// This monster of a macro will:
+// 1. remove *
+// 2. remove const
+// 3. add *
+// 4. const_cast the given expression to this new type.
+#    define DEAL_II_MPI_CONST_CAST(expr)                              \
+      const_cast<typename std::remove_const<                          \
+        typename std::remove_pointer<decltype(expr)>::type>::type *>( \
+        expr)
+
+#  endif
+#endif
+
 namespace Utilities
 {
   /**
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index f5dfbd5a69..9d4d9727be 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -1812,7 +1812,7 @@ namespace parallel
 
       MPI_File fh;
       ierr = MPI_File_open(mpi_communicator,
-                           fname_fixed.c_str(),
+                           DEAL_II_MPI_CONST_CAST(fname_fixed.c_str()),
                            MPI_MODE_CREATE | MPI_MODE_WRONLY,
                            info,
                            &fh);
@@ -1836,9 +1836,11 @@ namespace parallel
       // it is sufficient to let only the first processor perform this task.
       if (myrank == 0)
         {
+          const unsigned int *data = sizes_fixed_cumulative.data();
+
           ierr = MPI_File_write_at(fh,
                                    0,
-                                   sizes_fixed_cumulative.data(),
+                                   DEAL_II_MPI_CONST_CAST(data),
                                    sizes_fixed_cumulative.size(),
                                    MPI_UNSIGNED,
                                    MPI_STATUS_IGNORE);
@@ -1849,12 +1851,14 @@ namespace parallel
           const unsigned int offset_fixed =
             sizes_fixed_cumulative.size() * sizeof(unsigned int);
 
+          const char *data = src_data_fixed.data();
+
           ierr = MPI_File_write_at(
             fh,
             offset_fixed +
               parallel_forest->global_first_quadrant[myrank] *
                 sizes_fixed_cumulative.back(), // global position in file
-            src_data_fixed.data(),
+            DEAL_II_MPI_CONST_CAST(data),
             src_data_fixed.size(), // local buffer
             MPI_CHAR,
             MPI_STATUS_IGNORE);
@@ -1880,7 +1884,7 @@ namespace parallel
 
       MPI_File fh;
       ierr = MPI_File_open(mpi_communicator,
-                           fname_variable.c_str(),
+                           DEAL_II_MPI_CONST_CAST(fname_variable.c_str()),
                            MPI_MODE_CREATE | MPI_MODE_WRONLY,
                            info,
                            &fh);
@@ -1896,15 +1900,19 @@ namespace parallel
       AssertThrowMPI(ierr);
 
       // Write sizes of each cell into file simultaneously.
-      ierr =
-        MPI_File_write_at(fh,
-                          parallel_forest->global_first_quadrant[myrank] *
-                            sizeof(int), // global position in file
-                          src_sizes_variable.data(),
-                          src_sizes_variable.size(), // local buffer
-                          MPI_INT,
-                          MPI_STATUS_IGNORE);
-      AssertThrowMPI(ierr);
+      {
+        const int *data = src_sizes_variable.data();
+        ierr =
+          MPI_File_write_at(fh,
+                            parallel_forest->global_first_quadrant[myrank] *
+                              sizeof(int), // global position in file
+                            DEAL_II_MPI_CONST_CAST(data),
+                            src_sizes_variable.size(), // local buffer
+                            MPI_INT,
+                            MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+
       const unsigned int offset_variable =
         parallel_forest->global_num_quadrants * sizeof(int);
@@ -1914,7 +1922,7 @@ namespace parallel
 
       // Share information among all processors.
       std::vector<unsigned int> sizes_on_all_procs(n_procs);
-      ierr = MPI_Allgather(&size_on_proc,
+      ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
                            1,
                            MPI_UNSIGNED,
                            sizes_on_all_procs.data(),
@@ -1929,6 +1937,8 @@ namespace parallel
                      sizes_on_all_procs.end(),
                      sizes_on_all_procs.begin());
 
+      const char *data = src_data_variable.data();
+
       // Write data consecutively into file.
       ierr = MPI_File_write_at(
         fh,
@@ -1936,7 +1946,7 @@ namespace parallel
         ((myrank == 0) ?
            0 :
            sizes_on_all_procs[myrank - 1]), // global position in file
-        src_data_variable.data(),
+        DEAL_II_MPI_CONST_CAST(data),
         src_data_variable.size(), // local buffer
         MPI_CHAR,
         MPI_STATUS_IGNORE);
@@ -1980,8 +1990,11 @@ namespace parallel
       AssertThrowMPI(ierr);
 
       MPI_File fh;
-      ierr = MPI_File_open(
-        mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh);
+      ierr = MPI_File_open(mpi_communicator,
+                           DEAL_II_MPI_CONST_CAST(fname_fixed.c_str()),
+                           MPI_MODE_RDONLY,
+                           info,
+                           &fh);
       AssertThrowMPI(ierr);
 
       ierr = MPI_Info_free(&info);
@@ -2042,7 +2055,7 @@ namespace parallel
 
       MPI_File fh;
       ierr = MPI_File_open(mpi_communicator,
-                           fname_variable.c_str(),
+                           DEAL_II_MPI_CONST_CAST(fname_variable.c_str()),
                            MPI_MODE_RDONLY,
                            info,
                            &fh);
@@ -2073,7 +2086,7 @@ namespace parallel
 
       // share information among all processors
       std::vector<unsigned int> sizes_on_all_procs(n_procs);
-      ierr = MPI_Allgather(&size_on_proc,
+      ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
                            1,
                            MPI_UNSIGNED,
                            sizes_on_all_procs.data(),
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index 36bb1a526d..b8c4ed6a79 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -5079,7 +5079,7 @@ namespace internal
           n_locally_owned_dofs_per_processor(n_cpus);
 
         const int ierr =
-          MPI_Allgather(&n_locally_owned_dofs,
+          MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
                         1,
                         DEAL_II_DOF_INDEX_MPI_TYPE,
                         n_locally_owned_dofs_per_processor.data(),
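For reference, the type gymnastics described in the comment in mpi.h can be
checked in isolation. The following is a minimal standalone sketch (not part
of the patch, no MPI required; the name size_on_proc is only borrowed from
tria.cc for illustration):

    #include <type_traits>

    int main()
    {
      const unsigned int size_on_proc = 42;

      // What DEAL_II_MPI_CONST_CAST(&size_on_proc) boils down to for MPI
      // implementations older than 3.0: remove the pointer, remove const,
      // add the pointer back, and const_cast the expression to that type.
      using pointee         = std::remove_pointer<decltype(&size_on_proc)>::type; // const unsigned int
      using mutable_pointee = std::remove_const<pointee>::type;                   // unsigned int
      auto *p               = const_cast<mutable_pointee *>(&size_on_proc);       // unsigned int *

      static_assert(std::is_same<decltype(p), unsigned int *>::value,
                    "const has been cast away from the pointer type");

      // Reading through p is fine; writing through it would be undefined
      // behavior because size_on_proc really is const. The old MPI
      // prototypes only lack const, they do not modify the send buffer.
      return static_cast<int>(*p) - 42;
    }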