https://gitweb.dealii.org/ - dealii.git/commitdiff
Add InitFinalize base class of MPI_InitFinalize to initialize/finalize deal.II and...
author    Bruno Turcksin <bruno.turcksin@gmail.com>
          Sat, 1 Jul 2023 19:19:30 +0000 (19:19 +0000)
committer Bruno Turcksin <bruno.turcksin@gmail.com>
          Wed, 20 Mar 2024 15:55:33 +0000 (11:55 -0400)
include/deal.II/base/init_finalize.h [new file with mode: 0644]
include/deal.II/base/mpi.h
source/base/CMakeLists.txt
source/base/init_finalize.cc [new file with mode: 0644]
source/base/mpi.cc

diff --git a/include/deal.II/base/init_finalize.h b/include/deal.II/base/init_finalize.h
new file mode 100644
index 0000000..5704574
--- /dev/null
@@ -0,0 +1,221 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_init_finalize_h
+#define dealii_init_finalize_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/mpi_stub.h>
+#include <deal.II/base/numbers.h>
+
+#include <boost/signals2.hpp>
+
+#include <set>
+
+DEAL_II_NAMESPACE_OPEN
+
+/**
+ * The enum type passed to the constructor of InitFinalize, indicating which
+ * external libraries should be initialized and finalized by deal.II.
+ */
+enum class InitializeLibrary
+{
+  /**
+   * No external library will be initialized/finalized.
+   */
+  None = 0,
+  /**
+   * Initialize/finalize MPI.
+   */
+  MPI = 1,
+  /**
+   * Initialize/finalize Kokkos.
+   */
+  Kokkos = 2,
+  /**
+   * Initialize/finalize both PETSc and SLEPc.
+   */
+  SLEPc = 4,
+  /**
+   * Initialize/finalize PETSc.
+   */
+  PETSc = 8,
+  /**
+   * Initialize/finalize Zoltan.
+   */
+  Zoltan = 16,
+  /**
+   * Initialize/finalize P4EST and SC.
+   */
+  P4EST = 32
+};
+
+
+
+/**
+ * Global operator which returns an object in which all bits are set which are
+ * either set in the first or the second argument.
+ */
+inline InitializeLibrary
+operator|(const InitializeLibrary f1, const InitializeLibrary f2)
+{
+  return static_cast<InitializeLibrary>(static_cast<unsigned int>(f1) |
+                                        static_cast<unsigned int>(f2));
+}
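+
+// For illustration (a sketch, not part of the interface): together with
+// operator& below, this enables the usual bitmask idiom. A combination of
+// flags such as
+//
+//   const InitializeLibrary libs =
+//     InitializeLibrary::MPI | InitializeLibrary::Kokkos;
+//
+// is then queried with static_cast<bool>(libs & InitializeLibrary::MPI),
+// the pattern used throughout init_finalize.cc below.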
+
+
+
+/**
+ * Global operator which returns an object in which all bits are set which
+ * are set in both the first and the second argument.
+ */
+inline InitializeLibrary
+operator&(const InitializeLibrary f1, const InitializeLibrary f2)
+{
+  return static_cast<InitializeLibrary>(static_cast<unsigned int>(f1) &
+                                        static_cast<unsigned int>(f2));
+}
+
+
+
+/**
+ * A class that is used to initialize and finalize deal.II and the external
+ * libraries requested by the user.
+ */
+class InitFinalize
+{
+public:
+  /**
+   * Initialize deal.II and the requested external libraries.
+   *
+   * @note This function calls MultithreadInfo::set_thread_limit() with
+   * either @p max_num_threads or, if @p max_num_threads is left at its
+   * default value, a number of threads equal to the number of cores
+   * allocated to this MPI process. However,
+   * MultithreadInfo::set_thread_limit() in turn also evaluates the
+   * environment variable DEAL_II_NUM_THREADS. Finally, worker threads can
+   * only be created on cores to which the current MPI process has access;
+   * some MPI implementations limit each process to one core or a subset of
+   * cores in order to ensure better cache behavior. Consequently, the
+   * number of threads that will really be created is the minimum of the
+   * argument passed here, the environment variable (if set), and the
+   * number of cores accessible to the process.
+   *
+   * @note MultithreadInfo::set_thread_limit() can only work if it is
+   * called before any threads are created. The safest place for a call to
+   * it is therefore at the beginning of <code>main()</code>.
+   * Consequently, this extends to the current class: the best place to
+   * create an object of this type is also at or close to the top of
+   * <code>main()</code>.
+   */
+  InitFinalize(
+    int                     &argc,
+    char                  **&argv,
+    const InitializeLibrary &libraries,
+    const unsigned int       max_num_threads = numbers::invalid_unsigned_int);
+
+  /**
+   * Destructor. Finalizes deal.II and the external libraries initialized
+   * by deal.II.
+   */
+  ~InitFinalize();
+
+  /**
+   * Finalize deal.II and the external libraries initialized by
+   * deal.II.
+   */
+  void
+  finalize();
+
+  /**
+   * Register a reference to an MPI_Request
+   * on which we need to call `MPI_Wait` before calling `MPI_Finalize`.
+   *
+   * The object @p request needs to exist when MPI_Finalize is called, which means the
+   * request is typically statically allocated. Otherwise, you need to call
+   * unregister_request() before the request goes out of scope. Note that it
+   * is acceptable for a request to be already waited on (and consequently
+   * reset to MPI_REQUEST_NULL).
+   *
+   * It is acceptable to call this function more than once with the same
+   * instance (as it is done in the example below).
+   *
+   * Typically, this function is used by CollectiveMutex and not directly,
+   * but it can also be used directly like this:
+   * @code
+   * void my_fancy_communication()
+   * {
+   *   static MPI_Request request = MPI_REQUEST_NULL;
+ *   InitFinalize::register_request(request);
+   *   MPI_Wait(&request, MPI_STATUS_IGNORE);
+   *   // [some algorithm that is not safe to be executed twice in a row.]
+ *   MPI_Ibarrier(comm, &request);
+   * }
+   * @endcode
+   */
+  static void
+  register_request(MPI_Request &request);
+
+  /**
+   * Unregister a request previously added using register_request().
+   */
+  static void
+  unregister_request(MPI_Request &request);
+
+  /**
+   * A structure with boost::signals2 signal objects that can be used to
+   * register callbacks to run after MPI initialization and before MPI
+   * finalization.
+   *
+   * For documentation on signals, see
+   * http://www.boost.org/doc/libs/release/libs/signals2 .
+   */
+  struct Signals
+  {
+    /**
+     * A signal that is triggered immediately after we have
+     * initialized the MPI context with <code>MPI_Init()</code>.
+     */
+    boost::signals2::signal<void()> at_mpi_init;
+
+    /**
+     * A signal that is triggered just before we close the MPI context
+     * with <code>MPI_Finalize()</code>. It can be used to deallocate
+     * statically allocated MPI resources that need to be deallocated
+     * before <code>MPI_Finalize()</code> is called.
+     */
+    boost::signals2::signal<void()> at_mpi_finalize;
+  };
+
+  static Signals signals;
+
+private:
+  InitializeLibrary libraries;
+
+  /**
+   * The set of requests on which we need to call MPI_Wait before calling
+   * MPI_Finalize.
+   */
+  static std::set<MPI_Request *> requests;
+
+  bool is_finalized = false;
+
+#ifdef DEAL_II_WITH_PETSC
+  bool finalize_petscslepc;
+#endif
+};
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
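
Taken together, the new header is intended to be used as in the following
sketch (illustrative only; the choice of flags and the thread limit are
example values, not mandated by the patch):

    #include <deal.II/base/init_finalize.h>

    int main(int argc, char **argv)
    {
      // Initialize only MPI and Kokkos and limit deal.II to one thread
      // per process; the flags combine via the operator| defined above.
      dealii::InitFinalize init(argc,
                                argv,
                                dealii::InitializeLibrary::MPI |
                                  dealii::InitializeLibrary::Kokkos,
                                /*max_num_threads=*/1);

      // ... parallel work ...

      return 0;
    } // ~InitFinalize() finalizes Kokkos, then MPI
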
diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index 3ecfd0096daf8895e0f9ed35161cc0dd527e9135..69fc7be7d2b18bd8d05e831cfdcb72fd5066137c 100644
 #include <deal.II/base/config.h>
 
 #include <deal.II/base/array_view.h>
+#include <deal.II/base/init_finalize.h>
 #include <deal.II/base/mpi_stub.h>
 #include <deal.II/base/mpi_tags.h>
 #include <deal.II/base/numbers.h>
 #include <deal.II/base/template_constraints.h>
 #include <deal.II/base/utilities.h>
 
-#include <boost/signals2.hpp>
-
 #include <complex>
 #include <limits>
 #include <map>
 #include <numeric>
-#include <set>
 #include <vector>
 
 
@@ -1079,7 +1077,7 @@ namespace Utilities
      * MPI processes at the beginning of the program because it uses
      * `MPI_COMM_WORLD` during initialization.
      */
-    class MPI_InitFinalize
+    class MPI_InitFinalize : public InitFinalize
     {
     public:
       /**
@@ -1136,78 +1134,7 @@ namespace Utilities
        * Destructor. Calls <tt>MPI_Finalize()</tt> in case this class owns the
        * MPI process.
        */
-      ~MPI_InitFinalize();
-
-      /**
-       * Register a reference to an MPI_Request
-       * on which we need to call `MPI_Wait` before calling `MPI_Finalize`.
-       *
-       * The object @p request needs to exist when MPI_Finalize is called, which means the
-       * request is typically statically allocated. Otherwise, you need to call
-       * unregister_request() before the request goes out of scope. Note that it
-       * is acceptable for a request to be already waited on (and consequently
-       * reset to MPI_REQUEST_NULL).
-       *
-       * It is acceptable to call this function more than once with the same
-       * instance (as it is done in the example below).
-       *
-       * Typically, this function is used by CollectiveMutex and not directly,
-       * but it can also be used directly like this:
-       * @code
-       * void my_fancy_communication()
-       * {
-       *   static MPI_Request request = MPI_REQUEST_NULL;
-       *   MPI_InitFinalize::register_request(request);
-       *   MPI_Wait(&request, MPI_STATUS_IGNORE);
-       *   // [some algorithm that is not safe to be executed twice in a row.]
-       *   MPI_IBarrier(comm, &request);
-       * }
-       * @endcode
-       */
-      static void
-      register_request(MPI_Request &request);
-
-      /**
-       * Unregister a request previously added using register_request().
-       */
-      static void
-      unregister_request(MPI_Request &request);
-
-      /**
-       * A structure that has boost::signal objects to register a call back
-       * to run after MPI init or finalize.
-       *
-       * For documentation on signals, see
-       * http://www.boost.org/doc/libs/release/libs/signals2 .
-       */
-      struct Signals
-      {
-        /**
-         * A signal that is triggered immediately after we have
-         * initialized the MPI context with <code>MPI_Init()</code>.
-         */
-        boost::signals2::signal<void()> at_mpi_init;
-
-        /**
-         * A signal that is triggered just before we close the MPI context
-         * with <code>MPI_Finalize()</code>. It can be used to deallocate
-         * statically allocated MPI resources that need to be deallocated
-         * before <code>MPI_Finalize()</code> is called.
-         */
-        boost::signals2::signal<void()> at_mpi_finalize;
-      };
-
-      static Signals signals;
-
-    private:
-      /**
-       * Requests to MPI_Wait before finalizing
-       */
-      static std::set<MPI_Request *> requests;
-
-#ifdef DEAL_II_WITH_PETSC
-      bool finalize_petscslepc;
-#endif
+      ~MPI_InitFinalize() = default;
     };
 
     /**
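
For users nothing changes at the call site: MPI_InitFinalize keeps its
public interface and, as the mpi.cc hunk further down shows, now simply
forwards to InitFinalize with every library enabled. A sketch of the
(unchanged) usage:

    #include <deal.II/base/mpi.h>

    int main(int argc, char **argv)
    {
      // Behaves as before this patch; internally it now delegates to
      // InitFinalize with MPI | Kokkos | SLEPc | PETSc | Zoltan | P4EST.
      dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
      // ...
    }
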
diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt
index c4c4b8b08472eeb825f00ffa5d88e81c5f8203c3..b7c4c4b825d349e468124115855e32b9c9bc68da 100644
@@ -41,6 +41,7 @@ set(_unity_include_src
   geometric_utilities.cc
   graph_coloring.cc
   incremental_function.cc
+  init_finalize.cc
   index_set.cc
   job_identifier.cc
   logstream.cc
diff --git a/source/base/init_finalize.cc b/source/base/init_finalize.cc
new file mode 100644
index 0000000..6556242
--- /dev/null
@@ -0,0 +1,455 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/init_finalize.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/vector_memory.h>
+
+#include <Kokkos_Core.hpp>
+
+#ifdef DEAL_II_WITH_TRILINOS
+#  ifdef DEAL_II_WITH_MPI
+#    include <deal.II/lac/trilinos_parallel_block_vector.h>
+#    include <deal.II/lac/trilinos_vector.h>
+
+#    include <Epetra_MpiComm.h>
+#  endif
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+#  include <deal.II/lac/petsc_block_vector.h>
+#  include <deal.II/lac/petsc_vector.h>
+
+#  include <petscsys.h>
+#endif
+
+#ifdef DEAL_II_WITH_SLEPC
+#  include <deal.II/lac/slepc_solver.h>
+
+#  include <slepcsys.h>
+#endif
+
+#ifdef DEAL_II_WITH_P4EST
+#  include <p4est_bits.h>
+#endif
+
+#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
+#  include <zoltan_cpp.h>
+#endif
+
+DEAL_II_NAMESPACE_OPEN
+
+
+/* Force initialization of static struct: */
+InitFinalize::Signals InitFinalize::signals = InitFinalize::Signals();
+
+
+InitFinalize::InitFinalize(int                     &argc,
+                           char                  **&argv,
+                           const InitializeLibrary &libraries,
+                           const unsigned int       max_num_threads)
+  : libraries(libraries)
+{
+  static bool constructor_has_already_run = false;
+  (void)constructor_has_already_run;
+  Assert(constructor_has_already_run == false,
+         ExcMessage("You can only create a single object of this class "
+                    "in a program since it initializes the MPI system."));
+
+
+  int ierr = 0;
+#ifdef DEAL_II_WITH_MPI
+  if (static_cast<bool>(libraries & InitializeLibrary::MPI))
+    {
+      // if we have PETSc, we will initialize it and let it handle MPI.
+      // Otherwise, we will do it.
+      int MPI_has_been_started = 0;
+      ierr                     = MPI_Initialized(&MPI_has_been_started);
+      AssertThrowMPI(ierr);
+      AssertThrow(MPI_has_been_started == 0,
+                  ExcMessage("MPI error. You can only start MPI once!"));
+
+      int provided;
+      // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
+      // we might use several threads but never call two MPI functions at the
+      // same time. For an explanation of why we do this, see
+      // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
+      int wanted = MPI_THREAD_SERIALIZED;
+      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
+      AssertThrowMPI(ierr);
+
+      // disable for now because at least some implementations always return
+      // MPI_THREAD_SINGLE.
+      // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
+      //    ExcMessage("MPI reports that we are not allowed to use multiple
+      //    threads."));
+    }
+#else
+  // make sure the compiler doesn't warn about these variables
+  (void)argc;
+  (void)argv;
+  (void)ierr;
+#endif
+
+  // Initialize Kokkos
+  if (static_cast<bool>(libraries & InitializeLibrary::Kokkos))
+    {
+      // argv has argc+1 elements, the last one being a nullptr. To append
+      // one element we thus create a new argv by copying the first argc
+      // elements, appending the new option, and then a nullptr.
+      //
+      // We do get in trouble, though, if a user program is called with
+      // '--help' as a command line argument. This '--help' gets passed on to
+      // Kokkos, which promptly responds with a lengthy message that the user
+      // likely did not intend. As a consequence, filter out this specific
+      // flag.
+      std::vector<char *> argv_new;
+      for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
+        if (strcmp(arg, "--help") != 0)
+          argv_new.push_back(arg);
+
+      std::stringstream threads_flag;
+#if KOKKOS_VERSION >= 30700
+      threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
+#else
+      threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
+#endif
+      const std::string threads_flag_string = threads_flag.str();
+      argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
+      argv_new.push_back(nullptr);
+
+      // The first argument in Kokkos::initialize is of type int&. Hence, we
+      // need to define a new variable to pass to it (instead of using argc+1
+      // inline).
+      int argc_new = argv_new.size() - 1;
+      Kokkos::initialize(argc_new, argv_new.data());
+    }
+
+    // we are allowed to call MPI_Init ourselves and PETScInitialize will
+    // detect this. This allows us to use MPI_Init_thread instead.
+#ifdef DEAL_II_WITH_PETSC
+  PetscErrorCode pierr;
+#  ifdef DEAL_II_WITH_SLEPC
+  // Initialize SLEPc (with PETSc):
+  if (static_cast<bool>(libraries & InitializeLibrary::SLEPc))
+    {
+      finalize_petscslepc = SlepcInitializeCalled ? false : true;
+      pierr               = SlepcInitialize(&argc, &argv, nullptr, nullptr);
+      AssertThrow(pierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(pierr));
+    }
+#  else
+  // or just initialize PETSc alone:
+  if (static_cast<bool>(libraries & InitializeLibrary::PETSc))
+    {
+      finalize_petscslepc = PetscInitializeCalled ? false : true;
+      pierr               = PetscInitialize(&argc, &argv, nullptr, nullptr);
+      AssertThrow(pierr == 0, ExcPETScError(pierr));
+    }
+#  endif
+
+  // Disable PETSc exception handling. This just prints a large wall
+  // of text that is not particularly helpful for what we do:
+  if (static_cast<bool>(libraries & InitializeLibrary::SLEPc) ||
+      static_cast<bool>(libraries & InitializeLibrary::PETSc))
+    {
+      pierr = PetscPopSignalHandler();
+      AssertThrow(pierr == 0, ExcPETScError(pierr));
+    }
+#endif
+
+    // Initialize zoltan
+#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
+  if (static_cast<bool>(libraries & InitializeLibrary::Zoltan))
+    {
+      float version;
+      Zoltan_Initialize(argc, argv, &version);
+    }
+#endif
+
+    // Initialize p4est and libsc components
+#ifdef DEAL_II_WITH_P4EST
+  if (static_cast<bool>(libraries & InitializeLibrary::P4EST))
+    {
+#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
+      // This feature is broken in version 2.0.0 for calls to
+      // MPI_Comm_create_group (see cburstedde/p4est#30).
+      // Disabling it leads to more verbose p4est error messages
+      // which should be fine.
+      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
+#  endif
+      p4est_init(nullptr, SC_LP_SILENT);
+    }
+#endif
+
+  constructor_has_already_run = true;
+
+
+  // Now also see how many threads we'd like to run
+  if (max_num_threads != numbers::invalid_unsigned_int)
+    {
+      // set maximum number of threads (also respecting the environment
+      // variable that the called function evaluates) based on what the
+      // user asked
+      MultithreadInfo::set_thread_limit(max_num_threads);
+    }
+  else
+    // user wants automatic choice
+    {
+      unsigned int n_threads = MultithreadInfo::n_cores();
+#ifdef DEAL_II_WITH_MPI
+      if (static_cast<bool>(libraries & InitializeLibrary::MPI))
+        {
+          int MPI_has_been_started = 0;
+          int ierr                 = MPI_Initialized(&MPI_has_been_started);
+          AssertThrowMPI(ierr);
+
+          // we need to figure out how many MPI processes there are on the
+          // current node, as well as how many CPU cores we have. for the
+          // first task, check what get_hostname() returns and then do an
+          // allgather so each processor gets the answer
+          //
+          // in calculating the length of the string, don't forget the
+          // terminating \0 on C-style strings
+          const std::string hostname = Utilities::System::get_hostname();
+
+          int my_hostname_size  = hostname.size() + 1;
+          int max_hostname_size = -1;
+          ierr                  = MPI_Allreduce(&my_hostname_size,
+                               &max_hostname_size,
+                               1,
+                               MPI_INT,
+                               MPI_MAX,
+                               MPI_COMM_WORLD);
+          AssertThrowMPI(ierr);
+          std::vector<char> hostname_array(max_hostname_size);
+          std::copy(hostname.c_str(),
+                    hostname.c_str() + hostname.size() + 1,
+                    hostname_array.begin());
+
+          int n_mpi_processes = 1;
+          if (MPI_has_been_started)
+            {
+              ierr = MPI_Comm_size(MPI_COMM_WORLD, &n_mpi_processes);
+              AssertThrowMPI(ierr);
+            }
+          std::vector<char> all_hostnames(max_hostname_size * n_mpi_processes);
+          ierr = MPI_Allgather(hostname_array.data(),
+                               max_hostname_size,
+                               MPI_CHAR,
+                               all_hostnames.data(),
+                               max_hostname_size,
+                               MPI_CHAR,
+                               MPI_COMM_WORLD);
+          AssertThrowMPI(ierr);
+
+          // search how often our own hostname appears and which instance
+          // among those the current process represents
+          unsigned int n_local_processes   = 0;
+          unsigned int nth_process_on_host = 0;
+          int          rank                = 0;
+          if (MPI_has_been_started)
+            {
+              ierr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+              AssertThrowMPI(ierr);
+            }
+          for (int i = 0; i < n_mpi_processes; ++i)
+            if (std::string(all_hostnames.data() + i * max_hostname_size) ==
+                hostname)
+              {
+                ++n_local_processes;
+                if (i <= rank)
+                  ++nth_process_on_host;
+              }
+          Assert(nth_process_on_host > 0, ExcInternalError());
+
+
+          // compute how many cores each process gets. if the number does not
+          // divide evenly, then we get one more core if we are among the
+          // first few processes
+          //
+          // if the number would be zero, round up to one since every process
+          // needs to have at least one thread
+          n_threads =
+            std::max(MultithreadInfo::n_cores() / n_local_processes +
+                       (nth_process_on_host <=
+                            MultithreadInfo::n_cores() % n_local_processes ?
+                          1 :
+                          0),
+                     1U);
+        }
+#endif
+
+      // finally set this number of threads
+      MultithreadInfo::set_thread_limit(n_threads);
+    }
+
+  // As a final step call the at_mpi_init() signal handler.
+  signals.at_mpi_init();
+}
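+
+// A worked example of the division above (illustrative numbers): with
+// MultithreadInfo::n_cores() == 16 and n_local_processes == 5, each
+// process gets 16 / 5 == 3 cores, and because 16 % 5 == 1 the first
+// process on the host (nth_process_on_host == 1) gets one more; the
+// resulting thread counts are 4, 3, 3, 3, 3.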
+
+
+
+void
+InitFinalize::register_request(MPI_Request &request)
+{
+  // insert if it is not in the set already:
+  requests.insert(&request);
+}
+
+
+
+void
+InitFinalize::unregister_request(MPI_Request &request)
+{
+  Assert(requests.find(&request) != requests.end(),
+         ExcMessage(
+           "You tried to call unregister_request() with an invalid request."));
+
+  requests.erase(&request);
+}
+
+
+
+std::set<MPI_Request *> InitFinalize::requests;
+
+
+
+void
+InitFinalize::finalize()
+{
+  if (!is_finalized)
+    {
+      // First, call the at_mpi_finalize() signal handler.
+      signals.at_mpi_finalize();
+
+      // make memory pool release all PETSc/Trilinos/MPI-based vectors that
+      // are no longer used at this point. this is relevant because the static
+      // object destructors run for these vectors at the end of the program
+      // would run after MPI_Finalize is called, leading to errors
+
+#ifdef DEAL_II_WITH_MPI
+      // Before exiting, wait for nonblocking communication to complete:
+      for (auto *request : requests)
+        {
+          const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+        }
+
+      // Start with deal.II MPI vectors and delete vectors from the pools:
+      GrowingVectorMemory<
+        LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
+      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
+        release_unused_memory();
+      GrowingVectorMemory<
+        LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
+      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
+        release_unused_memory();
+
+      // Next with Trilinos:
+#  ifdef DEAL_II_WITH_TRILINOS
+      GrowingVectorMemory<
+        TrilinosWrappers::MPI::Vector>::release_unused_memory();
+      GrowingVectorMemory<
+        TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
+#  endif
+#endif
+
+
+      // Now deal with PETSc (with or without MPI). Only delete the vectors if
+      // finalize hasn't been called yet, otherwise this will lead to errors.
+#ifdef DEAL_II_WITH_PETSC
+      if (!PetscFinalizeCalled)
+        {
+          GrowingVectorMemory<
+            PETScWrappers::MPI::Vector>::release_unused_memory();
+          GrowingVectorMemory<
+            PETScWrappers::MPI::BlockVector>::release_unused_memory();
+        }
+#  ifdef DEAL_II_WITH_SLEPC
+      // and now end SLEPc with PETSc if we did so
+      if (static_cast<bool>(libraries & InitializeLibrary::SLEPc) &&
+          (finalize_petscslepc))
+        {
+          PetscErrorCode ierr = SlepcFinalize();
+          AssertThrow(ierr == 0,
+                      SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
+        }
+#  else
+      // or just end PETSc if we did so
+      if (static_cast<bool>(libraries & InitializeLibrary::PETSc) &&
+          (finalize_petscslepc))
+        {
+          PetscErrorCode ierr = PetscFinalize();
+          AssertThrow(ierr == 0, ExcPETScError(ierr));
+        }
+#  endif
+#endif
+
+#ifdef DEAL_II_WITH_P4EST
+      // now end p4est and libsc
+      // Note: p4est has no finalize function
+      if (static_cast<bool>(libraries & InitializeLibrary::P4EST))
+        sc_finalize();
+#endif
+
+
+      // Finalize Kokkos
+      if (static_cast<bool>(libraries & InitializeLibrary::Kokkos))
+        Kokkos::finalize();
+
+        // only MPI_Finalize if we are running with MPI. We also need to do this
+        // when running PETSc, because we initialize MPI ourselves before
+        // calling PetscInitialize
+#ifdef DEAL_II_WITH_MPI
+      int       MPI_has_been_started = 0;
+      const int ierr                 = MPI_Initialized(&MPI_has_been_started);
+      AssertThrowMPI(ierr);
+      if (static_cast<bool>(libraries & InitializeLibrary::MPI) &&
+          (MPI_has_been_started))
+        {
+#  if __cpp_lib_uncaught_exceptions >= 201411
+          // std::uncaught_exception() is deprecated in c++17
+          if (std::uncaught_exceptions() > 0)
+#  else
+          if (std::uncaught_exception() == true)
+#  endif
+            {
+              // do not try to call MPI_Finalize to avoid a deadlock.
+            }
+          else
+            {
+              const int ierr = MPI_Finalize();
+              (void)ierr;
+              AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
+            }
+        }
+#endif
+      is_finalized = true;
+    }
+}
+
+
+
+InitFinalize::~InitFinalize()
+{
+  finalize();
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
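
The two signals defined by this class are already consumed elsewhere in
this patch: the mpi.cc hunks below connect at_mpi_finalize to free MPI
datatypes and operations. A minimal sketch of the same pattern, assuming a
statically allocated MPI resource of your own ('my_type' is hypothetical;
the connect() call on InitFinalize::signals is the actual interface):

    // 'my_type' stands in for any statically allocated MPI resource.
    static MPI_Datatype my_type = MPI_DATATYPE_NULL;
    dealii::InitFinalize::signals.at_mpi_finalize.connect([]() {
      if (my_type != MPI_DATATYPE_NULL)
        {
          const int ierr = MPI_Type_free(&my_type);
          AssertThrowMPI(ierr);
        }
    });
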
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index e19aabb7452ececf6cd8d5e90f6c1beb971e4cf0..12b8fa91f548338d0c181d70fe23592b05633caf 100644
 #include <deal.II/base/mpi_compute_index_owner_internal.h>
 #include <deal.II/base/mpi_large_count.h>
 #include <deal.II/base/mpi_tags.h>
-#include <deal.II/base/multithread_info.h>
 #include <deal.II/base/utilities.h>
 
 #include <deal.II/lac/full_matrix.h>
-#include <deal.II/lac/la_parallel_block_vector.h>
-#include <deal.II/lac/la_parallel_vector.h>
 #include <deal.II/lac/lapack_full_matrix.h>
-#include <deal.II/lac/vector_memory.h>
 
 #include <boost/serialization/utility.hpp>
 
-#include <Kokkos_Core.hpp>
-
 #include <iostream>
 #include <limits>
 #include <numeric>
 #include <set>
 #include <vector>
 
-#ifdef DEAL_II_WITH_TRILINOS
-#  ifdef DEAL_II_WITH_MPI
-#    include <deal.II/lac/trilinos_parallel_block_vector.h>
-#    include <deal.II/lac/trilinos_vector.h>
-
-#    include <Epetra_MpiComm.h>
-#  endif
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-#  include <deal.II/lac/petsc_block_vector.h>
-#  include <deal.II/lac/petsc_vector.h>
-
-#  include <petscsys.h>
-#endif
-
-#ifdef DEAL_II_WITH_SLEPC
-#  include <deal.II/lac/slepc_solver.h>
-
-#  include <slepcsys.h>
-#endif
-
-#ifdef DEAL_II_WITH_P4EST
-#  include <p4est_bits.h>
-#endif
-
-#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
-#  include <zoltan_cpp.h>
-#endif
-
 DEAL_II_NAMESPACE_OPEN
 
 
@@ -566,7 +530,7 @@ namespace Utilities
 
         /* Ensure that we free the allocated datatype again at the end of
          * the program run just before we call MPI_Finalize():*/
-        MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
+        InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
           int ierr = MPI_Type_free(&type);
           AssertThrowMPI(ierr);
         });
@@ -590,7 +554,7 @@ namespace Utilities
 
         /* Ensure that we free the allocated op again at the end of the
          * program run just before we call MPI_Finalize():*/
-        MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
+        InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
           int ierr = MPI_Op_free(&op);
           AssertThrowMPI(ierr);
         });
@@ -715,336 +679,17 @@ namespace Utilities
 
 #endif
 
-    /* Force initialization of static struct: */
-    MPI_InitFinalize::Signals MPI_InitFinalize::signals =
-      MPI_InitFinalize::Signals();
-
 
     MPI_InitFinalize::MPI_InitFinalize(int               &argc,
                                        char            **&argv,
                                        const unsigned int max_num_threads)
-    {
-      static bool constructor_has_already_run = false;
-      (void)constructor_has_already_run;
-      Assert(constructor_has_already_run == false,
-             ExcMessage("You can only create a single object of this class "
-                        "in a program since it initializes the MPI system."));
-
-
-      int ierr = 0;
-#ifdef DEAL_II_WITH_MPI
-      // if we have PETSc, we will initialize it and let it handle MPI.
-      // Otherwise, we will do it.
-      int MPI_has_been_started = 0;
-      ierr                     = MPI_Initialized(&MPI_has_been_started);
-      AssertThrowMPI(ierr);
-      AssertThrow(MPI_has_been_started == 0,
-                  ExcMessage("MPI error. You can only start MPI once!"));
-
-      int provided;
-      // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
-      // we might use several threads but never call two MPI functions at the
-      // same time. For an explanation see on why we do this see
-      // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
-      int wanted = MPI_THREAD_SERIALIZED;
-      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
-      AssertThrowMPI(ierr);
-
-      // disable for now because at least some implementations always return
-      // MPI_THREAD_SINGLE.
-      // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
-      //    ExcMessage("MPI reports that we are not allowed to use multiple
-      //    threads."));
-#else
-      // make sure the compiler doesn't warn about these variables
-      (void)argc;
-      (void)argv;
-      (void)ierr;
-#endif
-
-      // Initialize Kokkos
-      {
-        // argv has argc+1 elements and the last one is a nullptr. For appending
-        // one element we thus create a new argv by copying the first argc
-        // elements, append the new option, and then a nullptr.
-        //
-        // We do get in trouble, though, if a user program is called with
-        // '--help' as a command line argument. This '--help' gets passed on to
-        // Kokkos, which promptly responds with a lengthy message that the user
-        // likely did not intend. As a consequence, filter out this specific
-        // flag.
-        std::vector<char *> argv_new;
-        for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
-          if (strcmp(arg, "--help") != 0)
-            argv_new.push_back(arg);
-
-        std::stringstream threads_flag;
-#if KOKKOS_VERSION >= 30700
-        threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
-#else
-        threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
-#endif
-        const std::string threads_flag_string = threads_flag.str();
-        argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
-        argv_new.push_back(nullptr);
-
-        // The first argument in Kokkos::initialize is of type int&. Hence, we
-        // need to define a new variable to pass to it (instead of using argc+1
-        // inline).
-        int argc_new = argv_new.size() - 1;
-        Kokkos::initialize(argc_new, argv_new.data());
-      }
-
-      // we are allowed to call MPI_Init ourselves and PETScInitialize will
-      // detect this. This allows us to use MPI_Init_thread instead.
-#ifdef DEAL_II_WITH_PETSC
-      PetscErrorCode pierr;
-#  ifdef DEAL_II_WITH_SLEPC
-      // Initialize SLEPc (with PETSc):
-      finalize_petscslepc = SlepcInitializeCalled ? false : true;
-      pierr               = SlepcInitialize(&argc, &argv, nullptr, nullptr);
-      AssertThrow(pierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(pierr));
-#  else
-      // or just initialize PETSc alone:
-      finalize_petscslepc = PetscInitializeCalled ? false : true;
-      pierr               = PetscInitialize(&argc, &argv, nullptr, nullptr);
-      AssertThrow(pierr == 0, ExcPETScError(pierr));
-#  endif
-
-      // Disable PETSc exception handling. This just prints a large wall
-      // of text that is not particularly helpful for what we do:
-      pierr = PetscPopSignalHandler();
-      AssertThrow(pierr == 0, ExcPETScError(pierr));
-#endif
-
-      // Initialize zoltan
-#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
-      float version;
-      Zoltan_Initialize(argc, argv, &version);
-#endif
-
-#ifdef DEAL_II_WITH_P4EST
-      // Initialize p4est and libsc components
-#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
-      // This feature is broken in version 2.0.0 for calls to
-      // MPI_Comm_create_group (see cburstedde/p4est#30).
-      // Disabling it leads to more verbose p4est error messages
-      // which should be fine.
-      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
-#  endif
-      p4est_init(nullptr, SC_LP_SILENT);
-#endif
-
-      constructor_has_already_run = true;
-
-
-      // Now also see how many threads we'd like to run
-      if (max_num_threads != numbers::invalid_unsigned_int)
-        {
-          // set maximum number of threads (also respecting the environment
-          // variable that the called function evaluates) based on what the
-          // user asked
-          MultithreadInfo::set_thread_limit(max_num_threads);
-        }
-      else
-        // user wants automatic choice
-        {
-#ifdef DEAL_II_WITH_MPI
-          // we need to figure out how many MPI processes there are on the
-          // current node, as well as how many CPU cores we have. for the
-          // first task, check what get_hostname() returns and then do an
-          // allgather so each processor gets the answer
-          //
-          // in calculating the length of the string, don't forget the
-          // terminating \0 on C-style strings
-          const std::string  hostname = Utilities::System::get_hostname();
-          const unsigned int max_hostname_size =
-            Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
-          std::vector<char> hostname_array(max_hostname_size);
-          std::copy(hostname.c_str(),
-                    hostname.c_str() + hostname.size() + 1,
-                    hostname_array.begin());
-
-          std::vector<char> all_hostnames(max_hostname_size *
-                                          MPI::n_mpi_processes(MPI_COMM_WORLD));
-          const int         ierr = MPI_Allgather(hostname_array.data(),
-                                         max_hostname_size,
-                                         MPI_CHAR,
-                                         all_hostnames.data(),
-                                         max_hostname_size,
-                                         MPI_CHAR,
-                                         MPI_COMM_WORLD);
-          AssertThrowMPI(ierr);
-
-          // search how often our own hostname appears and the how-manyth
-          // instance the current process represents
-          unsigned int n_local_processes   = 0;
-          unsigned int nth_process_on_host = 0;
-          for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
-               ++i)
-            if (std::string(all_hostnames.data() + i * max_hostname_size) ==
-                hostname)
-              {
-                ++n_local_processes;
-                if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
-                  ++nth_process_on_host;
-              }
-          Assert(nth_process_on_host > 0, ExcInternalError());
-
-
-          // compute how many cores each process gets. if the number does not
-          // divide evenly, then we get one more core if we are among the
-          // first few processes
-          //
-          // if the number would be zero, round up to one since every process
-          // needs to have at least one thread
-          const unsigned int n_threads =
-            std::max(MultithreadInfo::n_cores() / n_local_processes +
-                       (nth_process_on_host <=
-                            MultithreadInfo::n_cores() % n_local_processes ?
-                          1 :
-                          0),
-                     1U);
-#else
-          const unsigned int n_threads = MultithreadInfo::n_cores();
-#endif
-
-          // finally set this number of threads
-          MultithreadInfo::set_thread_limit(n_threads);
-        }
-
-      // As a final step call the at_mpi_init() signal handler.
-      signals.at_mpi_init();
-    }
-
-
-
-    void
-    MPI_InitFinalize::register_request(MPI_Request &request)
-    {
-      // insert if it is not in the set already:
-      requests.insert(&request);
-    }
-
-
-
-    void
-    MPI_InitFinalize::unregister_request(MPI_Request &request)
-    {
-      Assert(
-        requests.find(&request) != requests.end(),
-        ExcMessage(
-          "You tried to call unregister_request() with an invalid request."));
-
-      requests.erase(&request);
-    }
-
-
-
-    std::set<MPI_Request *> MPI_InitFinalize::requests;
-
-
-
-    MPI_InitFinalize::~MPI_InitFinalize()
-    {
-      // First, call the at_mpi_finalize() signal handler.
-      signals.at_mpi_finalize();
-
-      // make memory pool release all PETSc/Trilinos/MPI-based vectors that
-      // are no longer used at this point. this is relevant because the static
-      // object destructors run for these vectors at the end of the program
-      // would run after MPI_Finalize is called, leading to errors
-
-#ifdef DEAL_II_WITH_MPI
-      // Before exiting, wait for nonblocking communication to complete:
-      for (auto *request : requests)
-        {
-          const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
-          AssertThrowMPI(ierr);
-        }
-
-      // Start with deal.II MPI vectors and delete vectors from the pools:
-      GrowingVectorMemory<
-        LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
-      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
-        release_unused_memory();
-      GrowingVectorMemory<
-        LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
-      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
-        release_unused_memory();
-
-      // Next with Trilinos:
-#  ifdef DEAL_II_WITH_TRILINOS
-      GrowingVectorMemory<
-        TrilinosWrappers::MPI::Vector>::release_unused_memory();
-      GrowingVectorMemory<
-        TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
-#  endif
-#endif
-
-
-      // Now deal with PETSc (with or without MPI). Only delete the vectors if
-      // finalize hasn't been called yet, otherwise this will lead to errors.
-#ifdef DEAL_II_WITH_PETSC
-      if (!PetscFinalizeCalled)
-        {
-          GrowingVectorMemory<
-            PETScWrappers::MPI::Vector>::release_unused_memory();
-          GrowingVectorMemory<
-            PETScWrappers::MPI::BlockVector>::release_unused_memory();
-        }
-#  ifdef DEAL_II_WITH_SLEPC
-      // and now end SLEPc with PETSc if we did so
-      if (finalize_petscslepc)
-        {
-          PetscErrorCode ierr = SlepcFinalize();
-          AssertThrow(ierr == 0,
-                      SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
-        }
-#  else
-      // or just end PETSc if we did so
-      if (finalize_petscslepc)
-        {
-          PetscErrorCode ierr = PetscFinalize();
-          AssertThrow(ierr == 0, ExcPETScError(ierr));
-        }
-#  endif
-#endif
-
-#ifdef DEAL_II_WITH_P4EST
-      // now end p4est and libsc
-      // Note: p4est has no finalize function
-      sc_finalize();
-#endif
-
-
-      // Finalize Kokkos
-      Kokkos::finalize();
-
-      // only MPI_Finalize if we are running with MPI. We also need to do this
-      // when running PETSc, because we initialize MPI ourselves before
-      // calling PetscInitialize
-#ifdef DEAL_II_WITH_MPI
-      if (job_supports_mpi() == true)
-        {
-#  if __cpp_lib_uncaught_exceptions >= 201411
-          // std::uncaught_exception() is deprecated in c++17
-          if (std::uncaught_exceptions() > 0)
-#  else
-          if (std::uncaught_exception() == true)
-#  endif
-            {
-              // do not try to call MPI_Finalize to avoid a deadlock.
-            }
-          else
-            {
-              const int ierr = MPI_Finalize();
-              (void)ierr;
-              AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
-            }
-        }
-#endif
-    }
+      : InitFinalize(argc,
+                     argv,
+                     InitializeLibrary::MPI | InitializeLibrary::Kokkos |
+                       InitializeLibrary::SLEPc | InitializeLibrary::PETSc |
+                       InitializeLibrary::Zoltan | InitializeLibrary::P4EST,
+                     max_num_threads)
+    {}
 
 
 
@@ -1146,7 +791,7 @@ namespace Utilities
       : locked(false)
       , request(MPI_REQUEST_NULL)
     {
-      Utilities::MPI::MPI_InitFinalize::register_request(request);
+      InitFinalize::register_request(request);
     }
 
 
@@ -1162,7 +807,7 @@ namespace Utilities
         ExcMessage(
           "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
 
-      Utilities::MPI::MPI_InitFinalize::unregister_request(request);
+      InitFinalize::unregister_request(request);
     }
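
Finally, note that InitFinalize::finalize() is idempotent (guarded by the
is_finalized flag), so finalization can also be triggered explicitly before
the object goes out of scope. A sketch:

    dealii::InitFinalize init(argc, argv, dealii::InitializeLibrary::MPI);
    // ... communication ...
    init.finalize();  // finalizes MPI here and now
    // the destructor that runs later is a no-op, since is_finalized is true
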
 
 
