Remove 'parallel' prefix in distributed/grid_tools
author    Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
Sun, 12 Nov 2017 20:35:40 +0000 (21:35 +0100)
committer Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
Mon, 13 Nov 2017 13:47:02 +0000 (14:47 +0100)
12 files changed:
include/deal.II/distributed/grid_tools.h
source/distributed/grid_tools.cc
source/distributed/grid_tools.inst.in
source/dofs/dof_handler_policy.cc
source/hp/dof_handler.cc
tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc
tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=12.output
tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.mpirun=4.output
tests/distributed_grids/grid_tools_exchange_cell_data_01.cc
tests/distributed_grids/grid_tools_exchange_cell_data_02.cc
tests/distributed_grids/grid_tools_exchange_cell_data_03.cc
tests/distributed_grids/grid_tools_exchange_cell_data_04.cc

index 47606000f7998f46737216fbcc4baa4d715fdd11..31e15b78d65c682c1c8ae8a7dce90f01cfaf9792 100644 (file)
@@ -42,438 +42,425 @@ DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
 
 DEAL_II_NAMESPACE_OPEN
 
-namespace parallel
+namespace GridTools
 {
+  /**
+   * Exchange arbitrary data of type @p DataType provided by the function
+   * objects from locally owned cells to ghost cells on other processors.
+   *
+   * After this call, you will typically have received data via @p unpack on
+   * every ghost cell as it was given by @p pack on the owning processor.
+   * Whether or not you receive information in @p unpack on a given
+   * ghost cell depends on whether the @p pack function decided that
+   * something needs to be sent. It does so using the boost::optional
+   * mechanism: if the boost::optional return object of the @p pack
+   * function is empty, then this implies that no data has to be sent for
+   * the locally owned cell it was called on. In that case, @p unpack will
+   * also not be called on the ghost cell that corresponds to it on the
+   * receiving side. On the other hand, if the boost::optional object is
+   * not empty, then the data stored within it will be sent to the receiver
+   * and the @p unpack function will be called with it.
+   *
+   * @tparam DataType The type of the data to be communicated. It is assumed
+   *   to be serializable by boost::serialization. In many cases, this
+   *   data type cannot be deduced by the compiler, e.g., if you provide
+   *   lambda functions for the second and third argument
+   *   to this function. In this case, you have to explicitly specify
+   *   the @p DataType as a template argument to the function call.
+   * @tparam MeshType The type of @p mesh.
+   *
+   * @param mesh A variable of a type that satisfies the requirements of the
+   * @ref ConceptMeshType "MeshType concept".
+   * @param pack The function that will be called on each locally owned cell
+   *   that is a ghost cell somewhere else. As mentioned above, the function
+   *   may return a regular data object of type @p DataType to indicate
+   *   that data should be sent, or an empty
+   *   <code>boost::optional@<DataType@></code> to indicate that nothing has
+   *   to be sent for this cell.
+   * @param unpack The function that will be called for each ghost cell
+   *   for which data was sent, i.e., for which the @p pack function
+   *   on the sending side returned a non-empty boost::optional object.
+   *   The @p unpack function is then called with the data sent by the
+   *   processor that owns that cell.
+   *
+   *
+   * <h4> An example </h4>
+   *
+   * Here is an example that shows how this function is to be used
+   * in a concrete context. It is taken from the code that makes
+   * sure that the @p active_fe_index (a single unsigned integer) is
+   * transported from locally owned cells where one can set it in
+   * hp::DoFHandler objects, to the corresponding ghost cells on
+   * other processors to ensure that one can query the right value
+   * also on those processors:
+   * @code
+   *    auto pack
+   *    = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell) -> unsigned int
+   *    {
+   *      return cell->active_fe_index();
+   *    };
+   *
+   *    auto unpack
+   *      = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell,
+   *            const unsigned int                                                        &active_fe_index) -> void
+   *    {
+   *      cell->set_active_fe_index(active_fe_index);
+   *    };
+   *
+   *   GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
+   *        (dof_handler, pack, unpack);
+   * @endcode
+   *
+   * You will notice that the @p pack lambda function returns an `unsigned int`,
+   * not a `boost::optional<unsigned int>`. The former converts automatically
+   * to the latter, implying that data will always be transported to the
+   * other processor.
+   *
+   * (In reality, the @p unpack function needs to be a bit more
+   * complicated because it is not allowed to call
+   * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the
+   * @p unpack function directly accesses internal data structures. But
+   * you get the idea -- the code could, just as well, have exchanged
+   * material ids, user indices, boundary indicators, or any kind of other
+   * data with similar calls as the ones above.)
+   */
+  template <typename DataType, typename MeshType>
+  void
+  exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
+                                const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack);
+
+  /**
+   * Exchange with all processors of the MPI communicator @p mpi_communicator
+   * the vector of bounding boxes @p local_bboxes.
+   *
+   * This function is meant to exchange bounding boxes describing the locally owned
+   * cells in a distributed triangulation obtained with the function
+   * GridTools::compute_mesh_predicate_bounding_box().
+   *
+   * The output vector's size is the number of processes of the MPI communicator:
+   * its i-th entry contains the vector @p local_bboxes of the i-th process.
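+   *
+   * A minimal usage sketch (illustrative; it assumes @p local_bboxes has been
+   * filled beforehand, e.g., with the help of
+   * GridTools::compute_mesh_predicate_bounding_box()):
+   * @code
+   *   std::vector<std::vector<BoundingBox<spacedim>>> global_bboxes
+   *     = GridTools::exchange_local_bounding_boxes(local_bboxes,
+   *                                                mpi_communicator);
+   *   // global_bboxes[p] now holds the boxes contributed by process p
+   * @endcode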
+   */
+  template<int spacedim>
+  std::vector< std::vector< BoundingBox<spacedim> > >
+  exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
+                                MPI_Comm                                    mpi_communicator);
 
   /**
-   * This namespace defines parallel algorithms that operate on meshes.
+   * A structure that allows the transfer of cell data of type @p T from one processor
+   * to another. It corresponds to a packed buffer that stores a vector of
+   * CellId and a vector of type @p T.
+   *
+   * This class facilitates the transfer by providing the save/load functions
+   * that are able to pack up the vector of CellId's and the associated
+   * data of type @p T into a stream.
+   *
+   * Type @p T is assumed to be serializable by <code>boost::serialization</code> (for
+   * example <code>unsigned int</code> or <code>std::vector@<double@></code>).
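+   *
+   * As an illustrative sketch (not code from the library itself), a round
+   * trip through the packed representation could look like this, assuming
+   * @p cell is some active cell iterator:
+   * @code
+   *   CellDataTransferBuffer<dim, unsigned int> buffer;
+   *   buffer.cell_ids.emplace_back(cell->id());
+   *   buffer.data.emplace_back(42u);
+   *   const std::vector<char> packed = buffer.pack_data();
+   *
+   *   CellDataTransferBuffer<dim, unsigned int> restored;
+   *   restored.unpack_data(packed);
+   *   // restored now contains the same cell ids and data as buffer
+   * @endcode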
    */
-  namespace GridTools
+  template <int dim, typename T>
+  struct CellDataTransferBuffer
   {
     /**
-     * Exchange arbitrary data of type @p DataType provided by the function
-     * objects from locally owned cells to ghost cells on other processors.
-     *
-     * After this call, you typically will have received data from @p unpack on
-     * every ghost cell as it was given by @p pack on the owning processor.
-     * Whether you do or do not receive information to @p unpack on a given
-     * ghost cell depends on whether the @p pack function decided that
-     * something needs to be sent. It does so using the boost::optional
-     * mechanism: if the boost::optional return object of the @p pack
-     * function is empty, then this implies that no data has to be sent for
-     * the locally owned cell it was called on. In that case, @p unpack will
-     * also not be called on the ghost cell that corresponds to it on the
-     * receiving side. On the other hand, if the boost::optional object is
-     * not empty, then the data stored within it will be sent to the received
-     * and the @p unpack function called with it.
-     *
-     * @tparam DataType The type of the data to be communicated. It is assumed
-     *   to be serializable by boost::serialization. In many cases, this
-     *   data type can not be deduced by the compiler, e.g., if you provide
-     *   lambda functions for the second and third argument
-     *   to this function. In this case, you have to explicitly specify
-     *   the @p DataType as a template argument to the function call.
-     * @tparam MeshType The type of @p mesh.
-     *
-     * @param mesh A variable of a type that satisfies the requirements of the
-     * @ref ConceptMeshType "MeshType concept".
-     * @param pack The function that will be called on each locally owned cell
-     *   that is a ghost cell somewhere else. As mentioned above, the function
-     *   may return a regular data object of type @p DataType to indicate
-     *   that data should be sent, or an empty
-     *   <code>boost::optional@<DataType@></code> to indicate that nothing has
-     *   to be sent for this cell.
-     * @param unpack The function that will be called for each ghost cell
-     *   for which data was sent, i.e., for which the @p pack function
-     *   on the sending side returned a non-empty boost::optional object.
-     *   The @p unpack function is then called with the data sent by the
-     *   processor that owns that cell.
-     *
-     *
-     * <h4> An example </h4>
-     *
-     * Here is an example that shows how this function is to be used
-     * in a concrete context. It is taken from the code that makes
-     * sure that the @p active_fe_index (a single unsigned integer) is
-     * transported from locally owned cells where one can set it in
-     * hp::DoFHandler objects, to the corresponding ghost cells on
-     * other processors to ensure that one can query the right value
-     * also on those processors:
-     * @code
-     *    auto pack
-     *    = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell) -> unsigned int
-     *    {
-     *      return cell->active_fe_index();
-     *    };
-     *
-     *    auto unpack
-     *      = [] (const typename dealii::hp::DoFHandler<dim,spacedim>::active_cell_iterator &cell,
-     *            const unsigned int                                                        &active_fe_index) -> void
-     *    {
-     *      cell->set_active_fe_index(active_fe_index);
-     *    };
-     *
-     *    parallel::GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
-     *        (dof_handler, pack, unpack);
-     * @endcode
-     *
-     * You will notice that the @p pack lambda function returns an `unsigned int`,
-     * not a `boost::optional<unsigned int>`. The former converts automatically
-     * to the latter, implying that data will always be transported to the
-     * other processor.
-     *
-     * (In reality, the @p unpack function needs to be a bit more
-     * complicated because it is not allowed to call
-     * DoFAccessor::set_active_fe_index() on ghost cells. Rather, the
-     * @p unpack function directly accesses internal data structures. But
-     * you get the idea -- the code could, just as well, have exchanged
-     * material ids, user indices, boundary indictors, or any kind of other
-     * data with similar calls as the ones above.)
+     * A vector to store IDs of cells to be transferred.
      */
-    template <typename DataType, typename MeshType>
-    void
-    exchange_cell_data_to_ghosts (const MeshType &mesh,
-                                  const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
-                                  const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack);
+    std::vector<CellId> cell_ids;
 
     /**
-     * Exchange with all processors of the MPI communicator @p mpi_communicator the vector of bounding
-     * boxes @p local_bboxes.
-     *
-     * This function is meant to exchange bounding boxes describing the locally owned
-     * cells in a distributed triangulation obtained with the function
-     * GridTools::compute_mesh_predicate_bounding_box .
-     *
-     * The output vector's size is the number of processes of the MPI communicator:
-     * its i-th entry contains the vector @p local_bboxes of the i-th process.
+     * A vector of cell data to be transferred.
      */
-    template<int spacedim>
-    std::vector< std::vector< BoundingBox<spacedim> > >
-    exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
-                                  MPI_Comm                                    mpi_communicator);
+    std::vector<T> data;
 
     /**
-     * A structure that allows the transfer of cell data of type @p T from one processor
-     * to another. It corresponds to a packed buffer that stores a vector of
-     * CellId and a vector of type @p T.
+     * Write the data of this object to a stream for the purpose of
+     * serialization.
      *
-     * This class facilitates the transfer by providing the save/load functions
-     * that are able to pack up the vector of CellId's and the associated
-     * data of type @p T into a stream.
-     *
-     * Type @p T is assumed to be serializable by <code>boost::serialization</code> (for
-     * example <code>unsigned int</code> or <code>std::vector@<double@></code>).
+     * @pre The user is responsible for keeping the size of @p data
+     * equal to the size of @p cell_ids.
      */
-    template <int dim, typename T>
-    struct CellDataTransferBuffer
-    {
-      /**
-       * A vector to store IDs of cells to be transfered.
-       */
-      std::vector<CellId> cell_ids;
-
-      /**
-       * A vector of cell data to be transfered.
-       */
-      std::vector<T> data;
-
-      /**
-       * Write the data of this object to a stream for the purpose of
-       * serialization.
-       *
-       * @pre The user is responsible to keep the size of @p data
-       * equal to the size as @p cell_ids .
-       */
-      template <class Archive>
-      void save (Archive &ar,
-                 const unsigned int version) const;
-
-      /**
-       * Read the data of this object from a stream for the purpose of
-       * serialization. Throw away the previous content.
-       */
-      template <class Archive>
-      void load (Archive &ar,
-                 const unsigned int version);
-
-      BOOST_SERIALIZATION_SPLIT_MEMBER()
-
-      /**
-       * Pack the data that corresponds to this object into a buffer in
-       * the form of a vector of chars and return it.
-       */
-      std::vector<char> pack_data () const;
-
-      /**
-       * Given a buffer in the form of an array of chars, unpack it and
-       * restore the current object to the state that it was when
-       * it was packed into said buffer by the pack_data() function.
-       */
-      void unpack_data (const std::vector<char> &buffer);
-
-    };
+    template <class Archive>
+    void save (Archive &ar,
+               const unsigned int version) const;
 
-  }
+    /**
+     * Read the data of this object from a stream for the purpose of
+     * serialization. Throw away the previous content.
+     */
+    template <class Archive>
+    void load (Archive &ar,
+               const unsigned int version);
 
+    BOOST_SERIALIZATION_SPLIT_MEMBER()
+
+    /**
+     * Pack the data that corresponds to this object into a buffer in
+     * the form of a vector of chars and return it.
+     */
+    std::vector<char> pack_data () const;
+
+    /**
+     * Given a buffer in the form of an array of chars, unpack it and
+     * restore the current object to the state that it was when
+     * it was packed into said buffer by the pack_data() function.
+     */
+    void unpack_data (const std::vector<char> &buffer);
+
+  };
 
 }
 
 #ifndef DOXYGEN
 
-namespace parallel
+namespace GridTools
 {
-  namespace GridTools
-  {
 
-    template <int dim, typename T>
-    template <class Archive>
-    void
-    CellDataTransferBuffer<dim,T>::save (Archive &ar,
-                                         const unsigned int /*version*/) const
-    {
-      Assert(cell_ids.size() == data.size(),
-             ExcDimensionMismatch(cell_ids.size(), data.size()));
-      // archive the cellids in an efficient binary format
-      const size_t n_cells = cell_ids.size();
-      ar &n_cells;
-      for (auto &it : cell_ids)
-        {
-          CellId::binary_type binary_cell_id = it.template to_binary<dim>();
-          ar &binary_cell_id;
-        }
+  template <int dim, typename T>
+  template <class Archive>
+  void
+  CellDataTransferBuffer<dim,T>::save (Archive &ar,
+                                       const unsigned int /*version*/) const
+  {
+    Assert(cell_ids.size() == data.size(),
+           ExcDimensionMismatch(cell_ids.size(), data.size()));
+    // archive the cellids in an efficient binary format
+    const size_t n_cells = cell_ids.size();
+    ar &n_cells;
+    for (auto &it : cell_ids)
+      {
+        CellId::binary_type binary_cell_id = it.template to_binary<dim>();
+        ar &binary_cell_id;
+      }
 
-      ar &data;
-    }
+    ar &data;
+  }
 
 
 
-    template <int dim, typename T>
-    template <class Archive>
-    void
-    CellDataTransferBuffer<dim,T>::load (Archive &ar,
-                                         const unsigned int /*version*/)
-    {
-      size_t n_cells;
-      ar &n_cells;
-      cell_ids.clear();
-      cell_ids.reserve(n_cells);
-      for (unsigned int c=0; c<n_cells; ++c)
-        {
-          CellId::binary_type value;
-          ar &value;
-          cell_ids.emplace_back(std::move(value));
-        }
-      ar &data;
-    }
+  template <int dim, typename T>
+  template <class Archive>
+  void
+  CellDataTransferBuffer<dim,T>::load (Archive &ar,
+                                       const unsigned int /*version*/)
+  {
+    size_t n_cells;
+    ar &n_cells;
+    cell_ids.clear();
+    cell_ids.reserve(n_cells);
+    for (unsigned int c=0; c<n_cells; ++c)
+      {
+        CellId::binary_type value;
+        ar &value;
+        cell_ids.emplace_back(std::move(value));
+      }
+    ar &data;
+  }
 
 
 
-    template <int dim, typename T>
-    std::vector<char>
-    CellDataTransferBuffer<dim,T>::pack_data () const
+  template <int dim, typename T>
+  std::vector<char>
+  CellDataTransferBuffer<dim,T>::pack_data () const
+  {
+    // set up a buffer and then use it as the target of a compressing
+    // stream into which we serialize the current object
+    std::vector<char> buffer;
     {
-      // set up a buffer and then use it as the target of a compressing
-      // stream into which we serialize the current object
-      std::vector<char> buffer;
-      {
 #ifdef DEAL_II_WITH_ZLIB
-        boost::iostreams::filtering_ostream out;
-        out.push(boost::iostreams::gzip_compressor
-                 (boost::iostreams::gzip_params
-                  (boost::iostreams::gzip::best_compression)));
-        out.push(boost::iostreams::back_inserter(buffer));
-
-        boost::archive::binary_oarchive archive(out);
-        archive << *this;
-        out.flush();
+      boost::iostreams::filtering_ostream out;
+      out.push(boost::iostreams::gzip_compressor
+               (boost::iostreams::gzip_params
+                (boost::iostreams::gzip::best_compression)));
+      out.push(boost::iostreams::back_inserter(buffer));
+
+      boost::archive::binary_oarchive archive(out);
+      archive << *this;
+      out.flush();
 #else
-        std::ostringstream out;
-        boost::archive::binary_oarchive archive(out);
-        archive << *this;
-        const std::string &s = out.str();
-        buffer.reserve(s.size());
-        buffer.assign(s.begin(), s.end());
+      std::ostringstream out;
+      boost::archive::binary_oarchive archive(out);
+      archive << *this;
+      const std::string &s = out.str();
+      buffer.reserve(s.size());
+      buffer.assign(s.begin(), s.end());
 #endif
-      }
-
-      return buffer;
     }
 
+    return buffer;
+  }
+
 
-    template <int dim, typename T>
-    void
-    CellDataTransferBuffer<dim,T>::unpack_data (const std::vector<char> &buffer)
-    {
-      std::string decompressed_buffer;
+  template <int dim, typename T>
+  void
+  CellDataTransferBuffer<dim,T>::unpack_data (const std::vector<char> &buffer)
+  {
+    std::string decompressed_buffer;
 
-      // first decompress the buffer
-      {
+    // first decompress the buffer
+    {
 #ifdef DEAL_II_WITH_ZLIB
-        boost::iostreams::filtering_ostream decompressing_stream;
-        decompressing_stream.push(boost::iostreams::gzip_decompressor());
-        decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
-        decompressing_stream.write (buffer.data(), buffer.size());
+      boost::iostreams::filtering_ostream decompressing_stream;
+      decompressing_stream.push(boost::iostreams::gzip_decompressor());
+      decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+      decompressing_stream.write (buffer.data(), buffer.size());
 #else
-        decompressed_buffer.assign (buffer.begin(), buffer.end());
+      decompressed_buffer.assign (buffer.begin(), buffer.end());
 #endif
-      }
+    }
 
-      // then restore the object from the buffer
-      std::istringstream in(decompressed_buffer);
-      boost::archive::binary_iarchive archive(in);
+    // then restore the object from the buffer
+    std::istringstream in(decompressed_buffer);
+    boost::archive::binary_iarchive archive(in);
 
-      archive >> *this;
-    }
+    archive >> *this;
+  }
 
 
 
-    template <typename DataType, typename MeshType>
-    void
-    exchange_cell_data_to_ghosts (const MeshType &mesh,
-                                  const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
-                                  const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack)
-    {
+  template <typename DataType, typename MeshType>
+  void
+  exchange_cell_data_to_ghosts (const MeshType &mesh,
+                                const std::function<boost::optional<DataType> (const typename MeshType::active_cell_iterator &)> &pack,
+                                const std::function<void (const typename MeshType::active_cell_iterator &, const DataType &)> &unpack)
+  {
 #ifndef DEAL_II_WITH_MPI
-      (void)mesh;
-      (void)pack;
-      (void)unpack;
-      Assert(false, ExcMessage("parallel::GridTools::exchange_cell_data_to_ghosts() requires MPI."));
+    (void)mesh;
+    (void)pack;
+    (void)unpack;
+    Assert(false, ExcMessage("GridTools::exchange_cell_data_to_ghosts() requires MPI."));
 #else
-      constexpr int dim = MeshType::dimension;
-      constexpr int spacedim = MeshType::space_dimension;
-      auto tria =
-        static_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
-      Assert (tria != nullptr,
-              ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
-
-      // map neighbor_id -> data_buffer where we accumulate the data to send
-      typedef std::map<dealii::types::subdomain_id, CellDataTransferBuffer<dim, DataType> >
-      DestinationToBufferMap;
-      DestinationToBufferMap destination_to_data_buffer_map;
-
-      std::map<unsigned int, std::set<dealii::types::subdomain_id> >
-      vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
-
-      for (auto cell : tria->active_cell_iterators())
-        if (cell->is_locally_owned())
-          {
-            std::set<dealii::types::subdomain_id> send_to;
-            for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
-              {
-                const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
-                neighbor_subdomains_of_vertex
-                  = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
-
-                if (neighbor_subdomains_of_vertex ==
-                    vertices_with_ghost_neighbors.end())
-                  continue;
-
-                Assert(neighbor_subdomains_of_vertex->second.size()!=0,
-                       ExcInternalError());
-
-                send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
-                               neighbor_subdomains_of_vertex->second.end());
-              }
-
-            if (send_to.size() > 0)
-              {
-                // this cell's data needs to be sent to someone
-                typename MeshType::active_cell_iterator
-                mesh_it (tria, cell->level(), cell->index(), &mesh);
-
-                const boost::optional<DataType> data = pack(mesh_it);
-
-                if (data)
-                  {
-                    const CellId cellid = cell->id();
-
-                    for (auto it : send_to)
-                      {
-                        const dealii::types::subdomain_id subdomain = it;
-
-                        // find the data buffer for proc "subdomain" if it exists
-                        // or create an empty one otherwise
-                        typename DestinationToBufferMap::iterator p
-                          = destination_to_data_buffer_map.insert (std::make_pair(subdomain,
-                                                                                  CellDataTransferBuffer<dim, DataType>()))
-                            .first;
-
-                        p->second.cell_ids.emplace_back(cellid);
-                        p->second.data.emplace_back(data.get());
-                      }
-                  }
-              }
-          }
+    constexpr int dim = MeshType::dimension;
+    constexpr int spacedim = MeshType::space_dimension;
+    auto tria =
+      dynamic_cast<const parallel::Triangulation<dim, spacedim>*>(&mesh.get_triangulation());
+    Assert (tria != nullptr,
+            ExcMessage("The function exchange_cell_data_to_ghosts() only works with parallel triangulations."));
+
+    // map neighbor_id -> data_buffer where we accumulate the data to send
+    typedef std::map<dealii::types::subdomain_id, CellDataTransferBuffer<dim, DataType> >
+    DestinationToBufferMap;
+    DestinationToBufferMap destination_to_data_buffer_map;
+
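+    // for each vertex at the boundary of the locally owned domain, the set of
+    // subdomains owning ghost neighbors there; a cell's data has to be sent
+    // to every subdomain that appears at one of its vertices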
+    std::map<unsigned int, std::set<dealii::types::subdomain_id> >
+    vertices_with_ghost_neighbors = tria->compute_vertices_with_ghost_neighbors();
+
+    for (auto cell : tria->active_cell_iterators())
+      if (cell->is_locally_owned())
+        {
+          std::set<dealii::types::subdomain_id> send_to;
+          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+            {
+              const std::map<unsigned int, std::set<dealii::types::subdomain_id> >::const_iterator
+              neighbor_subdomains_of_vertex
+                = vertices_with_ghost_neighbors.find (cell->vertex_index(v));
 
+              if (neighbor_subdomains_of_vertex ==
+                  vertices_with_ghost_neighbors.end())
+                continue;
 
-      // 2. send our messages
-      std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
-      const unsigned int n_ghost_owners = ghost_owners.size();
-      std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
-      std::vector<MPI_Request> requests (n_ghost_owners);
+              Assert(neighbor_subdomains_of_vertex->second.size()!=0,
+                     ExcInternalError());
 
-      unsigned int idx=0;
-      for (auto it = ghost_owners.begin();
-           it!=ghost_owners.end();
-           ++it, ++idx)
-        {
-          CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer_map[*it];
-
-          // pack all the data into the buffer for this recipient and send it.
-          // keep data around till we can make sure that the packet has been
-          // received
-          sendbuffers[idx] = data.pack_data ();
-          const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
-                                     MPI_BYTE, *it,
-                                     786, tria->get_communicator(), &requests[idx]);
-          AssertThrowMPI(ierr);
+              send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
+                             neighbor_subdomains_of_vertex->second.end());
+            }
+
+          if (send_to.size() > 0)
+            {
+              // this cell's data needs to be sent to someone
+              typename MeshType::active_cell_iterator
+              mesh_it (tria, cell->level(), cell->index(), &mesh);
+
+              const boost::optional<DataType> data = pack(mesh_it);
+
+              if (data)
+                {
+                  const CellId cellid = cell->id();
+
+                  for (auto it : send_to)
+                    {
+                      const dealii::types::subdomain_id subdomain = it;
+
+                      // find the data buffer for proc "subdomain" if it exists
+                      // or create an empty one otherwise
+                      typename DestinationToBufferMap::iterator p
+                        = destination_to_data_buffer_map.insert (std::make_pair(subdomain,
+                                                                                CellDataTransferBuffer<dim, DataType>()))
+                          .first;
+
+                      p->second.cell_ids.emplace_back(cellid);
+                      p->second.data.emplace_back(data.get());
+                    }
+                }
+            }
         }
 
-      // 3. receive messages
-      std::vector<char> receive;
-      for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
-        {
-          MPI_Status status;
-          int len;
-          int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
-          AssertThrowMPI(ierr);
-          ierr = MPI_Get_count(&status, MPI_BYTE, &len);
-          AssertThrowMPI(ierr);
 
-          receive.resize(len);
+    // 2. send our messages
+    std::set<dealii::types::subdomain_id> ghost_owners = tria->ghost_owners();
+    const unsigned int n_ghost_owners = ghost_owners.size();
+    std::vector<std::vector<char> > sendbuffers (n_ghost_owners);
+    std::vector<MPI_Request> requests (n_ghost_owners);
 
-          char *ptr = receive.data();
-          ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                          tria->get_communicator(), &status);
-          AssertThrowMPI(ierr);
+    unsigned int idx=0;
+    for (auto it = ghost_owners.begin();
+         it!=ghost_owners.end();
+         ++it, ++idx)
+      {
+        CellDataTransferBuffer<dim, DataType> &data = destination_to_data_buffer_map[*it];
+
+        // pack all the data into the buffer for this recipient and send it.
+        // keep data around till we can make sure that the packet has been
+        // received
+        sendbuffers[idx] = data.pack_data ();
+        const int ierr = MPI_Isend(sendbuffers[idx].data(), sendbuffers[idx].size(),
+                                   MPI_BYTE, *it,
+                                   786, tria->get_communicator(), &requests[idx]);
+        AssertThrowMPI(ierr);
+      }
 
-          CellDataTransferBuffer<dim, DataType> cellinfo;
-          cellinfo.unpack_data(receive);
+    // 3. receive messages
+    std::vector<char> receive;
+    for (unsigned int idx=0; idx<n_ghost_owners; ++idx)
+      {
+        MPI_Status status;
+        int len;
+        int ierr = MPI_Probe(MPI_ANY_SOURCE, 786, tria->get_communicator(), &status);
+        AssertThrowMPI(ierr);
+        ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+        AssertThrowMPI(ierr);
 
-          DataType *data = cellinfo.data.data();
-          for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
-            {
-              const typename Triangulation<dim,spacedim>::cell_iterator
-              tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
+        receive.resize(len);
 
-              const typename MeshType::active_cell_iterator
-              cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
+        char *ptr = receive.data();
+        ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                        tria->get_communicator(), &status);
+        AssertThrowMPI(ierr);
 
-              unpack(cell, *data);
-            }
-        }
+        CellDataTransferBuffer<dim, DataType> cellinfo;
+        cellinfo.unpack_data(receive);
 
-      // make sure that all communication is finished
-      // when we leave this function.
-      if (requests.size())
-        {
-          const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
-          AssertThrowMPI(ierr);
-        }
-#endif // DEAL_II_WITH_MPI
-    }
+        DataType *data = cellinfo.data.data();
+        for (unsigned int c=0; c<cellinfo.cell_ids.size(); ++c, ++data)
+          {
+            const typename Triangulation<dim,spacedim>::cell_iterator
+            tria_cell = cellinfo.cell_ids[c].to_cell(*tria);
 
+            const typename MeshType::active_cell_iterator
+            cell (tria, tria_cell->level(), tria_cell->index(), &mesh);
+
+            unpack(cell, *data);
+          }
+      }
+
+    // make sure that all communication is finished
+    // when we leave this function.
+    if (requests.size())
+      {
+        const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+#endif // DEAL_II_WITH_MPI
   }
+
 }
 
 #endif // DOXYGEN
index 0ab6fdbd210e33b2832bd21935e14576e133b24b..89f092db52c974dea996a1846c7210eb3fbe8a25 100644 (file)
@@ -22,85 +22,82 @@ DEAL_II_NAMESPACE_OPEN
 
 #ifdef DEAL_II_WITH_MPI
 
-namespace parallel
+namespace GridTools
 {
-  namespace GridTools
+  template<int spacedim>
+  std::vector< std::vector< BoundingBox<spacedim> > >
+  exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
+                                MPI_Comm                                    mpi_communicator)
   {
-    template<int spacedim>
-    std::vector< std::vector< BoundingBox<spacedim> > >
-    exchange_local_bounding_boxes(const std::vector< BoundingBox<spacedim> > &local_bboxes,
-                                  MPI_Comm                                    mpi_communicator)
-    {
 #ifndef DEAL_II_WITH_MPI
-      (void)local_bboxes;
-      (void)mpi_communicator;
-      Assert(false, ExcMessage("parallel::GridTools::exchange_local_bounding_boxes() requires MPI."));
+    (void)local_bboxes;
+    (void)mpi_communicator;
+    Assert(false, ExcMessage("GridTools::exchange_local_bounding_boxes() requires MPI."));
 #else
-      // Step 1: preparing data to be sent
-      unsigned int n_bboxes = local_bboxes.size();
-      // Dimension of the array to be exchanged (number of double)
-      int n_local_data = 2*spacedim*n_bboxes;
-      // data array stores each entry of each point describing the bounding boxes
-      std::vector<double> loc_data_array(n_local_data);
-      for (unsigned int i=0; i<n_bboxes; ++i)
-        for (unsigned int d=0; d < spacedim; ++d)
-          {
-            // Extracting the coordinates of each boundary point
-            loc_data_array[2*i*spacedim + d] = local_bboxes[i].get_boundary_points().first[d];
-            loc_data_array[2*i*spacedim + spacedim + d] = local_bboxes[i].get_boundary_points().second[d];
-          }
+    // Step 1: preparing data to be sent
+    unsigned int n_bboxes = local_bboxes.size();
+    // Size of the array to be exchanged (number of doubles)
+    int n_local_data = 2*spacedim*n_bboxes;
+    // data array stores each entry of each point describing the bounding boxes
+    std::vector<double> loc_data_array(n_local_data);
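+    // layout: for box i, the entries [2*i*spacedim, 2*i*spacedim+spacedim)
+    // hold the first boundary point, the next spacedim entries the second one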
+    for (unsigned int i=0; i<n_bboxes; ++i)
+      for (unsigned int d=0; d < spacedim; ++d)
+        {
+          // Extracting the coordinates of each boundary point
+          loc_data_array[2*i*spacedim + d] = local_bboxes[i].get_boundary_points().first[d];
+          loc_data_array[2*i*spacedim + spacedim + d] = local_bboxes[i].get_boundary_points().second[d];
+        }
 
-      // Step 2: exchanging the size of local data
-      unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+    // Step 2: exchanging the size of local data
+    unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
 
-      // Vector to store the size of loc_data_array for every process
-      std::vector<int> size_all_data(n_procs);
+    // Vector to store the size of loc_data_array for every process
+    std::vector<int> size_all_data(n_procs);
 
-      // Exchanging the number of bboxes
-      MPI_Allgather(&n_local_data, 1, MPI_INT,
-                    &(size_all_data[0]), 1, MPI_INT,
-                    mpi_communicator);
+    // Exchanging the sizes of the local data arrays
+    MPI_Allgather(&n_local_data, 1, MPI_INT,
+                  &(size_all_data[0]), 1, MPI_INT,
+                  mpi_communicator);
 
-      // Now computing the the displacement, relative to recvbuf,
-      // at which to store the incoming data
-      std::vector<int> rdispls(n_procs);
-      rdispls[0] = 0;
-      for (unsigned int i=1; i < n_procs; ++i)
-        rdispls[i] = rdispls[i-1] + size_all_data[i-1];
+    // Now computing the displacement, relative to recvbuf,
+    // at which to store the incoming data
+    std::vector<int> rdispls(n_procs);
+    rdispls[0] = 0;
+    for (unsigned int i=1; i < n_procs; ++i)
+      rdispls[i] = rdispls[i-1] + size_all_data[i-1];
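+    // e.g., with size_all_data = {6, 12, 6} this yields rdispls = {0, 6, 18}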
 
-      // Step 3: exchange the data and bounding boxes:
-      // Allocating a vector to contain all the received data
-      std::vector<double> data_array(rdispls.back() + size_all_data.back());
+    // Step 3: exchange the data describing the bounding boxes:
+    // Allocating a vector to contain all the received data
+    std::vector<double> data_array(rdispls.back() + size_all_data.back());
 
-      MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
-                     &(data_array[0]), &(size_all_data[0]),
-                     &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
+    MPI_Allgatherv(&(loc_data_array[0]), n_local_data, MPI_DOUBLE,
+                   &(data_array[0]), &(size_all_data[0]),
+                   &(rdispls[0]), MPI_DOUBLE, mpi_communicator);
 
-      // Step 4: create the array of bboxes for output
-      std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
-      unsigned int begin_idx = 0;
-      for (unsigned int i=0; i < n_procs; ++i)
-        {
-          // Number of local bounding boxes
-          unsigned int n_bbox_i = size_all_data[i]/(spacedim*2);
-          global_bboxes[i].resize(n_bbox_i);
-          for (unsigned int bbox=0; bbox<n_bbox_i; ++bbox)
-            {
-              Point<spacedim> p1,p2; // boundary points for bbox
-              for (unsigned int d=0; d<spacedim; ++d)
-                {
-                  p1[d] = data_array[ begin_idx + 2*bbox*spacedim + d];
-                  p2[d] = data_array[ begin_idx + 2*bbox*spacedim + spacedim + d];
-                }
-              BoundingBox<spacedim> loc_bbox(std::make_pair(p1,p2));
-              global_bboxes[i][bbox] = loc_bbox;
-            }
-          // Shifting the first index to the start of the next vector
-          begin_idx += size_all_data[i];
-        }
-      return global_bboxes;
+    // Step 4: create the array of bboxes for output
+    std::vector< std::vector< BoundingBox<spacedim> > > global_bboxes(n_procs);
+    unsigned int begin_idx = 0;
+    for (unsigned int i=0; i < n_procs; ++i)
+      {
+        // Number of local bounding boxes
+        unsigned int n_bbox_i = size_all_data[i]/(spacedim*2);
+        global_bboxes[i].resize(n_bbox_i);
+        for (unsigned int bbox=0; bbox<n_bbox_i; ++bbox)
+          {
+            Point<spacedim> p1,p2; // boundary points for bbox
+            for (unsigned int d=0; d<spacedim; ++d)
+              {
+                p1[d] = data_array[ begin_idx + 2*bbox*spacedim + d];
+                p2[d] = data_array[ begin_idx + 2*bbox*spacedim + spacedim + d];
+              }
+            BoundingBox<spacedim> loc_bbox(std::make_pair(p1,p2));
+            global_bboxes[i][bbox] = loc_bbox;
+          }
+        // Shifting the first index to the start of the next vector
+        begin_idx += size_all_data[i];
+      }
+    return global_bboxes;
 #endif // DEAL_II_WITH_MPI
-    }
   }
 }
 
index 505e37cd5fa78ca35e5388841557a78d6c007b97..9e245a2678a7e62d65fe32f50d1f83ccb388c44f 100644 (file)
 
 for (deal_II_space_dimension : SPACE_DIMENSIONS)
 {
-    namespace parallel \{
     namespace GridTools \{
     template
     std::vector< std::vector< BoundingBox<deal_II_space_dimension> > >
     exchange_local_bounding_boxes(const std::vector< BoundingBox<deal_II_space_dimension> >&,
                                   MPI_Comm);
     \}
-    \}
 }
index 1db96a4fab36b25732c57ff06c7f908adf6af8a4..11d9f2f37bbd02dbe4628fd72fefc817e7ffa398 100644 (file)
@@ -3615,7 +3615,7 @@ namespace internal
             const_cast<typename DoFHandlerType::active_cell_iterator &>(cell)->set_dof_indices(local_dof_indices);
           };
 
-          parallel::GridTools::exchange_cell_data_to_ghosts<std::vector<types::global_dof_index>, DoFHandlerType>
+          GridTools::exchange_cell_data_to_ghosts<std::vector<types::global_dof_index>, DoFHandlerType>
           (dof_handler, pack, unpack);
 
           // finally update the cell DoF indices caches to make sure
index 7a25290dfed2c890a21565f24f04f7510f50f35b..fdcf1ae7776fd12780a16759885f9fc9af082d97 100644 (file)
@@ -938,7 +938,7 @@ namespace internal
                 active_fe_index);
               };
 
-              parallel::GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
+              GridTools::exchange_cell_data_to_ghosts<unsigned int, dealii::hp::DoFHandler<dim,spacedim>>
                   (dof_handler, pack, unpack);
             }
           else
index 141be4c2973bef31dd17ec25abefb02155dc828f..b7f233911bf47ee24922a33e781210a8c4b78029 100644 (file)
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
 
-// test parallel::GridTools::exchange_local_bounding_boxes
+// test GridTools::exchange_local_bounding_boxes
 
 #include "../tests.h"
 #include <deal.II/base/point.h>
@@ -52,7 +52,7 @@ void test_exchange_bbox()
     }
 
   std::vector< std::vector< BoundingBox<spacedim> > > global_boxes
-    = parallel::GridTools::exchange_local_bounding_boxes(loc_bboxes,mpi_communicator);
+    = GridTools::exchange_local_bounding_boxes(loc_bboxes,mpi_communicator);
 
 
   bool passed = true;
@@ -121,7 +121,7 @@ int main (int argc, char *argv[])
   Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
   MPILogInitAll log;
 
-  deallog << "Test: parallel::GridTools::exchange_local_bounding_boxes " << std::endl;
+  deallog << "Test: GridTools::exchange_local_bounding_boxes " << std::endl;
 
   test_exchange_bbox<1>();
   test_exchange_bbox<2>();
index 2278be30b2d0a03bb2949d9222c29e59d1b34a12..78f88124256005118cef7214e60d2f47cc1239d2 100644 (file)
@@ -1,5 +1,5 @@
 
-DEAL:0::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:0::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:0::Test for: dimension 1
 DEAL:0::12  mpi processes
 DEAL:0::Test passed
@@ -10,7 +10,7 @@ DEAL:0::Test for: dimension 3
 DEAL:0::12  mpi processes
 DEAL:0::Test passed
 
-DEAL:1::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:1::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:1::Test for: dimension 1
 DEAL:1::12  mpi processes
 DEAL:1::Test passed
@@ -22,7 +22,7 @@ DEAL:1::12  mpi processes
 DEAL:1::Test passed
 
 
-DEAL:2::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:2::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:2::Test for: dimension 1
 DEAL:2::12  mpi processes
 DEAL:2::Test passed
@@ -34,7 +34,7 @@ DEAL:2::12  mpi processes
 DEAL:2::Test passed
 
 
-DEAL:3::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:3::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:3::Test for: dimension 1
 DEAL:3::12  mpi processes
 DEAL:3::Test passed
@@ -46,7 +46,7 @@ DEAL:3::12  mpi processes
 DEAL:3::Test passed
 
 
-DEAL:4::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:4::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:4::Test for: dimension 1
 DEAL:4::12  mpi processes
 DEAL:4::Test passed
@@ -58,7 +58,7 @@ DEAL:4::12  mpi processes
 DEAL:4::Test passed
 
 
-DEAL:5::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:5::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:5::Test for: dimension 1
 DEAL:5::12  mpi processes
 DEAL:5::Test passed
@@ -70,7 +70,7 @@ DEAL:5::12  mpi processes
 DEAL:5::Test passed
 
 
-DEAL:6::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:6::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:6::Test for: dimension 1
 DEAL:6::12  mpi processes
 DEAL:6::Test passed
@@ -82,7 +82,7 @@ DEAL:6::12  mpi processes
 DEAL:6::Test passed
 
 
-DEAL:7::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:7::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:7::Test for: dimension 1
 DEAL:7::12  mpi processes
 DEAL:7::Test passed
@@ -94,7 +94,7 @@ DEAL:7::12  mpi processes
 DEAL:7::Test passed
 
 
-DEAL:8::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:8::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:8::Test for: dimension 1
 DEAL:8::12  mpi processes
 DEAL:8::Test passed
@@ -106,7 +106,7 @@ DEAL:8::12  mpi processes
 DEAL:8::Test passed
 
 
-DEAL:9::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:9::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:9::Test for: dimension 1
 DEAL:9::12  mpi processes
 DEAL:9::Test passed
@@ -118,7 +118,7 @@ DEAL:9::12  mpi processes
 DEAL:9::Test passed
 
 
-DEAL:10::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:10::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:10::Test for: dimension 1
 DEAL:10::12  mpi processes
 DEAL:10::Test passed
@@ -130,7 +130,7 @@ DEAL:10::12  mpi processes
 DEAL:10::Test passed
 
 
-DEAL:11::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:11::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:11::Test for: dimension 1
 DEAL:11::12  mpi processes
 DEAL:11::Test passed
index 6a3512516151b19775468fa96fcc5a350eb65736..096d23072ea14f103cba3f98376fa64453940ddb 100644 (file)
@@ -1,5 +1,5 @@
 
-DEAL:0::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:0::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:0::Test for: dimension 1
 DEAL:0::4  mpi processes
 DEAL:0::Test passed
@@ -10,7 +10,7 @@ DEAL:0::Test for: dimension 3
 DEAL:0::4  mpi processes
 DEAL:0::Test passed
 
-DEAL:1::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:1::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:1::Test for: dimension 1
 DEAL:1::4  mpi processes
 DEAL:1::Test passed
@@ -22,7 +22,7 @@ DEAL:1::4  mpi processes
 DEAL:1::Test passed
 
 
-DEAL:2::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:2::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:2::Test for: dimension 1
 DEAL:2::4  mpi processes
 DEAL:2::Test passed
@@ -34,7 +34,7 @@ DEAL:2::4  mpi processes
 DEAL:2::Test passed
 
 
-DEAL:3::Test: parallel::GridTools::exchange_local_bounding_boxes 
+DEAL:3::Test: GridTools::exchange_local_bounding_boxes 
 DEAL:3::Test for: dimension 1
 DEAL:3::4  mpi processes
 DEAL:3::Test passed
index 224874cb556885b91536028b7102d747b0f9845c..717c1f7b678feee782416be62f9dc7155da3b96e 100644 (file)
@@ -42,7 +42,7 @@ void test ()
   typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell_iterator;
   typedef double DT;
   DT counter = 0.0;
-  parallel::GridTools::exchange_cell_data_to_ghosts<
+  GridTools::exchange_cell_data_to_ghosts<
   DT,
   parallel::distributed::Triangulation<dim>>
   (tria,
index fb271676762d9700fb7f103ad2e1347f5d844d03..a7abfd3676fc19d2b467fd2a4fa813ff5ddcb50e 100644 (file)
@@ -49,7 +49,7 @@ void test ()
   typedef typename DoFHandler<dim>::active_cell_iterator cell_iterator;
   typedef short DT;
   short counter = 0;
-  parallel::GridTools::exchange_cell_data_to_ghosts<
+  GridTools::exchange_cell_data_to_ghosts<
   DT,
   DoFHandler<dim> >
   (dofhandler,
index f98f540194d3ed5853d2f5eccd16c7cddb796012..0dec046d3d996c23426ed177c4b8eabc0b311fdf 100644 (file)
@@ -47,7 +47,7 @@ void test ()
   typedef typename parallel::shared::Triangulation<dim>::active_cell_iterator cell_iterator;
   typedef double DT;
   DT counter = 0.0;
-  parallel::GridTools::exchange_cell_data_to_ghosts<
+  GridTools::exchange_cell_data_to_ghosts<
   DT, parallel::shared::Triangulation<dim>>
                                          (tria,
                                           [&](const cell_iterator& cell)
index 447daed1fdff70a557f0fca4af838dd4a8c731cf..1e106ea7afbd56883f35cb31ddf6025166d223bd 100644 (file)
@@ -46,7 +46,7 @@ void test ()
   typedef typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell_iterator;
   typedef double DT;
   int counter = 0;
-  parallel::GridTools::exchange_cell_data_to_ghosts<
+  GridTools::exchange_cell_data_to_ghosts<
   DT,
   parallel::distributed::Triangulation<dim>>
   (tria,
