https://gitweb.dealii.org/ - dealii.git/commitdiff
Rename NoncontiguousPartitioner::update_values() 9847/head
author Peter Munch <peterrmuench@gmail.com>
Tue, 7 Apr 2020 08:47:05 +0000 (10:47 +0200)
committer Peter Munch <peterrmuench@gmail.com>
Mon, 4 May 2020 20:06:47 +0000 (22:06 +0200)
include/deal.II/base/mpi_noncontiguous_partitioner.h
include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
include/deal.II/base/mpi_tags.h
source/base/mpi_noncontiguous_partitioner.inst.in
tests/base/mpi_noncontiguous_partitioner_01.cc
tests/base/mpi_noncontiguous_partitioner_02.cc
tests/base/mpi_noncontiguous_partitioner_03.cc

diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.h b/include/deal.II/base/mpi_noncontiguous_partitioner.h
index 45ca33b2f0f8d26c2976d7cc7ecd662cefeba36f..fe37f3869993a57a9187349d547da1988d78a902 100644
@@ -38,7 +38,6 @@ namespace Utilities
      *
      * @author Peter Munch, 2020
      */
-    template <typename Number = double>
     class NoncontiguousPartitioner
       : public dealii::LinearAlgebra::CommunicationPatternBase
     {
@@ -51,16 +50,16 @@ namespace Utilities
 
       /**
        * Constructor. Set up point-to-point communication pattern based on the
-       * IndexSets arguments @p indexset_has and @p indexset_want for the MPI
+       * IndexSets arguments @p indexset_locally_owned and @p indexset_ghost for the MPI
        * communicator @p communicator.
        */
-      NoncontiguousPartitioner(const IndexSet &indexset_has,
-                               const IndexSet &indexset_want,
+      NoncontiguousPartitioner(const IndexSet &indexset_locally_owned,
+                               const IndexSet &indexset_ghost,
                                const MPI_Comm &communicator);
 
       /**
-       * Constructor. Same as above but for vectors of indices @p indices_has
-       * and @p indices_want. This allows the indices to not be sorted and the
+       * Constructor. Same as above but for vectors of indices @p indices_locally_owned
+       * and @p indices_ghost. This allows the indices to not be sorted and the
        * values are read and written automatically at the right position of
        * the vector during update_values(), update_values_start(), and
        * update_values_finish(). It is allowed to include entries with the
@@ -68,42 +67,95 @@ namespace Utilities
        * exchange but are present in the data vectors as padding.
        */
       NoncontiguousPartitioner(
-        const std::vector<types::global_dof_index> &indices_has,
-        const std::vector<types::global_dof_index> &indices_want,
+        const std::vector<types::global_dof_index> &indices_locally_owned,
+        const std::vector<types::global_dof_index> &indices_ghost,
         const MPI_Comm &                            communicator);
 
       /**
-       * Fill the vector @p dst according to the precomputed communication
-       * pattern with values from @p src.
+       * Fill the vector @p ghost_array according to the precomputed communication
+       * pattern with values from @p locally_owned_array.
        *
       * @pre The vectors only have to provide a method begin(), which allows
       *   access to their raw data.
        *
+       * @pre The size of both vectors must be at least as large as the number
+       *   of entries in the index sets passed to the constructors or the
+       *   reinit() functions.
+       *
        * @note This function calls the methods update_values_start() and
        *   update_values_finish() in sequence. Users can call these two
       *   functions separately and thereby overlap communication and
        *   computation.
        */
-      template <typename VectorType>
+      template <typename Number>
+      void
+      export_to_ghosted_array(
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      ghost_array) const;
+
+      /**
+       * Same as above but with an interface similar to
+       * Utilities::MPI::Partitioner::export_to_ghosted_array_start and
+       * Utilities::MPI::Partitioner::export_to_ghosted_array_finish. In this
+       * function, the user can provide the temporary data structures to be
+       * used.
+       *
+       * @pre The size of the @p temporary_storage vector has to be at least
+       *   as large as the sum of the number of entries in the index sets
+       *   passed to the constructor or the reinit() functions. The reason
+       *   for this is that this vector is used as a buffer for both sending
+       *   and receiving data.
+       */
+      template <typename Number>
       void
-      update_values(VectorType &dst, const VectorType &src) const;
+      export_to_ghosted_array(
+        const unsigned int             communication_channel,
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      temporary_storage,
+        const ArrayView<Number> &      ghost_array,
+        std::vector<MPI_Request> &     requests) const;
 
       /**
       * Start update: Data is packed, non-blocking sends and receives
        * are started.
+       *
+       * @note In contrast to the function
+       *   Utilities::MPI::Partitioner::export_to_ghosted_array_start, the user
+       *   does not pass a reference to the destination vector, since the data
+       *   is received into a designated part of the buffer @p temporary_storage. This
+       *   allows for padding and other post-processing of the received data.
+       *
+       * @pre The required sizes of the vectors are the same as in the
+       *   functions above.
        */
-      template <typename VectorType>
+      template <typename Number>
       void
-      update_values_start(const VectorType &src, const unsigned int tag) const;
+      export_to_ghosted_array_start(
+        const unsigned int             communication_channel,
+        const ArrayView<const Number> &locally_owned_array,
+        const ArrayView<Number> &      temporary_storage,
+        std::vector<MPI_Request> &     requests) const;
 
       /**
        * Finish update. The method waits until all data has been sent and
       * received. Once data from any process is received, it is processed and
        * placed at the right position of the vector @p dst.
+       *
+       * @note In contrast to the function
+       *   Utilities::MPI::Partitioner::export_to_ghosted_array_finish, the user
+       *   also has to pass a reference to the buffer @p temporary_storage,
+       *   since the data has been received into the buffer and not into the
+       *   destination vector.
+       *
+       * @pre The required sizes of the vectors are the same as in the
+       *   functions above.
        */
-      template <typename VectorType>
+      template <typename Number>
       void
-      update_values_finish(VectorType &dst, const unsigned int tag) const;
+      export_to_ghosted_array_finish(
+        const ArrayView<const Number> &temporary_storage,
+        const ArrayView<Number> &      ghost_array,
+        std::vector<MPI_Request> &     requests) const;
 
       /**
        * Returns the number of processes this process sends data to and the
@@ -128,16 +180,16 @@ namespace Utilities
        * Initialize the inner data structures.
        */
       void
-      reinit(const IndexSet &indexset_has,
-             const IndexSet &indexset_want,
+      reinit(const IndexSet &indexset_locally_owned,
+             const IndexSet &indexset_ghost,
              const MPI_Comm &communicator) override;
 
       /**
        * Initialize the inner data structures.
        */
       void
-      reinit(const std::vector<types::global_dof_index> &indices_has,
-             const std::vector<types::global_dof_index> &indices_want,
+      reinit(const std::vector<types::global_dof_index> &indices_locally_owned,
+             const std::vector<types::global_dof_index> &indices_ghost,
              const MPI_Comm &                            communicator);
 
     private:
@@ -166,16 +218,6 @@ namespace Utilities
        */
       std::vector<types::global_dof_index> send_indices;
 
-      /**
-       *  Buffer containing the values sorted accoding to the ranks.
-       */
-      mutable std::vector<Number> send_buffers;
-
-      /**
-       * MPI requests for sending.
-       */
-      mutable std::vector<MPI_Request> send_requests;
-
       /**
        * The ranks this process receives data from.
        */
@@ -197,14 +239,22 @@ namespace Utilities
       std::vector<types::global_dof_index> recv_indices;
 
       /**
-       * Buffer containing the values sorted by rank.
+       * Buffer containing the values sorted by rank for sending and receiving.
+       *
+       * @note Only allocated if not provided externally by the user.
+       *
+       * @note At this point, the type of the data to be communicated is not
+       *   known, so an arbitrary type of size 1 byte is used. It is cast to
+       *   the requested type in the relevant functions.
        */
-      mutable std::vector<Number> recv_buffers;
+      mutable std::vector<uint8_t> buffers;
 
       /**
-       * MPI requests for receiving.
+       * MPI requests for sending and receiving.
+       *
+       * @note Only allocated if not provided externally by the user.
        */
-      mutable std::vector<MPI_Request> recv_requests;
+      mutable std::vector<MPI_Request> requests;
     };
 
   } // namespace MPI
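
In practice the new interface is used as in the updated tests further below. A
minimal sketch (not part of the commit), assuming index sets and a
communicator set up as in tests/base/mpi_noncontiguous_partitioner_01.cc:

    Utilities::MPI::NoncontiguousPartitioner partitioner(index_set_has,
                                                         index_set_want,
                                                         comm);

    // locally owned values and the ghost array to be filled
    AlignedVector<double> src(index_set_has.n_elements());
    AlignedVector<double> dst(index_set_want.n_elements());

    src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;

    // one-shot variant: temporary buffers and MPI requests are allocated
    // internally on first use
    partitioner.export_to_ghosted_array(
      ArrayView<const double>(src.data(), src.size()),
      ArrayView<double>(dst.data(), dst.size()));
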
diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h b/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
index 8b4cff10ca5f5992e507e57bdb180f5667e6b122..588baa5bedefb0090ef95cf2456cc570f3d99a84 100644
@@ -33,8 +33,7 @@ namespace Utilities
 {
   namespace MPI
   {
-    template <typename Number>
-    NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
       const IndexSet &indexset_has,
       const IndexSet &indexset_want,
       const MPI_Comm &communicator)
@@ -44,8 +43,7 @@ namespace Utilities
 
 
 
-    template <typename Number>
-    NoncontiguousPartitioner<Number>::NoncontiguousPartitioner(
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
       const std::vector<types::global_dof_index> &indices_has,
       const std::vector<types::global_dof_index> &indices_want,
       const MPI_Comm &                            communicator)
@@ -55,47 +53,41 @@ namespace Utilities
 
 
 
-    template <typename Number>
     std::pair<unsigned int, unsigned int>
-    NoncontiguousPartitioner<Number>::n_targets()
+    NoncontiguousPartitioner::n_targets()
     {
       return {send_ranks.size(), recv_ranks.size()};
     }
 
 
 
-    template <typename Number>
     types::global_dof_index
-    NoncontiguousPartitioner<Number>::memory_consumption()
+    NoncontiguousPartitioner::memory_consumption()
     {
       return MemoryConsumption::memory_consumption(send_ranks) +
              MemoryConsumption::memory_consumption(send_ptr) +
              MemoryConsumption::memory_consumption(send_indices) +
-             MemoryConsumption::memory_consumption(send_buffers) +
-             MemoryConsumption::memory_consumption(send_requests) +
              MemoryConsumption::memory_consumption(recv_ranks) +
              MemoryConsumption::memory_consumption(recv_ptr) +
              MemoryConsumption::memory_consumption(recv_indices) +
-             MemoryConsumption::memory_consumption(recv_buffers) +
-             MemoryConsumption::memory_consumption(recv_requests);
+             MemoryConsumption::memory_consumption(buffers) +
+             MemoryConsumption::memory_consumption(requests);
     }
 
 
 
-    template <typename Number>
     const MPI_Comm &
-    NoncontiguousPartitioner<Number>::get_mpi_communicator() const
+    NoncontiguousPartitioner::get_mpi_communicator() const
     {
       return communicator;
     }
 
 
 
-    template <typename Number>
     void
-    NoncontiguousPartitioner<Number>::reinit(const IndexSet &indexset_has,
-                                             const IndexSet &indexset_want,
-                                             const MPI_Comm &communicator)
+    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
+                                     const IndexSet &indexset_want,
+                                     const MPI_Comm &communicator)
     {
       this->communicator = communicator;
 
@@ -103,13 +95,11 @@ namespace Utilities
       send_ranks.clear();
       send_ptr.clear();
       send_indices.clear();
-      send_buffers.clear();
-      send_requests.clear();
       recv_ranks.clear();
       recv_ptr.clear();
       recv_indices.clear();
-      recv_buffers.clear();
-      recv_requests.clear();
+      buffers.clear();
+      requests.clear();
 
       // setup communication pattern
       std::vector<unsigned int> owning_ranks_of_ghosts(
@@ -150,15 +140,12 @@ namespace Utilities
 
             recv_ptr.push_back(recv_indices.size());
           }
-
-        recv_buffers.resize(recv_indices.size());
-        recv_requests.resize(recv_map.size());
       }
 
       {
         const auto targets_with_indexset = process.get_requesters();
 
-        send_ptr.push_back(send_indices.size() /*=0*/);
+        send_ptr.push_back(recv_ptr.back());
         for (const auto &target_with_indexset : targets_with_indexset)
           {
             send_ranks.push_back(target_with_indexset.first);
@@ -166,19 +153,15 @@ namespace Utilities
             for (const auto &cell_index : target_with_indexset.second)
               send_indices.push_back(indexset_has.index_within_set(cell_index));
 
-            send_ptr.push_back(send_indices.size());
+            send_ptr.push_back(send_indices.size() + recv_ptr.back());
           }
-
-        send_buffers.resize(send_indices.size());
-        send_requests.resize(targets_with_indexset.size());
       }
     }
 
 
 
-    template <typename Number>
     void
-    NoncontiguousPartitioner<Number>::reinit(
+    NoncontiguousPartitioner::reinit(
       const std::vector<types::global_dof_index> &indices_has,
       const std::vector<types::global_dof_index> &indices_want,
       const MPI_Comm &                            communicator)
@@ -259,65 +242,104 @@ namespace Utilities
 
 
     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values(VectorType &      dst,
-                                                    const VectorType &src) const
+    NoncontiguousPartitioner::export_to_ghosted_array(
+      const ArrayView<const Number> &src,
+      const ArrayView<Number> &      dst) const
     {
-      const auto tag = internal::Tags::noncontiguous_partitioner_update_values;
+      // allocate internal memory if needed
+      if (requests.size() != send_ranks.size() + recv_ranks.size())
+        requests.resize(send_ranks.size() + recv_ranks.size());
+
+      if (this->buffers.size() != send_ptr.back() * sizeof(Number))
+        this->buffers.resize(send_ptr.back() * sizeof(Number), 0);
+
+      // perform actual exchange
+      this->template export_to_ghosted_array<Number>(
+        0,
+        src,
+        ArrayView<Number>(reinterpret_cast<Number *>(this->buffers.data()),
+                          send_ptr.back()),
+        dst,
+        this->requests);
+    }
+
 
-      this->update_values_start(src, tag);
-      this->update_values_finish(dst, tag);
+    template <typename Number>
+    void
+    NoncontiguousPartitioner::export_to_ghosted_array(
+      const unsigned int             communication_channel,
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      temporary_storage,
+      const ArrayView<Number> &      ghost_array,
+      std::vector<MPI_Request> &     requests) const
+    {
+      this->template export_to_ghosted_array_start<Number>(
+        communication_channel,
+        locally_owned_array,
+        temporary_storage,
+        requests);
+      this->template export_to_ghosted_array_finish<Number>(temporary_storage,
+                                                            ghost_array,
+                                                            requests);
     }
 
 
 
     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values_start(
-      const VectorType & src,
-      const unsigned int tag) const
+    NoncontiguousPartitioner::export_to_ghosted_array_start(
+      const unsigned int             communication_channel,
+      const ArrayView<const Number> &src,
+      const ArrayView<Number> &      buffers,
+      std::vector<MPI_Request> &     requests) const
     {
 #ifndef DEAL_II_WITH_MPI
+      (void)communication_channel;
       (void)src;
-      (void)tag;
+      (void)buffers;
+      (void)requests;
       Assert(false, ExcNeedsMPI());
 #else
+      AssertIndexRange(communication_channel, 10);
+
+      const auto tag =
+        communication_channel +
+        internal::Tags::noncontiguous_partitioner_update_ghost_values;
+
       // post recv
       for (types::global_dof_index i = 0; i < recv_ranks.size(); i++)
         {
-          const auto ierr = MPI_Irecv(recv_buffers.data() + recv_ptr[i],
-                                      recv_ptr[i + 1] - recv_ptr[i],
-                                      Utilities::MPI::internal::mpi_type_id(
-                                        recv_buffers.data()),
-                                      recv_ranks[i],
-                                      tag,
-                                      communicator,
-                                      &recv_requests[i]);
+          const auto ierr =
+            MPI_Irecv(buffers.data() + recv_ptr[i],
+                      recv_ptr[i + 1] - recv_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(buffers.data()),
+                      recv_ranks[i],
+                      tag,
+                      communicator,
+                      &requests[i + send_ranks.size()]);
           AssertThrowMPI(ierr);
         }
 
       auto src_iterator = src.begin();
 
       // post send
-      for (types::global_dof_index i = 0; i < send_ranks.size(); i++)
+      for (types::global_dof_index i = 0, k = 0; i < send_ranks.size(); i++)
         {
          // collect data to be sent
-          for (types::global_dof_index j = send_ptr[i], c = 0;
-               j < send_ptr[i + 1];
+          for (types::global_dof_index j = send_ptr[i]; j < send_ptr[i + 1];
                j++)
-            send_buffers[send_ptr[i] + c++] = src_iterator[send_indices[j]];
+            buffers[j] = src_iterator[send_indices[k++]];
 
           // send data
-          const auto ierr = MPI_Isend(send_buffers.data() + send_ptr[i],
-                                      send_ptr[i + 1] - send_ptr[i],
-                                      Utilities::MPI::internal::mpi_type_id(
-                                        send_buffers.data()),
-                                      send_ranks[i],
-                                      tag,
-                                      communicator,
-                                      &send_requests[i]);
+          const auto ierr =
+            MPI_Isend(buffers.data() + send_ptr[i],
+                      send_ptr[i + 1] - send_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(buffers.data()),
+                      send_ranks[i],
+                      tag,
+                      communicator,
+                      &requests[i]);
           AssertThrowMPI(ierr);
         }
 #endif
@@ -326,28 +348,27 @@ namespace Utilities
 
 
     template <typename Number>
-    template <typename VectorType>
     void
-    NoncontiguousPartitioner<Number>::update_values_finish(
-      VectorType &       dst,
-      const unsigned int tag) const
+    NoncontiguousPartitioner::export_to_ghosted_array_finish(
+      const ArrayView<const Number> &buffers,
+      const ArrayView<Number> &      dst,
+      std::vector<MPI_Request> &     requests) const
     {
-      (void)tag;
-
 #ifndef DEAL_II_WITH_MPI
+      (void)buffers;
       (void)dst;
+      (void)requests;
       Assert(false, ExcNeedsMPI());
 #else
       auto dst_iterator = dst.begin();
 
       // receive all data packages and copy data from buffers
-      for (types::global_dof_index proc = 0; proc < recv_requests.size();
-           proc++)
+      for (types::global_dof_index proc = 0; proc < recv_ranks.size(); proc++)
         {
           int        i;
           MPI_Status status;
-          const auto ierr = MPI_Waitany(recv_requests.size(),
-                                        recv_requests.data(),
+          const auto ierr = MPI_Waitany(recv_ranks.size(),
+                                        requests.data() + send_ranks.size(),
                                         &i,
                                         &status);
           AssertThrowMPI(ierr);
@@ -355,13 +376,12 @@ namespace Utilities
           for (types::global_dof_index j = recv_ptr[i], c = 0;
                j < recv_ptr[i + 1];
                j++)
-            dst_iterator[recv_indices[j]] = recv_buffers[recv_ptr[i] + c++];
+            dst_iterator[recv_indices[j]] = buffers[recv_ptr[i] + c++];
         }
 
      // wait until all data packages have been sent
-      const auto ierr = MPI_Waitall(send_requests.size(),
-                                    send_requests.data(),
-                                    MPI_STATUSES_IGNORE);
+      const auto ierr =
+        MPI_Waitall(send_ranks.size(), requests.data(), MPI_STATUSES_IGNORE);
       AssertThrowMPI(ierr);
 #endif
     }
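
To overlap communication and computation, the start/finish pair can be driven
directly. A hedged sketch under the preconditions documented above, where
`partitioner`, `src`, `dst`, and `do_independent_work()` are placeholders and
the caller sizes the request vector from n_targets():

    // request vector sized to the total number of communication partners
    const std::pair<unsigned int, unsigned int> targets =
      partitioner.n_targets();
    std::vector<MPI_Request> requests(targets.first + targets.second);

    // buffer large enough for both the packed send data and the received
    // data (the sum of the entries in the two index sets)
    std::vector<double> temporary_storage(src.size() + dst.size());

    partitioner.export_to_ghosted_array_start<double>(
      /*communication_channel=*/0,
      ArrayView<const double>(src.data(), src.size()),
      ArrayView<double>(temporary_storage.data(), temporary_storage.size()),
      requests);

    do_independent_work(); // computation not depending on the ghost values

    partitioner.export_to_ghosted_array_finish<double>(
      ArrayView<const double>(temporary_storage.data(),
                              temporary_storage.size()),
      ArrayView<double>(dst.data(), dst.size()),
      requests);
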
diff --git a/include/deal.II/base/mpi_tags.h b/include/deal.II/base/mpi_tags.h
index 4c5d2f533326c172fad3012aaa6e90fcfa2ca7a8..3cc35e1d397920f486d313a4f140c400579c27dc 100644
@@ -130,7 +130,7 @@ namespace Utilities
           partitioner_export_end = partitioner_export_start + 200,
 
           /// NoncontiguousPartitioner::update_values
-          noncontiguous_partitioner_update_values,
+          noncontiguous_partitioner_update_ghost_values,
 
           // Utilities::MPI::compute_union
           compute_union,
diff --git a/source/base/mpi_noncontiguous_partitioner.inst.in b/source/base/mpi_noncontiguous_partitioner.inst.in
index e7719c667cb23952afcb1908302826625045f65f..08b73a779f85d17be878d29e57698270cddcf548 100644
@@ -21,32 +21,10 @@ for (S : REAL_SCALARS)
     \{
       namespace MPI
       \{
-        template class NoncontiguousPartitioner<S>;
-
-        template void
-        NoncontiguousPartitioner<S>::update_values(
-          std::vector<S> &      dst,
-          const std::vector<S> &src) const;
-
-        template void
-        NoncontiguousPartitioner<S>::update_values(
-          AlignedVector<S> &      dst,
-          const AlignedVector<S> &src) const;
-
-        template void
-        NoncontiguousPartitioner<S>::update_values(
-          ArrayView<S> &      dst,
-          const ArrayView<S> &src) const;
-
-        template void
-        NoncontiguousPartitioner<S>::update_values(
-          LinearAlgebra::Vector<S> &      dst,
-          const LinearAlgebra::Vector<S> &src) const;
-
         template void
-        NoncontiguousPartitioner<S>::update_values(
-          LinearAlgebra::distributed::Vector<S> &      dst,
-          const LinearAlgebra::distributed::Vector<S> &src) const;
+        NoncontiguousPartitioner::export_to_ghosted_array(
+          const ArrayView<const S> &src,
+          const ArrayView<S> &      dst) const;
       \}
     \}
   }
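
With only the ArrayView overload instantiated, callers that previously passed
containers such as std::vector<S> or LinearAlgebra::distributed::Vector<S> to
update_values() now wrap the underlying storage in ArrayView objects, as the
updated tests below do. A minimal migration sketch for double-valued data:

    // before: partitioner.update_values(dst, src);
    partitioner.export_to_ghosted_array(
      ArrayView<const double>(src.data(), src.size()),
      ArrayView<double>(dst.data(), dst.size()));
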
diff --git a/tests/base/mpi_noncontiguous_partitioner_01.cc b/tests/base/mpi_noncontiguous_partitioner_01.cc
index 9cefef7c522fb76dbe46b17f9d9189523fb0ec03..5e2a2edd763fe04e7fd8691d3d5b51e19f5c3690 100644
@@ -40,16 +40,18 @@ test(const MPI_Comm comm)
       index_set_want.add_index(2);
     }
 
-  Utilities::MPI::NoncontiguousPartitioner<double> vector(index_set_has,
-                                                          index_set_want,
-                                                          comm);
+  Utilities::MPI::NoncontiguousPartitioner vector(index_set_has,
+                                                  index_set_want,
+                                                  comm);
 
   AlignedVector<double> src(index_set_has.n_elements());
   AlignedVector<double> dst(index_set_want.n_elements());
 
   src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;
 
-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));
 
   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";
diff --git a/tests/base/mpi_noncontiguous_partitioner_02.cc b/tests/base/mpi_noncontiguous_partitioner_02.cc
index d419bd6974ef90474aefa037d4e1bda549b3fc6d..11e9912dfa1d29cf08dade0893ff0b019db51d67 100644
@@ -114,9 +114,9 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
   if (do_revert)
     std::reverse(indices_want.begin(), indices_want.end());
 
-  Utilities::MPI::NoncontiguousPartitioner<double> vector(indices_has,
-                                                          indices_want,
-                                                          comm);
+  Utilities::MPI::NoncontiguousPartitioner vector(indices_has,
+                                                  indices_want,
+                                                  comm);
 
   AlignedVector<double> src(indices_has.size());
   for (unsigned int i = 0; i < indices_has.size(); i++)
@@ -125,7 +125,9 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
 
   AlignedVector<double> dst(indices_want.size());
 
-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));
 
   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";
diff --git a/tests/base/mpi_noncontiguous_partitioner_03.cc b/tests/base/mpi_noncontiguous_partitioner_03.cc
index 3e0932d0ec1a6dfdf07ebd4883b18b2fe6a871c0..bededdf795d1f8f5f12c6af6501f75f7aa2200e5 100644
@@ -27,7 +27,7 @@ test(const MPI_Comm                       comm,
      std::vector<types::global_dof_index> index_set_has,
      std::vector<types::global_dof_index> index_set_want)
 {
-  Utilities::MPI::NoncontiguousPartitioner<double> vector;
+  Utilities::MPI::NoncontiguousPartitioner vector;
   vector.reinit(index_set_has, index_set_want, comm);
 
   AlignedVector<double> src(index_set_has.size(), 0);
@@ -36,7 +36,9 @@ test(const MPI_Comm                       comm,
   for (unsigned int i = 0; i < index_set_has.size(); i++)
     src[i] = Utilities::MPI::this_mpi_process(comm) * 100 + i;
 
-  vector.update_values(dst, src);
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));
 
   for (size_t i = 0; i < src.size(); i++)
     deallog << static_cast<int>(src[i]) << " ";
