--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check correct initialization of parallel vector without any ghosts
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+  // each processor owns 2 indices; in contrast to the other tests,
+  // no ghost elements are used here
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_owned, MPI_COMM_WORLD);
+
+ // set local values
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ v.import(rw_vector, VectorOperation::insert);
+
+ v *= 2.0;
+
+ rw_vector.import(v, VectorOperation::insert);
+ if (myid == 0)
+ {
+ deallog << myid * 2 << ":" << rw_vector(myid * 2) << std::endl;
+ deallog << myid * 2 + 1 << ":" << rw_vector(myid * 2 + 1) << std::endl;
+ }
+
+ Assert(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ // check l2 norm
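+  // for the 2-process run in the reference output the vector is
+  // (0, 2, 4, 6), so the expected value is sqrt(0 + 4 + 16 + 36) =
+  // sqrt(56) ~ 7.483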
+ const double l2_norm = v.l2_norm();
+ if (myid == 0)
+ deallog << "L2 norm: " << l2_norm << std::endl;
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::0:0
+DEAL:0::1:2.000
+DEAL:0::L2 norm: 7.483
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check addition into ghosts for parallel vector
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ v.import(rw_vector, VectorOperation::add);
+
+ v *= 2.0;
+
+ rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ AssertThrow(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ // set ghost dof, compress
+ LinearAlgebra::ReadWriteVector<double> rw_relevant_vector(numproc * 2);
+ rw_relevant_vector(1) = 7;
+ v.import(rw_relevant_vector, VectorOperation::add);
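+  // every process adds 7 to global element 1, so its owner ends up with
+  // 2 + 7 * numproc (16 in the 2-process reference output)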
+
+ rw_vector.import(v, VectorOperation::insert);
+ if (myid == 0)
+ {
+ deallog << myid * 2 << ":" << rw_vector(myid * 2) << std::endl;
+ deallog << myid * 2 + 1 << ":" << rw_vector(myid * 2 + 1) << std::endl;
+ }
+
+ rw_relevant_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_relevant_vector(1) == 7. * numproc + 2, ExcInternalError());
+
+ // check l2 norm
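+  // for the 2-process run the vector is (0, 16, 4, 6), so the expected
+  // value is sqrt(256 + 16 + 36) = sqrt(308) ~ 17.55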
+ const double l2_norm = v.l2_norm();
+ if (myid == 0)
+ deallog << "L2 norm: " << l2_norm << std::endl;
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::0:0
+DEAL:0::1:16.00
+DEAL:0::L2 norm: 17.55
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check set of ghosts for parallel vector
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ v.import(rw_vector, VectorOperation::insert);
+
+ v *= 2.0;
+
+ rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ AssertThrow(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ // set ghost dof on all processors, compress
+ // (insert mode)
+ IndexSet ghost_entry(numproc * 2);
+ ghost_entry.add_index(1);
+ LinearAlgebra::ReadWriteVector<double> rw_ghost_entry(ghost_entry);
+ rw_ghost_entry(1) = 7.;
+ v.import(rw_ghost_entry, VectorOperation::insert);
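+  // every process inserts the same value, so element 1 now holds exactly
+  // 7 on its owning process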
+
+ rw_vector.import(v, VectorOperation::insert);
+ if (myid == 0)
+ {
+ deallog << myid * 2 << ":" << rw_vector(myid * 2) << std::endl;
+ deallog << myid * 2 + 1 << ":" << rw_vector(myid * 2 + 1) << std::endl;
+ }
+ // import ghosts onto all procs
+ v.update_ghost_values();
+ rw_ghost_entry(1) = 0.;
+ rw_ghost_entry.import(v, VectorOperation::insert);
+ AssertThrow(rw_ghost_entry(1) == 7.0, ExcInternalError());
+
+ // check l2 norm
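+  // for the 2-process run the vector is (0, 7, 4, 6), so the expected
+  // value is sqrt(49 + 16 + 36) = sqrt(101) ~ 10.05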
+ const double l2_norm = v.l2_norm();
+ if (myid == 0)
+ deallog << "L2 norm: " << l2_norm << std::endl;
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::0:0
+DEAL:0::1:7.000
+DEAL:0::L2 norm: 10.05
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// similar to parallel_sparse_vector_03.cc, but make sure
+// compress(insert) zeroes out ghosts in Release mode
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+void
+check(const unsigned int myid, const LinearAlgebra::ReadWriteVector<double> &v)
+{
+ if (myid == 0)
+ {
+ AssertThrow(v(10) == 10.0, ExcInternalError());
+ AssertThrow(v(11) == 0., ExcInternalError());
+ AssertThrow(v(12) == 0., ExcInternalError());
+ AssertThrow(v(14) == 14., ExcInternalError());
+
+ AssertThrow(v(5) == 55., ExcInternalError());
+ }
+ else
+ {
+ AssertThrow(v(4) == 0., ExcInternalError());
+ AssertThrow(v(5) == 55., ExcInternalError());
+ AssertThrow(v(6) == 66., ExcInternalError());
+ }
+}
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ Assert(numproc == 2, ExcNotImplemented());
+
+ const unsigned int size = 20;
+ IndexSet local_owned(size);
+ IndexSet local_nonzero(size);
+ IndexSet local_relevant(size);
+ if (myid == 0)
+ {
+ local_owned.add_range(0, 10);
+ local_nonzero.add_range(5, 10);
+ local_relevant = local_owned;
+ local_relevant.add_range(10, 13);
+ local_relevant.add_range(14, 15);
+ }
+ else
+ {
+ local_owned.add_range(10, size);
+ local_nonzero.add_range(10, 11);
+ local_nonzero.add_range(13, 15);
+ local_relevant = local_owned;
+ local_relevant.add_range(4, 7);
+ }
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+ v = 0.;
+
+ // set local values
+ IndexSet indexset_1(local_owned);
+ indexset_1.add_index(5);
+ indexset_1.compress();
+ LinearAlgebra::ReadWriteVector<double> rw_vector(indexset_1);
+ for (unsigned int i = 0; i < local_nonzero.n_elements(); i++)
+ rw_vector(local_nonzero.nth_index_in_set(i)) =
+ local_nonzero.nth_index_in_set(i);
+
+ // set value from processor which does not own it:
+ rw_vector(5) = 55.;
+ v.import(rw_vector, VectorOperation::insert);
+
+ // add to value from processor which has it as a ghost
+  // Because of a limitation in import(), the ReadWriteVector needs to
+  // contain locally owned elements
+ IndexSet indexset_2(local_owned.size());
+ LinearAlgebra::ReadWriteVector<double> rw_add;
+ if (myid == 1)
+ {
+ indexset_2.add_index(6);
+ rw_add.reinit(indexset_2);
+ rw_add(6) = 60;
+ }
+ else
+ {
+ rw_add.reinit(indexset_2);
+ }
+  v.import(rw_add, VectorOperation::add); // v(6) = 6 + 60 = 66
+  // compress(insert) used to leave ghosts untouched, which resulted in
+  // the wrong value 55+55 for this compress(add) operation.
+
+ v.update_ghost_values();
+
+ IndexSet indexset_3(size);
+ if (myid == 0)
+ {
+ indexset_3.add_index(10);
+ indexset_3.add_index(11);
+ indexset_3.add_index(12);
+ indexset_3.add_index(14);
+
+ indexset_3.add_index(5);
+ }
+ else
+ {
+ indexset_3.add_index(4);
+ indexset_3.add_index(5);
+ indexset_3.add_index(6);
+ }
+
+ rw_vector.reinit(indexset_3);
+ rw_vector.import(v, VectorOperation::insert);
+
+ check(myid, rw_vector);
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that operator= resets ghosts, both if they have been set and if they
+// have not been set
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+__global__ void
+set_value(double *values_dev, unsigned int index, double val)
+{
+ values_dev[index] = val;
+}
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+
+ v.import(rw_vector, VectorOperation::insert);
+ v *= 2.0;
+
+ rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ AssertThrow(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ // set ghost dof on remote process, no compress called. Since we don't want to
+ // call compress we cannot use import
+ auto partitioner = v.get_partitioner();
+ if (myid > 0)
+ {
+ unsigned int local_index = partitioner->global_to_local(1);
+ double * values_dev = v.get_values();
+ set_value<<<1, 1>>>(values_dev, local_index, 7);
+ }
+
+ unsigned int allocated_size = local_relevant.n_elements();
+ std::vector<double> v_host(allocated_size);
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2)] == myid * 4.0,
+ ExcInternalError());
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2 + 1)] ==
+ myid * 4.0 + 2.0,
+ ExcInternalError());
+
+ if (myid > 0)
+ AssertThrow(v_host[partitioner->global_to_local(1)] == 7.0,
+ ExcInternalError());
+
+ // reset to zero
+ v = 0;
+
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2)] == 0.,
+ ExcInternalError());
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2 + 1)] == 0.,
+ ExcInternalError());
+
+ // check that everything remains zero also
+ // after compress
+ v.compress(VectorOperation::add);
+
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2)] == 0.,
+ ExcInternalError());
+ AssertThrow(v_host[partitioner->global_to_local(myid * 2 + 1)] == 0.,
+ ExcInternalError());
+
+ // set element 1 on owning process to
+ // something nonzero
+ if (myid == 0)
+ {
+ unsigned int local_index = partitioner->global_to_local(1);
+ double * values_dev = v.get_values();
+ set_value<<<1, 1>>>(values_dev, local_index, 2);
+ }
+ if (myid > 0)
+ {
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+ AssertThrow(v_host[partitioner->global_to_local(1)] == 0.,
+ ExcInternalError());
+ }
+
+ // check that all processors get the correct
+ // value again, and that it is erased by
+ // operator=
+ v.update_ghost_values();
+
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+ AssertThrow(v_host[partitioner->global_to_local(1)] == 2.,
+ ExcInternalError());
+
+ v = 0;
+ Utilities::CUDA::copy_to_host(v.get_values(), v_host);
+ AssertThrow(v_host[partitioner->global_to_local(1)] == 0.,
+ ExcInternalError());
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that compress(add) with zero add does not change the vector entry
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+
+ v.import(rw_vector, VectorOperation::insert);
+ v *= 2.0;
+
+ rw_vector.import(v, VectorOperation::insert);
+ Assert(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ // set ghost dof to zero on remote processors,
+ // compress
+ IndexSet ghost_set(numproc * 2);
+ LinearAlgebra::ReadWriteVector<double> ghost_vector;
+ if (myid > 0)
+ {
+ ghost_set.add_index(1);
+ ghost_vector.reinit(ghost_set);
+ ghost_vector(1) = 0;
+ }
+ else
+ ghost_vector.reinit(ghost_set);
+
+
+ v.import(ghost_vector, VectorOperation::add);
+
+ // check that nothing has changed
+ rw_vector.import(v, VectorOperation::insert);
+ Assert(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0, ExcInternalError());
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check global reduction operation (norms, operator==, operator!=) on
+// parallel vector
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all processors are
+ // ghosting element 1 (the second)
+ IndexSet local_owned(std::min(16U, numproc * 2));
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_owned, MPI_COMM_WORLD);
+
+ // set local values
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ {
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ }
+ v.import(rw_vector, VectorOperation::insert);
+ v *= 2.0;
+ {
+ rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_vector(myid * 2) == myid * 4.0, ExcInternalError());
+ AssertThrow(rw_vector(myid * 2 + 1) == myid * 4.0 + 2.0,
+ ExcInternalError());
+ }
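+
+  // for the 2-process run the vector is (0, 2, 4, 6), so the reductions
+  // below should give l2 = sqrt(56) ~ 7.483, l1 = 12, linfty = 6,
+  // mean value = 3, and v * v = 56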
+
+ // check l2 norm
+ {
+ const double l2_norm = v.l2_norm();
+ if (myid == 0)
+ deallog << "l2 norm: " << l2_norm << std::endl;
+ }
+
+ // check l1 norm
+ {
+ const double l1_norm = v.l1_norm();
+ if (myid == 0)
+ deallog << "l1 norm: " << l1_norm << std::endl;
+ }
+
+ // check linfty norm
+ {
+ const double linfty_norm = v.linfty_norm();
+ if (myid == 0)
+ deallog << "linfty norm: " << linfty_norm << std::endl;
+ }
+
+ // check mean value (should be equal to l1
+ // norm divided by vector size here since we
+ // have no negative entries)
+ {
+ const double mean = v.mean_value();
+ if (myid == 0)
+ deallog << "Mean value: " << mean << std::endl;
+
+ Assert(std::fabs(mean * v.size() - v.l1_norm()) < 1e-15,
+ ExcInternalError());
+ }
+ // check inner product
+ {
+ const double norm_sqr = v.l2_norm() * v.l2_norm();
+ AssertThrow(std::fabs(v * v - norm_sqr) < 1e-15, ExcInternalError());
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v2;
+ v2 = v;
+ AssertThrow(std::fabs(v2 * v - norm_sqr) < 1e-15, ExcInternalError());
+
+ const double inner_prod = v * v2;
+ if (myid == 0)
+ deallog << "Inner product: " << inner_prod << std::endl;
+ }
+
+ // check all_zero
+ {
+ bool allzero = v.all_zero();
+ if (myid == 0)
+ deallog << " v==0 ? " << allzero << std::endl;
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v2;
+ v2.reinit(v);
+ allzero = v2.all_zero();
+ if (myid == 0)
+ deallog << " v2==0 ? " << allzero << std::endl;
+
+ v2.import(rw_vector, VectorOperation::insert);
+ allzero = v2.all_zero();
+ if (myid == 0)
+ deallog << " v2==0 ? " << allzero << std::endl;
+ }
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::l2 norm: 7.483
+DEAL:0::l1 norm: 12.00
+DEAL:0::linfty norm: 6.000
+DEAL:0::Mean value: 3.000
+DEAL:0::Inner product: 56.00
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check parallel_vector::copy_from to update ghost values. Same vector layout
+// as in parallel_vector_07.cc
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+ const unsigned int set = 200;
+ AssertIndexRange(numproc, set - 2);
+ const unsigned int local_size = set - myid;
+ unsigned int global_size = 0;
+ unsigned int my_start = 0;
+ for (unsigned int i = 0; i < numproc; ++i)
+ {
+ global_size += set - i;
+ if (i < myid)
+ my_start += set - i;
+ }
+  // each processor owns some indices and all are ghosting elements
+  // from the first two processors; some ghost entries lie right
+  // around the border between two processors
+ IndexSet local_owned(global_size);
+ local_owned.add_range(my_start, my_start + local_size);
+ IndexSet local_relevant(global_size);
+ local_relevant = local_owned;
+ unsigned int ghost_indices[10] = {
+ 1, 2, 13, set - 3, set - 2, set - 1, set, set + 1, set + 2, set + 3};
+ local_relevant.add_indices(&ghost_indices[0], &ghost_indices[0] + 10);
+
+ // v has ghosts, w has none. set some entries
+ // on w, copy into v and check if they are
+ // there
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> w(
+ local_owned, local_owned, MPI_COMM_WORLD);
+
+ // set a few of the local elements
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ for (unsigned i = 0; i < local_size; ++i)
+ rw_vector.local_element(i) = 2.0 * (i + my_start);
+ w.import(rw_vector, VectorOperation::insert);
+
+ v = w;
+ v.update_ghost_values();
+
+ // check local values for correctness
+ rw_vector.import(v, VectorOperation::insert);
+ for (unsigned int i = 0; i < local_size; ++i)
+ AssertThrow(rw_vector.local_element(i) == 2.0 * (i + my_start),
+ ExcInternalError());
+
+ // check non-local entries on all processors
+ LinearAlgebra::ReadWriteVector<double> ghost_vector(local_relevant);
+ ghost_vector.import(v, VectorOperation::insert);
+ for (unsigned int i = 0; i < 10; ++i)
+ AssertThrow(ghost_vector(ghost_indices[i]) == 2. * ghost_indices[i],
+ ExcInternalError());
+
+ // now the same again, but import ghosts automatically because v had ghosts
+ // set before calling operator =
+ v.reinit(local_owned, local_relevant, MPI_COMM_WORLD);
+ v.update_ghost_values();
+ v = w;
+
+ // check local values for correctness
+ rw_vector.import(v, VectorOperation::insert);
+ for (unsigned int i = 0; i < local_size; ++i)
+ AssertThrow(rw_vector.local_element(i) == 2.0 * (i + my_start),
+ ExcInternalError());
+
+ // check non-local entries on all processors
+ ghost_vector.import(v, VectorOperation::insert);
+ for (unsigned int i = 0; i < 10; ++i)
+ AssertThrow(ghost_vector(ghost_indices[i]) == 2. * ghost_indices[i],
+ ExcInternalError());
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check operator= when we do some operations with ghosts
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> w(v);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ v.import(rw_vector, VectorOperation::insert);
+
+ v.update_ghost_values();
+
+ // check that the value of the ghost is 1.0
+ IndexSet ghost_set(numproc * 2);
+ ghost_set.add_index(1);
+ LinearAlgebra::ReadWriteVector<double> ghost_vector(ghost_set);
+ ghost_vector.import(v, VectorOperation::insert);
+ AssertThrow(ghost_vector(1) == 1., ExcInternalError());
+
+ // copy vector
+ w = v;
+ v *= 2.0;
+
+ v.update_ghost_values();
+ w.update_ghost_values();
+ ghost_vector.import(v, VectorOperation::insert);
+ AssertThrow(ghost_vector(1) == 2., ExcInternalError());
+ ghost_vector.import(w, VectorOperation::insert);
+ AssertThrow(ghost_vector(1) == 1., ExcInternalError());
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that add, sadd, equ, scale work correctly on a vector where some
+// processor do not own any degrees of freedom
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+ // global size: 20, local_size: 3 as long as
+ // less than 20
+ const unsigned int local_size = 3;
+ const unsigned int global_size = std::min(20U, local_size * numproc);
+ const int my_start = std::min(local_size * myid, global_size);
+ const int my_end = std::min(local_size * (myid + 1), global_size);
+ const int actual_local_size = my_end - my_start;
+
+ IndexSet local_owned(global_size);
+ if (my_end > my_start)
+ local_owned.add_range(static_cast<unsigned int>(my_start),
+ static_cast<unsigned int>(my_end));
+ IndexSet local_relevant(global_size);
+ local_relevant = local_owned;
+ local_relevant.add_index(2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+ AssertDimension(static_cast<unsigned int>(actual_local_size), v.local_size());
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> w(v), x(v),
+ y(v);
+
+ // set local elements
+ LinearAlgebra::ReadWriteVector<double> v_rw(local_owned);
+ LinearAlgebra::ReadWriteVector<double> w_rw(local_owned);
+ LinearAlgebra::ReadWriteVector<double> x_rw(local_owned);
+ for (int i = 0; i < actual_local_size; ++i)
+ {
+ v_rw.local_element(i) = i + my_start;
+ w_rw.local_element(i) = 1000 + 2 * (my_start + i);
+ x_rw.local_element(i) = 10000;
+ }
+ v.import(v_rw, VectorOperation::insert);
+ w.import(w_rw, VectorOperation::insert);
+ x.import(x_rw, VectorOperation::insert);
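+  // at this point the global entries are v_i = i, w_i = 1000 + 2 * i and
+  // x_i = 10000; the expected values in the checks below follow from this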
+
+ y = v;
+ LinearAlgebra::ReadWriteVector<double> y_rw(local_owned);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == i + my_start, ExcInternalError());
+
+ if (myid == 0)
+ deallog << "Check add (scalar): ";
+ y.add(42);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == i + my_start + 42, ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check add (vector): ";
+ y.add(1., w);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 3 * (i + my_start) + 1042,
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check add (factor, vector): ";
+ y.add(-1., w);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == i + my_start + 42, ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check add (factor, vector, factor, vector): ";
+ y.add(2., w, -0.5, x);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 5 * (i + my_start) + 2042 - 5000,
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check sadd (factor, factor, vector): ";
+ y = v;
+ y.sadd(-3., 2., v);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == (-i - my_start), ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check sadd (factor, factor, vector, factor, vector): ";
+ y.sadd(2., 3., v, 2., w);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ {
+ AssertThrow(y_rw.local_element(i) == 5 * (i + my_start) + 2000,
+ ExcInternalError());
+ }
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog
+ << "Check sadd (factor, factor, vector, factor, vector, factor, vector): ";
+ y.sadd(-1., 1., v, 2., w);
+ y.add(2., x);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 20000, ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check add (factor, vector_1, factor, vector_1): ";
+ y = 0;
+ y.add(1., v, 3., v);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 4 * (i + my_start),
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check operator * (scalar): ";
+ x *= 2.;
+ x_rw.import(x, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(x_rw.local_element(i) == 20000., ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check operator / (scalar): ";
+ x /= 2.;
+ x_rw.import(x, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(x_rw.local_element(i) == 10000., ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check scale (vector): ";
+ y.scale(x);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 40000. * (i + my_start),
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check equ (factor, vector): ";
+ y.equ(10., x);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 100000., ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check equ (factor, vector, factor, vector): ";
+ y.equ(10., v, -2., w);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 6. * (i + my_start) - 2000,
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+
+ if (myid == 0)
+ deallog << "Check equ (factor, vector, factor, vector, factor, vector): ";
+ y.equ(10., v, -2., w);
+ y.add(3., x);
+ y_rw.import(y, VectorOperation::insert);
+ for (int i = 0; i < actual_local_size; ++i)
+ AssertThrow(y_rw.local_element(i) == 6. * (i + my_start) + 28000,
+ ExcInternalError());
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::Check add (scalar): OK
+DEAL:0::Check add (vector): OK
+DEAL:0::Check add (factor, vector): OK
+DEAL:0::Check add (factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector, factor, vector): OK
+DEAL:0::Check add (factor, vector_1, factor, vector_1): OK
+DEAL:0::Check operator * (scalar): OK
+DEAL:0::Check operator / (scalar): OK
+DEAL:0::Check scale (vector): OK
+DEAL:0::Check equ (factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector, factor, vector): OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check LinearAlgebra::distributed::Vector::swap
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+DeclException2(ExcNonEqual,
+ double,
+ double,
+ << "Left compare: " << arg1 << ", right compare: " << arg2);
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+ // vector 0:
+ // global size: 20, local_size: 3 as long as
+ // less than 20
+ const unsigned int local_size0 = 3;
+ const unsigned int global_size0 = std::min(20U, local_size0 * numproc);
+ const unsigned int my_start0 = std::min(local_size0 * myid, global_size0);
+ const unsigned int my_end0 = std::min(local_size0 * (myid + 1), global_size0);
+ const unsigned int actual_local_size0 = my_end0 - my_start0;
+
+ IndexSet local_owned0(global_size0);
+ if (my_end0 > my_start0)
+ local_owned0.add_range(static_cast<unsigned int>(my_start0),
+ static_cast<unsigned int>(my_end0));
+ IndexSet local_relevant0(global_size0);
+ local_relevant0 = local_owned0;
+ local_relevant0.add_index(2);
+ if (numproc > 2)
+ local_relevant0.add_index(8);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v0(
+ local_owned0, local_relevant0, MPI_COMM_WORLD);
+
+ // vector1: local size 4
+ const unsigned int local_size1 = 4;
+ const unsigned int global_size1 = local_size1 * numproc;
+ const int my_start1 = local_size1 * myid;
+ const int my_end1 = local_size1 * (myid + 1);
+
+ IndexSet local_owned1(global_size1);
+ local_owned1.add_range(static_cast<unsigned int>(my_start1),
+ static_cast<unsigned int>(my_end1));
+ IndexSet local_relevant1(global_size1);
+ local_relevant1 = local_owned1;
+ local_relevant1.add_index(0);
+ local_relevant1.add_index(2);
+ if (numproc > 2)
+ {
+ local_relevant1.add_index(8);
+ local_relevant1.add_index(10);
+ }
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v1(
+ local_owned1, local_relevant1, MPI_COMM_WORLD);
+
+ v0 = 1;
+ v1 = 2;
+ // check assignment in initial state
+ LinearAlgebra::ReadWriteVector<double> v0_rw(local_owned0);
+ v0_rw.import(v0, VectorOperation::insert);
+ LinearAlgebra::ReadWriteVector<double> v1_rw(local_owned1);
+ v1_rw.import(v1, VectorOperation::insert);
+ for (unsigned int i = 0; i < v0.local_size(); ++i)
+ AssertThrow(v0_rw.local_element(i) == 1.,
+ ExcNonEqual(v0_rw.local_element(i), 1.));
+ for (unsigned int i = 0; i < v1.local_size(); ++i)
+ AssertThrow(v1_rw.local_element(i) == 2.,
+ ExcNonEqual(v1_rw.local_element(i), 2.));
+
+ // check ghost elements in initial state
+ v0.update_ghost_values();
+ v1.update_ghost_values();
+ LinearAlgebra::ReadWriteVector<double> v0_ghost_rw(local_relevant0);
+ v0_ghost_rw.import(v0, VectorOperation::insert);
+ LinearAlgebra::ReadWriteVector<double> v1_ghost_rw(local_relevant1);
+ v1_ghost_rw.import(v1, VectorOperation::insert);
+ AssertThrow(v0_ghost_rw(2) == 1., ExcNonEqual(v0_ghost_rw(2), 1.));
+ if (numproc > 2)
+    AssertThrow(v0_ghost_rw(8) == 1., ExcNonEqual(v0_ghost_rw(8), 1.));
+ AssertThrow(v1_ghost_rw(0) == 2., ExcNonEqual(v1_ghost_rw(0), 2.));
+ AssertThrow(v1_ghost_rw(2) == 2., ExcNonEqual(v1_ghost_rw(2), 2.));
+ if (numproc > 2)
+ {
+ AssertThrow(v1_ghost_rw(8) == 2., ExcNonEqual(v1_ghost_rw(8), 2.));
+ AssertThrow(v1_ghost_rw(10) == 2., ExcNonEqual(v1_ghost_rw(10), 2.));
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "Initial set and ghost update OK" << std::endl;
+
+ // now swap v1 and v0
+ v0.swap(v1);
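+  // swap() exchanges the data and the partitioners, so the global and
+  // local sizes of v0 and v1 trade places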
+ AssertDimension(v0.local_size(), local_size1);
+ AssertDimension(v1.local_size(), actual_local_size0);
+ AssertDimension(v0.size(), global_size1);
+ AssertDimension(v1.size(), global_size0);
+ v1_rw.import(v0, VectorOperation::insert);
+ for (unsigned int i = 0; i < local_size1; ++i)
+ AssertThrow(v1_rw.local_element(i) == 2.,
+ ExcNonEqual(v1_rw.local_element(i), 2.));
+ v0_rw.import(v1, VectorOperation::insert);
+ for (unsigned int i = 0; i < actual_local_size0; ++i)
+ AssertThrow(v0_rw.local_element(i) == 1.,
+ ExcNonEqual(v0_rw.local_element(i), 1.));
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "First swap OK" << std::endl;
+ v0.update_ghost_values();
+ v1.update_ghost_values();
+ v0_ghost_rw.import(v1, VectorOperation::insert);
+ AssertThrow(v0_ghost_rw(2) == 1., ExcNonEqual(v0_ghost_rw(2), 1.));
+ if (numproc > 2)
+ AssertThrow(v0_ghost_rw(8) == 1., ExcNonEqual(v0_ghost_rw(8), 1.));
+ v1_ghost_rw.import(v0, VectorOperation::insert);
+ AssertThrow(v1_ghost_rw(0) == 2., ExcNonEqual(v1_ghost_rw(0), 2.));
+ AssertThrow(v1_ghost_rw(2) == 2., ExcNonEqual(v1_ghost_rw(2), 2.));
+ if (numproc > 2)
+ {
+ AssertThrow(v1_ghost_rw(8) == 2., ExcNonEqual(v1_ghost_rw(8), 2.));
+ AssertThrow(v1_ghost_rw(10) == 2., ExcNonEqual(v1_ghost_rw(10), 2.));
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "Ghost values after first swap OK" << std::endl;
+
+ // now set the vectors to some different
+ // values and check the ghost values again
+ v0 = 7.;
+ v1 = 42.;
+ v0.update_ghost_values();
+ v1.update_ghost_values();
+ v0_ghost_rw.import(v1, VectorOperation::insert);
+ AssertThrow(v0_ghost_rw(2) == 42., ExcNonEqual(v0_ghost_rw(2), 42.));
+ if (numproc > 2)
+ AssertThrow(v0_ghost_rw(8) == 42., ExcNonEqual(v0_ghost_rw(8), 42.));
+ v1_ghost_rw.import(v0, VectorOperation::insert);
+ AssertThrow(v1_ghost_rw(0) == 7., ExcNonEqual(v1_ghost_rw(0), 7.));
+ AssertThrow(v1_ghost_rw(2) == 7., ExcNonEqual(v1_ghost_rw(2), 7.));
+ if (numproc > 2)
+ {
+ AssertThrow(v1_ghost_rw(8) == 7., ExcNonEqual(v1_ghost_rw(8), 7.));
+ AssertThrow(v1_ghost_rw(10) == 7., ExcNonEqual(v1_ghost_rw(10), 7.));
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "Ghost values after re-set OK" << std::endl;
+
+ // swap with an empty vector
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v2;
+ v2.swap(v0);
+ AssertDimension(v0.size(), 0);
+ AssertDimension(v2.size(), global_size1);
+ AssertDimension(v2.local_size(), local_size1);
+ v1_rw.import(v2, VectorOperation::insert);
+ for (int i = my_start1; i < my_end1; ++i)
+ AssertThrow(v1_rw(i) == 7., ExcNonEqual(v1_rw(i), 7.));
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "Second swap OK" << std::endl;
+ v2 = -1.;
+ v2.update_ghost_values();
+ v1_ghost_rw.import(v2, VectorOperation::insert);
+ AssertThrow(v1_ghost_rw(0) == -1., ExcNonEqual(v1_ghost_rw(0), -1.));
+ AssertThrow(v1_ghost_rw(2) == -1., ExcNonEqual(v1_ghost_rw(2), -1.));
+ if (numproc > 2)
+ {
+ AssertThrow(v1_ghost_rw(8) == -1., ExcNonEqual(v1_ghost_rw(8), -1.));
+ AssertThrow(v1_ghost_rw(10) == -1., ExcNonEqual(v1_ghost_rw(10), -1.));
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ if (myid == 0)
+ deallog << "Ghost values after second swap OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all the ranks will try to access the device 0. This is fine if
+// we have one rank per node _and_ one GPU per node. If we have multiple GPUs
+ // on one node, we need each process to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::Initial set and ghost update OK
+DEAL:0::First swap OK
+DEAL:0::Ghost values after first swap OK
+DEAL:0::Ghost values after re-set OK
+DEAL:0::Second swap OK
+DEAL:0::Ghost values after second swap OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check for ghosts on parallel vector: similar to parallel_vector_03, but
+// inserting a value into a ghosted entry from all processors; the insert must
+// reach the owning process without affecting the other vector entries
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // each processor owns 2 indices and all
+ // are ghosting element 1 (the second)
+ IndexSet local_owned(numproc * 2);
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(numproc * 2);
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values and check them
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+
+ v.import(rw_vector, VectorOperation::insert);
+ v *= 2.0;
+ v.add(1.0);
+
+ rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(rw_vector(myid * 2) == myid * 4.0 + 1, ExcInternalError());
+ AssertThrow(rw_vector(myid * 2 + 1) == myid * 4.0 + 3.0, ExcInternalError());
+
+ // set ghost dof on all processors, compress
+ // (insert mode)
+ IndexSet index(numproc * 2);
+ index.add_index(1);
+ LinearAlgebra::ReadWriteVector<double> local_rw_vector(index);
+ local_rw_vector(1) = 7;
+ v.import(local_rw_vector, VectorOperation::insert);
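+ // the insert overwrites entry 1 on its owning process (previously 3) with 7;
+ // the other entries are untouched, as the printout below shows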
+
+ {
+ rw_vector.import(v, VectorOperation::insert);
+ deallog << myid * 2 << ":" << rw_vector(myid * 2) << std::endl;
+ deallog << myid * 2 + 1 << ":" << rw_vector(myid * 2 + 1) << std::endl;
+ }
+
+ local_rw_vector(1) = -7;
+ v.import(local_rw_vector, VectorOperation::insert);
+
+ {
+ rw_vector.import(v, VectorOperation::insert);
+ deallog << myid * 2 << ":" << rw_vector(myid * 2) << std::endl;
+ deallog << myid * 2 + 1 << ":" << rw_vector(myid * 2 + 1) << std::endl;
+ }
+
+ // import ghosts onto all procs
+ v.update_ghost_values();
+ local_rw_vector.import(v, VectorOperation::insert);
+ AssertThrow(local_rw_vector(1) == -7.0, ExcInternalError());
+
+ // check l2 norm
+ const double l2_norm = v.l2_norm();
+ if (myid == 0)
+ deallog << "L2 norm: " << l2_norm << std::endl;
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all ranks will try to access device 0. This is fine if we
+ // have one rank per node _and_ one GPU per node. If there are multiple GPUs
+ // on one node, each process needs to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::0:1.000
+DEAL:0::1:7.000
+DEAL:0::0:1.000
+DEAL:0::1:-7.000
+DEAL:0::L2 norm: 11.14
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that handling of ghost elements in parallel distributed vectors works
+// appropriately when assigning from ghosted to non-ghosted vectors
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+
+ // processors 0 and 1 own 2 indices each, the remaining processors own
+ // nothing; all processors ghost the global elements 1 and 3
+ IndexSet local_owned(std::min(numproc * 2, 4U));
+ if (myid < 2)
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(local_owned.size());
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+ if (numproc > 1)
+ local_relevant.add_range(3, 4);
+
+ // run this twice: once where update_ghost_values() has not been called on
+ // the vectors and once where it has
+ for (unsigned int run = 0; run < 2; ++run)
+ {
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ if (myid < 2)
+ {
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ }
+
+ v.import(rw_vector, VectorOperation::insert);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> w(v), u(v);
+ u = 0;
+
+ v *= 2.0;
+ v.add(1.0);
+
+ if (run == 1)
+ {
+ v.update_ghost_values();
+ w.update_ghost_values();
+ u.update_ghost_values();
+ }
+
+ rw_vector.import(v, VectorOperation::insert);
+ if (myid < 2)
+ {
+ Assert(rw_vector(myid * 2) == myid * 4.0 + 1, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 4.0 + 3.0,
+ ExcInternalError());
+ }
+
+ // copy the vector content to manually created non-ghosted vectors.
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v_dist(
+ local_owned, MPI_COMM_WORLD),
+ w_dist(v_dist), u_dist(v_dist);
+
+ v_dist = v;
+ w_dist = w;
+ u_dist = u;
+
+ u_dist.add(1.0, v_dist, -1.0, w);
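+ // with x denoting the initial values, v_dist holds 2 x + 1 and w holds x,
+ // so u_dist = v_dist - w = x + 1; the assertions below check exactly that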
+
+ // copy back to a ghosted vector and update ghost values there
+ u = u_dist;
+ u.update_ghost_values();
+
+ rw_vector.import(u_dist, VectorOperation::insert);
+ if (myid < 2)
+ {
+ Assert(rw_vector(myid * 2) == myid * 2.0 + 1, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 2.0 + 2.0,
+ ExcInternalError());
+ }
+
+ rw_vector.import(u, VectorOperation::insert);
+ if (myid < 2)
+ {
+ Assert(rw_vector(myid * 2) == myid * 2.0 + 1, ExcInternalError());
+ Assert(rw_vector(myid * 2 + 1) == myid * 2.0 + 2.0,
+ ExcInternalError());
+ }
+
+ IndexSet u_ghost_set(local_owned.size());
+ u_ghost_set.add_index(1);
+ u_ghost_set.add_index(3);
+ LinearAlgebra::ReadWriteVector<double> u_ghost_vector(u_ghost_set);
+ u_ghost_vector.import(u, VectorOperation::insert);
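+ // u = x + 1, so the ghosted entries 1 and 3 must read 2 and 4, respectively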
+ Assert(u_ghost_vector(1) == 2., ExcInternalError());
+ if (numproc > 1)
+ {
+ if (run == 1)
+ {
+ IndexSet v_ghost_set(local_owned.size());
+ v_ghost_set.add_index(3);
+ LinearAlgebra::ReadWriteVector<double> v_ghost_vector(
+ v_ghost_set);
+ v_ghost_vector.import(v, VectorOperation::insert);
+ Assert(v_ghost_vector(3) == 7., ExcInternalError());
+ }
+ Assert(u_ghost_vector(3) == 4., ExcInternalError());
+ }
+
+ // check l2 norm
+ const double l2_norm = u.l2_norm();
+ if (myid == 0 && run == 1)
+ deallog << "L2 norm: " << l2_norm << std::endl;
+ }
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all ranks will try to access device 0. This is fine if we
+ // have one rank per node _and_ one GPU per node. If there are multiple GPUs
+ // on one node, each process needs to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::L2 norm: 5.477
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that the ghost element state (has_ghost_elements()) of parallel
+// distributed vectors is handled appropriately when assigning from a source
+// vector with and without updated ghost values using the assignment operator
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ if (myid == 0)
+ deallog << "numproc=" << numproc << std::endl;
+
+ // processors 0 and 1 own 2 indices each, the remaining processors own
+ // nothing; all processors ghost the global elements 1 and 3
+ IndexSet local_owned(std::min(numproc * 2, 4U));
+ if (myid < 2)
+ local_owned.add_range(myid * 2, myid * 2 + 2);
+ IndexSet local_relevant(local_owned.size());
+ local_relevant = local_owned;
+ local_relevant.add_range(1, 2);
+ if (numproc > 1)
+ local_relevant.add_range(3, 4);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ local_owned, local_relevant, MPI_COMM_WORLD);
+
+ // set local values
+ LinearAlgebra::ReadWriteVector<double> rw_vector(local_owned);
+ if (myid < 2)
+ {
+ rw_vector(myid * 2) = myid * 2.0;
+ rw_vector(myid * 2 + 1) = myid * 2.0 + 1.0;
+ }
+
+ v.import(rw_vector, VectorOperation::insert);
+
+ if (myid == 0)
+ deallog << "v has ghost elements: " << v.has_ghost_elements() << std::endl;
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> w, x;
+ w = v;
+ if (myid == 0)
+ deallog << "w has ghost elements: " << w.has_ghost_elements() << std::endl;
+
+ v.update_ghost_values();
+ w = v;
+ if (myid == 0)
+ deallog << "w has ghost elements: " << w.has_ghost_elements() << std::endl;
+
+ v.zero_out_ghosts();
+ w = v;
+ if (myid == 0)
+ deallog << "w has ghost elements: " << w.has_ghost_elements() << std::endl;
+
+ w.zero_out_ghosts();
+ w = v;
+ if (myid == 0)
+ deallog << "w has ghost elements: " << w.has_ghost_elements() << std::endl;
+
+ v.update_ghost_values();
+ x = v;
+ if (myid == 0)
+ deallog << "x has ghost elements: " << x.has_ghost_elements() << std::endl;
+
+ x.zero_out_ghosts();
+ if (myid == 0)
+ deallog << "x has ghost elements: " << x.has_ghost_elements() << std::endl;
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all ranks will try to access device 0. This is fine if we
+ // have one rank per node _and_ one GPU per node. If there are multiple GPUs
+ // on one node, each process needs to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = myid % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ if (myid == 0)
+ {
+ initlog();
+ deallog << std::setprecision(4);
+
+ test();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:0::numproc=2
+DEAL:0::v has ghost elements: 0
+DEAL:0::w has ghost elements: 0
+DEAL:0::w has ghost elements: 1
+DEAL:0::w has ghost elements: 1
+DEAL:0::w has ghost elements: 0
+DEAL:0::x has ghost elements: 1
+DEAL:0::x has ghost elements: 0
+DEAL:0::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Check that the data range representing ghosts is really initialized to zero
+// when doing reinit() from another vector and manually setting the local
+// range
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/read_write_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
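+// CUDA kernel used below to fill the locally owned range of a vector: each
+// thread writes 1. into its own entry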
+__global__ void
+set(double *v)
+{
+ v[threadIdx.x] = 1.;
+}
+
+void
+test()
+{
+ unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ IndexSet locally_owned(n_procs * 2);
+ locally_owned.add_range(my_id * 2, my_id * 2 + 2);
+ IndexSet ghost_set(n_procs * 2);
+ ghost_set.add_index(0);
+ ghost_set.add_index(2);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+ locally_owned, ghost_set, MPI_COMM_WORLD);
+
+ // create the vector without zeroing its entries (omit_zeroing_entries ==
+ // true) since they will be overwritten right away anyway
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v2;
+ v2.reinit(v, true);
+
+ // set locally owned range of v2 manually
+ set<<<1, v2.local_size()>>>(v2.get_values());
+
+ // add entries to the ghost values.
+ // Because of a limitation in import(), the IndexSet of the ReadWriteVector
+ // also needs to contain the locally owned elements.
+ IndexSet workaround_set(locally_owned);
+ workaround_set.add_index(0);
+ workaround_set.add_index(2);
+ workaround_set.compress();
+ LinearAlgebra::ReadWriteVector<double> rw_vector(workaround_set);
+ rw_vector(0) += 1.;
+ rw_vector(2) += 1.;
+ v2.import(rw_vector, VectorOperation::add);
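+ // with two MPI ranks (as in the expected output), each rank contributes +1
+ // to the global entries 0 and 2, so these end up at 1 (set by the kernel)
+ // + 1 + 1 = 3, while the remaining owned entries stay at 1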
+
+ // now we should have the correct data, not some uninitialized trash that
+ // resided in the ghost range
+ v2.print(deallog.get_file_stream());
+
+ v2.update_ghost_values();
+ v2.print(deallog.get_file_stream());
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ Utilities::CUDA::Handle cuda_handle;
+ // By default, all ranks will try to access device 0. This is fine if we
+ // have one rank per node _and_ one GPU per node. If there are multiple GPUs
+ // on one node, each process needs to access a different GPU. We assume that
+ // each node has the same number of GPUs.
+ int n_devices = 0;
+ cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+ AssertCuda(cuda_error_code);
+ int device_id = my_id % n_devices;
+ cuda_error_code = cudaSetDevice(device_id);
+ AssertCuda(cuda_error_code);
+
+ MPILogInitAll log;
+ test();
+}
--- /dev/null
+
+Process #0
+Local range: [0, 2), global size: 4
+Vector data:
+3.000e+00 1.000e+00
+Process #0
+Local range: [0, 2), global size: 4
+Vector data:
+3.000e+00 1.000e+00
+Ghost entries (global index / value):
+(2/3.000e+00)
+
+Process #1
+Local range: [2, 4), global size: 4
+Vector data:
+3.000e+00 1.000e+00
+Process #1
+Local range: [2, 4), global size: 4
+Vector data:
+3.000e+00 1.000e+00
+Ghost entries (global index / value):
+(0/3.000e+00)
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check that LinearAlgebra::distributed::Vector::reinit does not carry over any
+// state that can lead to invalid memory access. In this test, the MPI
+// communicator is deleted.
+
+
+#include <deal.II/base/cuda.h>
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.templates.h>
+#include <deal.II/lac/vector_memory.h>
+#include <deal.II/lac/vector_memory.templates.h>
+
+#include "../tests.h"
+
+template <typename VectorType>
+void
+do_test()
+{
+ IndexSet set(5);
+ set.add_range(0, 5);
+
+ VectorType v1, v2;
+
+ {
+ MPI_Comm communicator =
+ Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
+ v1.reinit(set, communicator);
+ deallog << "reinit: " << v1.size() << " ";
+
+#ifdef DEAL_II_WITH_MPI
+ MPI_Comm_free(&communicator);
+#endif
+ }
+
+ {
+ MPI_Comm communicator =
+ Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
+ v2.reinit(set, communicator);
+ v1.reinit(v2);
+ deallog << v1.size() << " ";
+ v1.reinit(v2);
+ deallog << v1.size() << std::endl;
+
+#ifdef DEAL_II_WITH_MPI
+ MPI_Comm_free(&communicator);
+#endif
+ }
+
+ {
+ MPI_Comm communicator =
+ Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
+ v2.reinit(set, communicator);
+ v1 = v2;
+ deallog << "assign " << v1.size() << " ";
+ v1 = v2;
+ deallog << v1.size() << std::endl;
+
+#ifdef DEAL_II_WITH_MPI
+ MPI_Comm_free(&communicator);
+#endif
+ }
+
+ {
+ MPI_Comm communicator =
+ Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
+ GrowingVectorMemory<VectorType> memory;
+ typename VectorMemory<VectorType>::Pointer v3(memory);
+ v1.reinit(set, communicator);
+ v3->reinit(v1);
+ deallog << "reinit pool " << v1.size() << " " << v3->size() << " ";
+
+#ifdef DEAL_II_WITH_MPI
+ MPI_Comm_free(&communicator);
+#endif
+ }
+
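+ // GrowingVectorMemory keeps a static pool per vector type, so the pool may
+ // hand this block the very vector released above, whose communicator has
+ // already been freed; reinit() must not touch that stale communicator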
+ {
+ MPI_Comm communicator =
+ Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
+ GrowingVectorMemory<VectorType> memory;
+ typename VectorMemory<VectorType>::Pointer v3(memory);
+ v1.reinit(set, communicator);
+ v3->reinit(v1);
+ deallog << "reinit pool " << v3->size() << std::endl;
+
+#ifdef DEAL_II_WITH_MPI
+ MPI_Comm_free(&communicator);
+#endif
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+ Utilities::CUDA::Handle cuda_handle;
+
+ initlog();
+
+ do_test<
+ LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::CUDA>>();
+}
--- /dev/null
+
+DEAL::reinit: 5 5 5
+DEAL::assign 5 5
+DEAL::reinit pool 5 5 reinit pool 5