From 6bd9aa8a00566aa992eacce7d807b645707f11a2 Mon Sep 17 00:00:00 2001
From: bangerth
Date: Sat, 25 May 2013 12:46:37 +0000
Subject: [PATCH] New tests.

git-svn-id: https://svn.dealii.org/trunk@29594 0785d39b-7218-0410-832d-ea1e28bc413d
---
 tests/mpi/petsc_distribute_01_block.cc     | 199 +++++++++++++++++
 .../ncpu_1/cmp/generic                     |   2 +
 .../ncpu_10/cmp/generic                    |   2 +
 .../ncpu_2/cmp/generic                     |   2 +
 .../ncpu_4/cmp/generic                     |   2 +
 tests/mpi/trilinos_distribute_01_block.cc  | 203 ++++++++++++++++++
 .../ncpu_1/cmp/generic                     |   2 +
 .../ncpu_10/cmp/generic                    |   2 +
 .../ncpu_2/cmp/generic                     |   2 +
 .../ncpu_4/cmp/generic                     |   2 +
 10 files changed, 418 insertions(+)
 create mode 100644 tests/mpi/petsc_distribute_01_block.cc
 create mode 100644 tests/mpi/petsc_distribute_01_block/ncpu_1/cmp/generic
 create mode 100644 tests/mpi/petsc_distribute_01_block/ncpu_10/cmp/generic
 create mode 100644 tests/mpi/petsc_distribute_01_block/ncpu_2/cmp/generic
 create mode 100644 tests/mpi/petsc_distribute_01_block/ncpu_4/cmp/generic
 create mode 100644 tests/mpi/trilinos_distribute_01_block.cc
 create mode 100644 tests/mpi/trilinos_distribute_01_block/ncpu_1/cmp/generic
 create mode 100644 tests/mpi/trilinos_distribute_01_block/ncpu_10/cmp/generic
 create mode 100644 tests/mpi/trilinos_distribute_01_block/ncpu_2/cmp/generic
 create mode 100644 tests/mpi/trilinos_distribute_01_block/ncpu_4/cmp/generic

diff --git a/tests/mpi/petsc_distribute_01_block.cc b/tests/mpi/petsc_distribute_01_block.cc
new file mode 100644
index 0000000000..996d6ae2bf
--- /dev/null
+++ b/tests/mpi/petsc_distribute_01_block.cc
@@ -0,0 +1,199 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a petsc vector
+//
+// like _01, but for a block vector. this has the additional complication that
+// (at a global level) the set of indices owned by this processor is not
+// contiguous
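+//
+// (with two blocks of 100*n_processes entries each and 100 entries owned
+// per block, processor 'myid' owns the two disjoint global index ranges
+// [100*myid, 100*myid+100) and
+// [100*n_processes+100*myid, 100*n_processes+100*myid+100))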
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+  const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  // create a vector that consists of elements indexed from 0 to n
+  PETScWrappers::MPI::BlockVector vec(2, MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.block(0).reinit(MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.block(1).reinit(MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.collect_sizes();
+  Assert (vec.block(0).local_size() == 100, ExcInternalError());
+  Assert (vec.block(0).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(0).local_range().second == 100*myid+100, ExcInternalError());
+  Assert (vec.block(1).local_size() == 100, ExcInternalError());
+  Assert (vec.block(1).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(1).local_range().second == 100*myid+100, ExcInternalError());
+
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    vec.block(0)(i) = i;
+
+  IndexSet locally_relevant_range (vec.size());
+  locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+                                    std::min (100*myid+150, vec.block(0).size()));
+  locally_relevant_range.add_range (vec.block(0).size()+std::max<int> (100*myid-50, 0),
+                                    vec.block(0).size()+std::min (100*myid+150, vec.block(0).size()));
+  ConstraintMatrix cm (locally_relevant_range);
+
+  // add constraints that constrain an element in the middle of the
+  // local range of each processor against an element outside, both in
+  // the ghost range before and after
+  //
+  // note that we tell each processor about all constraints, but most
+  // of them will throw away this information since it is not for a
+  // DoF inside the locally relevant range
+  for (unsigned int p=0; p<n_processes; ++p)

diff --git a/tests/mpi/trilinos_distribute_01_block.cc b/tests/mpi/trilinos_distribute_01_block.cc
new file mode 100644
--- /dev/null
+++ b/tests/mpi/trilinos_distribute_01_block.cc
@@ -0,0 +1,203 @@
+// check ConstraintMatrix.distribute() for a trilinos vector
+//
+// like _01, but for a block vector. this has the additional complication that
+// (at a global level) the set of indices owned by this processor is not
+// contiguous
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+  const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  // create a vector that consists of elements indexed from 0 to n
+  TrilinosWrappers::MPI::BlockVector vec(2);
+  {
+    IndexSet is (100*n_processes);
+    is.add_range (100*myid, 100*myid+100);
+    vec.block(0).reinit (is, MPI_COMM_WORLD);
+    vec.block(1).reinit (is, MPI_COMM_WORLD);
+  }
+  vec.collect_sizes();
+  Assert (vec.block(0).local_size() == 100, ExcInternalError());
+  Assert (vec.block(0).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(0).local_range().second == 100*myid+100, ExcInternalError());
+  Assert (vec.block(1).local_size() == 100, ExcInternalError());
+  Assert (vec.block(1).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(1).local_range().second == 100*myid+100, ExcInternalError());
+
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    vec.block(0)(i) = i;
+
+  IndexSet locally_relevant_range (vec.size());
+  locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+                                    std::min (100*myid+150, vec.block(0).size()));
+  locally_relevant_range.add_range (vec.block(0).size()+std::max<int> (100*myid-50, 0),
+                                    vec.block(0).size()+std::min (100*myid+150, vec.block(0).size()));
+  ConstraintMatrix cm (locally_relevant_range);
+
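+  // the index set handed to the ConstraintMatrix above marks the locally
+  // relevant entries: the locally owned range plus up to 50 ghost entries
+  // on either side, in both blocks; constraints for entries outside this
+  // set are simply discarded on this processor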
+  // add constraints that constrain an element in the middle of the
+  // local range of each processor against an element outside, both in
+  // the ghost range before and after
+  //
+  // note that we tell each processor about all constraints, but most
+  // of them will throw away this information since it is not for a
+  // DoF inside the locally relevant range
+  for (unsigned int p=0; p<n_processes; ++p)
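Note on the constraint pattern both tests exercise: each processor's entry in
the middle of its locally owned range is constrained against entries in the
ghost ranges before and after it, and ConstraintMatrix::distribute() then fills
in the constrained values of the parallel block vector. The following is only an
illustrative sketch of the ConstraintMatrix calls involved (add_line, add_entry,
close, distribute); the function name and the concrete indices and weights are
made up for illustration and are not the loop body the tests actually use.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/petsc_parallel_block_vector.h>

// Illustrative sketch only: one constrained entry per processor, tied to a
// ghost entry before and after its locally owned range of 100 entries.
void sketch_constraints (const unsigned int n_processes,
                         const IndexSet    &locally_relevant_range,
                         PETScWrappers::MPI::BlockVector &vec)
{
  ConstraintMatrix cm (locally_relevant_range);

  for (unsigned int p=0; p<n_processes; ++p)
    {
      const unsigned int constrained = 100*p + 50;    // middle of p's range
      cm.add_line (constrained);
      if (p > 0)                                      // ghost range before
        cm.add_entry (constrained, 100*p - 25, 1.0);
      if (p < n_processes-1)                          // ghost range after
        cm.add_entry (constrained, 100*p + 125, 1.0);
    }
  cm.close ();

  // fill in the values of the constrained entries from the entries they
  // are constrained to -- this is the operation the tests verify for
  // parallel block vectors
  cm.distribute (vec);
}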