From: bangerth
Date: Tue, 21 May 2013 03:52:02 +0000 (+0000)
Subject: Add a test. This doesn't currently work, but I'd like to continue debugging on a...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d6c7096274bff65d5802165d6e8cd297d486fdda;p=dealii-svn.git

Add a test. This doesn't currently work, but I'd like to continue debugging on a
system where this is faster...

git-svn-id: https://svn.dealii.org/trunk@29530 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/tests/mpi/petsc_condense_01.cc b/tests/mpi/petsc_condense_01.cc
new file mode 100644
index 0000000000..001b6e98e1
--- /dev/null
+++ b/tests/mpi/petsc_condense_01.cc
@@ -0,0 +1,167 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a petsc vector
+//
+// we do this by creating a vector where each processor has 100
+// elements but no ghost elements. then we add constraints on each
+// processor that constrain elements within each processor's local
+// range to ones outside. these have to be added on all
+// processors. then call distribute() and verify that the result is
+// correct.
+//
+// we use constraints of the form x_i = x_j with sequentially growing
+// x_j's so that we can verify the correctness analytically
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+  const unsigned int myid        = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  // create a vector that consists of elements indexed from 0 to n
+  PETScWrappers::MPI::Vector vec (MPI_COMM_WORLD, 100 * n_processes, 100);
+  Assert (vec.local_size() == 100, ExcInternalError());
+  Assert (vec.local_range().first  == 100*myid,     ExcInternalError());
+  Assert (vec.local_range().second == 100*myid+100, ExcInternalError());
+  for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+    vec(i) = i;
+  vec.compress ();
+
+  // the locally relevant range extends 50 elements beyond the locally
+  // owned range on either side
+  IndexSet locally_relevant_range (vec.size());
+  locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+                                    std::min (100*myid+150, vec.size()));
+  ConstraintMatrix cm (locally_relevant_range);
+
+  // add constraints that constrain an element in the middle of the
+  // local range of each processor against an element outside, both in
+  // the ghost range before and after
+  //
+  // note that we tell each processor about all constraints, but most
+  // of them will throw away this information since it is not for a
+  // DoF inside the locally relevant range
+  for (unsigned int p=0; p<n_processes; ++p)
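
Below is a minimal sketch of the constraint pattern the comments above describe: constraints of the form x_i = x_j that tie an element in the middle of each processor's locally owned range to a ghost element just outside it, added identically on every processor. The helper name add_example_constraints, the concrete index offsets (+10 and -25), and the loop body are illustrative assumptions; they are not the contents of the committed file, which is cut off above.

  #include <deal.II/lac/constraint_matrix.h>

  // Add, on every processor, one constraint per process p>0 of the form
  //   x_{100*p+10} = x_{100*p-25}
  // i.e. an element inside p's locally owned range [100*p, 100*p+100)
  // is constrained against an element in the ghost range before it.
  // (Offsets are hypothetical; any element/ghost pair would do.)
  void add_example_constraints (dealii::ConstraintMatrix &cm,
                                const unsigned int        n_processes)
  {
    for (unsigned int p=1; p<n_processes; ++p)
      {
        const unsigned int constrained = 100*p + 10;  // inside p's local range
        const unsigned int target      = 100*p - 25;  // ghost element before it
        cm.add_line  (constrained);                   // silently ignored on processors
        cm.add_entry (constrained, target, 1.);       // whose locally relevant set misses it
      }
    cm.close ();
  }

Because the ConstraintMatrix was built with the locally relevant IndexSet, add_line() simply drops constraints whose line lies outside that set, which is what the comment about processors "throwing away" irrelevant constraints refers to. After cm.close(), a call to cm.distribute(vec) on the filled PETScWrappers::MPI::Vector would overwrite each constrained x_i with the value of its target x_j, which is presumably what the remainder of the test goes on to verify.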