From: heister
Date: Wed, 24 Aug 2011 20:17:44 +0000 (+0000)
Subject: test for ConstraintMatrix with hanging nodes and no_normal_flux being
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2ac6ae565beafebb3997513cbbfda73363114003;p=dealii-svn.git

test for ConstraintMatrix with hanging nodes and no_normal_flux being
consistent in distributed computation:

git-svn-id: https://svn.dealii.org/trunk@24185 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/tests/mpi/no_flux_constraints_03.cc b/tests/mpi/no_flux_constraints_03.cc
new file mode 100644
index 0000000000..55e04eb3de
--- /dev/null
+++ b/tests/mpi/no_flux_constraints_03.cc
@@ -0,0 +1,198 @@
+//---------------------------------------------------------------------------
+//    $Id: no_flux_constraints.cc 24175 2011-08-24 12:14:21Z kronbichler $
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check that the ConstraintMatrix with hanging nodes and no-normal-flux
+// constraints on an adaptively refined hyper_cube is the same independent
+// of the number of CPUs
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q.h>
+#include <deal.II/numerics/vectors.h>
+
+#include <fstream>
+#include <sstream>
+
+template <int dim>
+void test()
+{
+  Assert (dim == 3, ExcNotImplemented());
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
+
+  GridGenerator::hyper_cube (triangulation, -1.0, 1.0);
+  triangulation.refine_global (3);
+
+  for (typename Triangulation<dim>::active_cell_iterator
+         cell = triangulation.begin_active();
+       cell != triangulation.end(); ++cell)
+    if (!cell->is_ghost() && !cell->is_artificial())
+      if (cell->center().norm() < 0.3)
+        {
+          cell->set_refine_flag();
+        }
+
+  triangulation.prepare_coarsening_and_refinement();
+  triangulation.execute_coarsening_and_refinement();
+
+  if (myid == 0)
+    deallog << "#cells = " << triangulation.n_global_active_cells()
+            << std::endl;
+
+  // create an FESystem and fill in no-normal-flux
+  // conditions on boundary 0 (the outer boundary of the cube)
+  static const FESystem<dim> fe(FE_Q<dim>(1), dim);
+  DoFHandler<dim> dofh(triangulation);
+  dofh.distribute_dofs (fe);
+  DoFRenumbering::hierarchical(dofh);
+
+  if (myid == 0)
+    deallog << "#dofs = " << dofh.locally_owned_dofs().size()
+            << std::endl;
+
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dofh, relevant_set);
+
+  ConstraintMatrix constraints;
+  constraints.reinit(relevant_set);
+  DoFTools::make_hanging_node_constraints (dofh, constraints);
+  std::set<unsigned char> no_normal_flux_boundaries;
+  no_normal_flux_boundaries.insert (0);
+  const unsigned int degree = 1;
+  VectorTools::compute_no_normal_flux_constraints (dofh, 0,
+                                                   no_normal_flux_boundaries,
+                                                   constraints,
+                                                   MappingQ<dim>(degree));
+  constraints.close();
+
+  if (myid==0)
+    system("rm -rf no_flux_constraints_03/cm_?.dot");
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  { // write the ConstraintMatrix to a file on each CPU
+    char fname[] = "no_flux_constraints_03/cm_0.dot";
+    fname[26] += myid;
+    std::ofstream file(fname);
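+    // each process writes its own constraints: fname[26] is the '0' in
+    // "cm_0.dot", so the increment above turns the file name into
+    // cm_<rank>.dot (a single digit, i.e. at most ten ranks); process 0
+    // merges and checksums these files below.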
+    constraints.print(file);
+  }
+  MPI_Barrier(MPI_COMM_WORLD);
+  sleep(1);
+  if (myid==0)
+    {
+      // sort and merge the constraint matrices on proc 0, generate a checksum
+      // and output that into the deallog
+      system("cat no_flux_constraints_03/cm_?.dot|sort -n|uniq >no_flux_constraints_03/cm");
+      system("md5sum no_flux_constraints_03/cm >no_flux_constraints_03/cm.check");
+      {
+        std::ifstream file("no_flux_constraints_03/cm.check");
+        std::string str;
+        while (!file.eof())
+          {
+            std::getline(file, str);
+            deallog << str << std::endl;
+          }
+      }
+    }
+
+  // print the number of constraints. since
+  // processors might write info in different
+  // orders, copy all numbers to root processor
+  std::vector<unsigned int> n_constraints_glob (numprocs);
+  unsigned int n_constraints = constraints.n_constraints();
+  MPI_Gather (&n_constraints, 1, MPI_UNSIGNED,
+              &n_constraints_glob[0], 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD);
+  if (myid == 0)
+    for (unsigned int i=0; i<numprocs; ++i)
+      deallog << "#constraints on " << i << ": " << n_constraints_glob[i]
+              << std::endl;
+
+  // dummy assembly: put 1 in all components of the vector
+  TrilinosWrappers::MPI::Vector vector;
+  vector.reinit (dofh.locally_owned_dofs(), MPI_COMM_WORLD);
+  {
+    const unsigned int dofs_per_cell = fe.dofs_per_cell;
+    std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+    Vector<double> local_vector (dofs_per_cell);
+    for (unsigned int i=0; i<dofs_per_cell; ++i)
+      local_vector(i) = 1.;
+
+    typename DoFHandler<dim>::active_cell_iterator
+      cell = dofh.begin_active(),
+      endc = dofh.end();
+    for (; cell!=endc; ++cell)
+      if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
+        {
+          cell->get_dof_indices (local_dof_indices);
+          constraints.distribute_local_to_global (local_vector,
+                                                  local_dof_indices,
+                                                  vector);
+        }
+    vector.compress (Add);
+  }
+
+  // now check that no entries were generated
+  // for constrained entries on the locally
+  // owned range.
+  const std::pair<unsigned int, unsigned int> range = vector.local_range();
+  for (unsigned int i=range.first; i<range.second; ++i)
+    if (constraints.is_constrained(i))
+      Assert (vector(i)==0, ExcInternalError());
+
+  if (myid==0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  {
+    unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+    deallog.push(Utilities::int_to_string(myid));
+
+    if (myid == 0)
+      {
+        std::ofstream logfile(output_file_for_mpi("no_flux_constraints_03").c_str());
+        deallog.attach(logfile);
+        deallog.depth_console(0);
+        deallog.threshold_double(1.e-10);
+
+        deallog.push("3d");
+        test<3>();
+        deallog.pop();
+      }
+    else
+      test<3>();
+  }
+
+  return 0;
+}
diff --git a/tests/mpi/no_flux_constraints_03/ncpu_1/cmp/generic b/tests/mpi/no_flux_constraints_03/ncpu_1/cmp/generic
new file mode 100644
index 0000000000..9e98fde012
--- /dev/null
+++ b/tests/mpi/no_flux_constraints_03/ncpu_1/cmp/generic
@@ -0,0 +1,7 @@
+
+DEAL:0:3d::#cells = 568
+DEAL:0:3d::#dofs = 2481
+DEAL:0:3d::17a5cf8e1f66b693f8b54bf284cab4ac no_flux_constraints_03/cm
+DEAL:0:3d::
+DEAL:0:3d::#constraints on 0: 702
+DEAL:0:3d::OK
diff --git a/tests/mpi/no_flux_constraints_03/ncpu_9/cmp/generic b/tests/mpi/no_flux_constraints_03/ncpu_9/cmp/generic
new file mode 100644
index 0000000000..b03490e36b
--- /dev/null
+++ b/tests/mpi/no_flux_constraints_03/ncpu_9/cmp/generic
@@ -0,0 +1,15 @@
+
+DEAL:0:3d::#cells = 568
+DEAL:0:3d::#dofs = 2481
+DEAL:0:3d::17a5cf8e1f66b693f8b54bf284cab4ac no_flux_constraints_03/cm
+DEAL:0:3d::
+DEAL:0:3d::#constraints on 0: 198
+DEAL:0:3d::#constraints on 1: 221
+DEAL:0:3d::#constraints on 2: 266
+DEAL:0:3d::#constraints on 3: 233
+DEAL:0:3d::#constraints on 4: 300
+DEAL:0:3d::#constraints on 5: 233
+DEAL:0:3d::#constraints on 6: 266
+DEAL:0:3d::#constraints on 7: 221
+DEAL:0:3d::#constraints on 8: 198
+DEAL:0:3d::OK