From a948cf363b278d8d15a15551690196cd1755e8a9 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 4 Oct 2015 15:05:19 -0400
Subject: [PATCH] astyle

---
 examples/step-16/step-16.cc                   |   4 +-
 examples/step-50/step-50.cc                   |  26 ++--
 include/deal.II/meshworker/simple.h           |  16 +--
 .../deal.II/multigrid/mg_constrained_dofs.h   |  18 +--
 .../deal.II/multigrid/mg_transfer.templates.h |   6 +-
 source/dofs/dof_tools.cc                      |  12 +-
 source/multigrid/mg_tools.cc                  |   8 +-
 source/multigrid/mg_transfer_prebuilt.cc      |  33 ++---
 tests/multigrid/constrained_dofs_01.cc        | 113 +++++++++---
 tests/multigrid/transfer_04.cc                |  66 +++++-----
 10 files changed, 152 insertions(+), 150 deletions(-)

diff --git a/examples/step-16/step-16.cc b/examples/step-16/step-16.cc
index 921c1c7fb1..b94d2d7282 100644
--- a/examples/step-16/step-16.cc
+++ b/examples/step-16/step-16.cc
@@ -658,8 +658,8 @@ int main (int argc, char *argv[])
   try
     {
       using namespace Step16;

-      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
-          numbers::invalid_unsigned_int);
+      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
+                                                          numbers::invalid_unsigned_int);

       LaplaceProblem<2> laplace_problem(1);
       laplace_problem.run ();

diff --git a/examples/step-50/step-50.cc b/examples/step-50/step-50.cc
index 8573b59089..205ef37210 100644
--- a/examples/step-50/step-50.cc
+++ b/examples/step-50/step-50.cc
@@ -626,7 +626,7 @@ namespace Step50
         boundary_constraints[level].close ();

         boundary_interface_constraints[level]
-        .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[ (level)]);
+        .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[ (level)]);
         boundary_interface_constraints[level].close ();
       }

@@ -719,9 +719,9 @@ namespace Step50
       for (unsigned int i=0; ilevel(),local_dof_indices[i])
-            || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
+          /** old HEAD:
+          if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
+            || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
          if ( !(interface_dofs_on_level.is_element(local_dof_indices[i])==true &&
                 interface_dofs_on_level.is_element(local_dof_indices[j])==false))
            cell_matrix(i,j) = 0;

@@ -864,7 +864,7 @@ namespace Step50
     PreconditionMG >
     preconditioner(mg_dof_handler, mg, mg_transfer);
-
+
     // With all this together, we can finally
     // get about solving the linear system in
     // the usual way:

@@ -923,7 +923,7 @@ namespace Step50
   template
   void LaplaceProblem::refine_grid ()
   {
-
+
     Vector estimated_error_per_cell (triangulation.n_active_cells());

     TrilinosWrappers::MPI::Vector temp_solution;

@@ -941,7 +941,7 @@ namespace Step50
                                        estimated_error_per_cell,
                                        0.3, 0.0);
-
+
     triangulation.prepare_coarsening_and_refinement ();
     triangulation.execute_coarsening_and_refinement ();
   }

@@ -1005,8 +1005,8 @@ namespace Step50
         std::ofstream visit_master (visit_master_filename.c_str());
         data_out.write_visit_record (visit_master, filenames);

-        std::cout << "wrote " << pvtu_master_filename << std::endl;
-
+        std::cout << "wrote " << pvtu_master_filename << std::endl;
+
       }
   }

@@ -1059,10 +1059,10 @@ namespace Step50
         solve ();
         output_results (cycle);

-        TrilinosWrappers::MPI::Vector temp = solution;
-        system_matrix.residual(temp,solution,system_rhs);
-        constraints.set_zero(temp);
-        deallog << "residual " << temp.l2_norm() << std::endl;
+        TrilinosWrappers::MPI::Vector temp = solution;
+        system_matrix.residual(temp,solution,system_rhs);
+        constraints.set_zero(temp);
+        deallog << "residual " << temp.l2_norm() << std::endl;
      }
  }
}

diff --git a/include/deal.II/meshworker/simple.h b/include/deal.II/meshworker/simple.h
index ca95d81cf7..e4ceda1cb9 100644
--- a/include/deal.II/meshworker/simple.h
+++ b/include/deal.II/meshworker/simple.h
@@ -951,11 +951,11 @@ namespace MeshWorker
               if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
                   !mg_constrained_dofs->at_refinement_edge(level, i2[k]))
                 {
-                  if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                       !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+                  if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                       !mg_constrained_dofs->is_boundary_index(level, i2[k]))
                       ||
-                      (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                       mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+                      (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                       mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
                        i1[j] == i2[k]))
                     G.add(i1[j], i2[k], M(j,k));
                 }
@@ -981,11 +981,11 @@ namespace MeshWorker
               if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
                   !mg_constrained_dofs->at_refinement_edge(level, i2[k]))
                 {
-                  if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                       !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+                  if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                       !mg_constrained_dofs->is_boundary_index(level, i2[k]))
                       ||
-                      (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                       mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+                      (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                       mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
                        i1[j] == i2[k]))
                     G.add(i1[j], i2[k], M(k,j));
                 }

diff --git a/include/deal.II/multigrid/mg_constrained_dofs.h b/include/deal.II/multigrid/mg_constrained_dofs.h
index 4f4725d6c9..9b4129bc11 100644
--- a/include/deal.II/multigrid/mg_constrained_dofs.h
+++ b/include/deal.II/multigrid/mg_constrained_dofs.h
@@ -247,15 +247,15 @@ const std::vector > &
MGConstrainedDoFs::get_boundary_indices () const
{
  if (boundary_indices_old.size()!=boundary_indices.size())
-      {
-        boundary_indices_old.resize(boundary_indices.size());
-        for (unsigned int l=0;l tmp;
-            boundary_indices[l].fill_index_vector(tmp);
-            boundary_indices_old[l].insert(tmp.begin(), tmp.end());
-          }
-      }
+    {
+      boundary_indices_old.resize(boundary_indices.size());
+      for (unsigned int l=0; l tmp;
+          boundary_indices[l].fill_index_vector(tmp);
+          boundary_indices_old[l].insert(tmp.begin(), tmp.end());
+        }
+    }
  return boundary_indices_old;
}

diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 5f56a9be9c..57fc78eba6 100644
--- a/include/deal.II/multigrid/mg_transfer.templates.h
+++ b/include/deal.II/multigrid/mg_transfer.templates.h
@@ -195,9 +195,9 @@ MGTransferPrebuilt::copy_to_mg (
        i != copy_indices[level].end(); ++i)
     dst_level(i->second) = src(i->first);

-  for (IT i= copy_indices_global_mine[level].begin();
-       i != copy_indices_global_mine[level].end(); ++i)
-    dst_level(i->second) = src(i->first);
+  for (IT i= copy_indices_global_mine[level].begin();
+       i != copy_indices_global_mine[level].end(); ++i)
+    dst_level(i->second) = src(i->first);

  dst_level.compress(VectorOperation::insert);

#ifdef DEBUG_OUTPUT

diff --git a/source/dofs/dof_tools.cc b/source/dofs/dof_tools.cc
index c57dc684d5..ae287fb9f3 100644
--- a/source/dofs/dof_tools.cc
+++ b/source/dofs/dof_tools.cc
@@ -976,8 +976,8 @@ namespace DoFTools
  template
  void
  extract_locally_relevant_mg_dofs (const DH &dof_handler,
-                                    IndexSet &dof_set,
-                                    unsigned int level)
+                                    IndexSet &dof_set,
+                                    unsigned int level)
  {
    // collect all the locally owned dofs
    dof_set = dof_handler.locally_owned_mg_dofs(level);

@@ -989,7 +989,7 @@ namespace DoFTools
    std::set global_dof_indices;

    typename DH::cell_iterator cell = dof_handler.begin(level),
-                               endc = dof_handler.end(level);
+                               endc = dof_handler.end(level);
    for (; cell!=endc; ++cell)
      {
        types::subdomain_id id = cell->level_subdomain_id();
@@ -1003,11 +1003,11 @@ namespace DoFTools
        cell->get_mg_dof_indices(dof_indices);

        for (std::vector::iterator it=dof_indices.begin();
-             it!=dof_indices.end();
-             ++it)
+             it!=dof_indices.end();
+             ++it)
          if (!dof_set.is_element(*it))
            global_dof_indices.insert(*it);
-      }
+      }

    dof_set.add_indices(global_dof_indices.begin(),
                        global_dof_indices.end());

diff --git a/source/multigrid/mg_tools.cc b/source/multigrid/mg_tools.cc
index 44f3faf42c..81c8df6ac7 100644
--- a/source/multigrid/mg_tools.cc
+++ b/source/multigrid/mg_tools.cc
@@ -1513,7 +1513,7 @@ namespace MGTools
      {
        // do not look at artificial cells
        if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
-           && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+            && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
          continue;

        bool has_coarser_neighbor = false;
@@ -1531,13 +1531,13 @@ namespace MGTools
              // only process cell pairs if one of them is mine
              if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
-                 &&
+                  &&
                  neighbor->level_subdomain_id()==numbers::artificial_subdomain_id)
//                  neighbor->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain()
-//                  &&
+//                  &&
//                  cell->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain())
                continue;
-
+
              // Do refinement face
              // from the coarse side
              if (neighbor->level() < cell->level())

diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc
index c237e01cc0..3e05337ae7 100644
--- a/source/multigrid/mg_transfer_prebuilt.cc
+++ b/source/multigrid/mg_transfer_prebuilt.cc
@@ -284,7 +284,7 @@ void MGTransferPrebuilt::build_matrices (
  // We keep track in the bitfield dof_touched which global dof has
  // been processed already (on the current level). This is the same as
  // the multigrid running in serial.
-
+
  struct dof_pair
  {
    unsigned int level;
    unsigned int global_dof_index;
    unsigned int level_dof_index;

    dof_pair(unsigned int level, unsigned int global_dof_index, unsigned int level_dof_index)
      :
-      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
+      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
    {}

    dof_pair()
@@ -303,7 +303,7 @@ void MGTransferPrebuilt::build_matrices (
  // map cpu_index -> vector of data
  // that will be copied into copy_indices_level_mine
  std::vector send_data_temp;
-
+
  copy_indices.resize(n_levels);
  copy_indices_global_mine.resize(n_levels);
  copy_indices_level_mine.resize(n_levels);
@@ -355,12 +355,12 @@ void MGTransferPrebuilt::build_matrices (
            if (global_mine && level_mine)
              {
                copy_indices[level].push_back(
-                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));
              }
-            else if(global_mine)
+            else if (global_mine)
              {
                copy_indices_global_mine[level].push_back(
-                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));

                //send this to the owner of the level_dof:
                send_data_temp.push_back(dof_pair(level, global_dof_indices[i], level_dof_indices[i]));
@@ -374,7 +374,7 @@ void MGTransferPrebuilt::build_matrices (
            }
        }
    }
-
+
  const dealii::parallel::distributed::Triangulation *tria =
    (dynamic_cast*>
     (&mg_dof.get_tria()));
@@ -386,6 +386,7 @@ void MGTransferPrebuilt::build_matrices (
      // neighbors, so we communicate with every other process. Searching the
      // owner for every single DoF becomes quite inefficient. Please fix
      // this, Timo.
+
      std::vector neighbors;
      std::map > send_data;

@@ -394,13 +395,13 @@ void MGTransferPrebuilt::build_matrices (
      // come from Triangulation
      int n_proc = Utilities::MPI::n_mpi_processes(tria->get_communicator());
      int myid = tria->locally_owned_subdomain();
-      for (unsigned int i=0;i::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
+      for (typename std::vector::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
        {
          for (std::vector::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
            {
@@ -419,7 +420,7 @@ void MGTransferPrebuilt::build_matrices (
        {
          requests.push_back(MPI_Request());
          unsigned int dest = *it;
-          std::vector & data = send_data[dest];
+          std::vector &data = send_data[dest];
          if (data.size())
            MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
          else
@@ -440,8 +441,8 @@ void MGTransferPrebuilt::build_matrices (
          if (len==0)
            {
              int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                 tria->get_communicator(), &status);
-              Assert(err==MPI_SUCCESS, ExcInternalError());
+                                 tria->get_communicator(), &status);
+              AssertThrow(err==MPI_SUCCESS, ExcInternalError());
              continue;
            }

@@ -451,14 +452,14 @@ void MGTransferPrebuilt::build_matrices (
          void *ptr = &receive[0];
          int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                             tria->get_communicator(), &status);
-          Assert(err==MPI_SUCCESS, ExcInternalError());
+                             tria->get_communicator(), &status);
+          AssertThrow(err==MPI_SUCCESS, ExcInternalError());

          for (unsigned int i=0; i (receive[i].global_dof_index, receive[i].level_dof_index)
-              );
+                std::pair (receive[i].global_dof_index, receive[i].level_dof_index)
+              );
            }
        }
    }

diff --git a/tests/multigrid/constrained_dofs_01.cc b/tests/multigrid/constrained_dofs_01.cc
index b9d7be377e..59c2ab358f 100644
--- a/tests/multigrid/constrained_dofs_01.cc
+++ b/tests/multigrid/constrained_dofs_01.cc
@@ -42,24 +42,24 @@ using namespace std;

- std::string id_to_string(const CellId &id)
- {
-   std::ostringstream ss;
-   ss << id;
-   return ss.str();
- }
+std::string id_to_string(const CellId &id)
+{
+  std::ostringstream ss;
+  ss << id;
+  return ss.str();
+}

template
void setup_tria(parallel::distributed::Triangulation &tr)
{
  GridGenerator::hyper_cube(tr);
  tr.refine_global(2);
-
+
  for (typename parallel::distributed::Triangulation::active_cell_iterator cell = tr.begin_active();
       cell != tr.end(); ++cell)
    {
      if (id_to_string(cell->id()) == "0_2:11")
-        cell->set_refine_flag();
+        cell->set_refine_flag();
    }
  tr.execute_coarsening_and_refinement();
}

@@ -83,49 +83,50 @@ void check_fe(FiniteElement &fe)
  dofh.distribute_mg_dofs(fe);

  MGConstrainedDoFs mg_constrained_dofs_ref;
-  { // reorder
-    parallel::distributed::Triangulation tr(MPI_COMM_SELF,
-                                            Triangulation::none,
-                                            parallel::distributed::Triangulation::construct_multigrid_hierarchy);
-    setup_tria(tr);
-
-    DoFHandler dofhref(tr);
-    dofhref.distribute_dofs(fe);
-    dofhref.distribute_mg_dofs(fe);
+  {
+    // reorder
+    parallel::distributed::Triangulation tr(MPI_COMM_SELF,
+                                            Triangulation::none,
+                                            parallel::distributed::Triangulation::construct_multigrid_hierarchy);
+    setup_tria(tr);
+
+    DoFHandler dofhref(tr);
+    dofhref.distribute_dofs(fe);
+    dofhref.distribute_mg_dofs(fe);
+
+    //std::map > dofmap;
+    std::map > mgdofmap;
+
+    for (typename DoFHandler::level_cell_iterator cell = dofhref.begin();
+         cell != dofhref.end(); ++cell)
+      {
+        if (!cell->is_locally_owned_on_level())
+          continue;
+
+        std::vector &d = mgdofmap[id_to_string(cell->id())];
+        d.resize(fe.dofs_per_cell);
+        cell->get_mg_dof_indices(d);
+      }
+
+    for (typename DoFHandler::level_cell_iterator cell = dofh.begin();
+         cell != dofh.end(); ++cell)
+      {
+        if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+          continue;
+
+        std::vector &renumbered = mgdofmap[id_to_string(cell->id())];
+        cell->set_mg_dof_indices(renumbered);
+        cell->update_cell_dof_indices_cache();
+      }
+
+    typename FunctionMap::type dirichlet_boundary;
+    ZeroFunction homogeneous_dirichlet_bc (1);
+    dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
+    mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
+  }

-    //std::map > dofmap;
-    std::map > mgdofmap;
-    for (typename DoFHandler::level_cell_iterator cell = dofhref.begin();
-         cell != dofhref.end(); ++cell)
-      {
-        if (!cell->is_locally_owned_on_level())
-          continue;
-        std::vector &d = mgdofmap[id_to_string(cell->id())];
-        d.resize(fe.dofs_per_cell);
-        cell->get_mg_dof_indices(d);
-      }
-
-    for (typename DoFHandler::level_cell_iterator cell = dofh.begin();
-         cell != dofh.end(); ++cell)
-      {
-        if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
-          continue;
-
-        std::vector &renumbered = mgdofmap[id_to_string(cell->id())];
-        cell->set_mg_dof_indices(renumbered);
-        cell->update_cell_dof_indices_cache();
-      }
-
-    typename FunctionMap::type dirichlet_boundary;
-    ZeroFunction homogeneous_dirichlet_bc (1);
-    dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
-    mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
-  }
-
-
  MGConstrainedDoFs mg_constrained_dofs;

  typename FunctionMap::type dirichlet_boundary;
@@ -141,25 +142,25 @@ void check_fe(FiniteElement &fe)

      IndexSet rei = mg_constrained_dofs.get_refinement_edge_indices (level);
      deallog << "get_refinement_edge_indices:" << std::endl;
      rei.print(deallog);
-
+
      IndexSet bi = mg_constrained_dofs.get_boundary_indices (level);
      deallog << "get_boundary_indices:" << std::endl;
      bi.print(deallog);

      IndexSet relevant;
      DoFTools::extract_locally_relevant_mg_dofs (dofh,
-                                                  relevant, level);
+                                                  relevant, level);
      deallog << "relevant:" << std::endl;
      relevant.print(deallog);

      // the indexsets should be the same when run in parallel (on the
      // relevant subset):
      deallog << ((rei == (relevant & mg_constrained_dofs_ref.get_refinement_edge_indices(level)))
-                  ?"ok ":"FAIL ")
-              << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
-                  ?"ok ":"FAIL ")
-              << std::endl;
-
+                  ?"ok ":"FAIL ")
+              << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
+                  ?"ok ":"FAIL ")
+              << std::endl;
+
    }
}

@@ -180,7 +181,7 @@ void check()
}

int main(int argc, char *argv[])
-{
+{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  MPILogInitAll log;

diff --git a/tests/multigrid/transfer_04.cc b/tests/multigrid/transfer_04.cc
index f8a53a58f8..7912c169e5 100644
--- a/tests/multigrid/transfer_04.cc
+++ b/tests/multigrid/transfer_04.cc
@@ -48,45 +48,45 @@ using namespace std;

- std::string id_to_string(const CellId &id)
- {
-   std::ostringstream ss;
-   ss << id;
-   return ss.str();
- }
+std::string id_to_string(const CellId &id)
+{
+  std::ostringstream ss;
+  ss << id;
+  return ss.str();
+}

template
void setup_tria(parallel::distributed::Triangulation &tr)
{
  GridGenerator::hyper_cube(tr);
  tr.refine_global(2);
-
+
  for (typename parallel::distributed::Triangulation::active_cell_iterator cell = tr.begin_active();
       cell != tr.end(); ++cell)
    {
      if (id_to_string(cell->id()) == "0_2:03"
-          || id_to_string(cell->id()) == "0_2:00"
-          || id_to_string(cell->id()) == "0_2:01"
-          || id_to_string(cell->id()) == "0_2:12")
-        cell->set_refine_flag();
+          || id_to_string(cell->id()) == "0_2:00"
+          || id_to_string(cell->id()) == "0_2:01"
+          || id_to_string(cell->id()) == "0_2:12")
+        cell->set_refine_flag();
    }
  tr.execute_coarsening_and_refinement();

  for (typename parallel::distributed::Triangulation::active_cell_iterator cell = tr.begin_active();
       cell != tr.end(); ++cell)
    {
      if (id_to_string(cell->id()) == "0_3:032"
-          || id_to_string(cell->id()) == "0_3:000")
-        cell->set_refine_flag();
+          || id_to_string(cell->id()) == "0_3:000")
+        cell->set_refine_flag();
    }
  tr.execute_coarsening_and_refinement();
-
+
  for (typename parallel::distributed::Triangulation::cell_iterator cell = tr.begin();
       cell != tr.end(); ++cell)
    {
      deallog << "cell=" << cell->id()
-              << " level_subdomain_id=" << cell->level_subdomain_id()
-              << std::endl;
+              << " level_subdomain_id=" << cell->level_subdomain_id()
+              << std::endl;
    }
}

@@ -103,7 +103,7 @@ void check_fe(FiniteElement &fe)
{
  deallog << fe.get_name() << std::endl;

-  parallel::distributed::Triangulation tr(MPI_COMM_WORLD,
+  parallel::distributed::Triangulation tr(MPI_COMM_WORLD,
                                          Triangulation::none,
                                          parallel::distributed::Triangulation::construct_multigrid_hierarchy);
  setup_tria(tr);

@@ -113,14 +113,14 @@ void check_fe(FiniteElement &fe)
      DataOut data_out;
      Vector subdomain (tr.n_active_cells());
      for (unsigned int i=0; i &fe)
  ConstraintMatrix hanging_node_constraints;
  IndexSet locally_relevant_set;
  DoFTools::extract_locally_relevant_dofs (dofh,
-                                           locally_relevant_set);
+                                           locally_relevant_set);
  hanging_node_constraints.reinit (locally_relevant_set);
  DoFTools::make_hanging_node_constraints (dofh, hanging_node_constraints);
  hanging_node_constraints.close();

@@ -149,14 +149,14 @@ void check_fe(FiniteElement &fe)
  for (unsigned int level=u.min_level(); level<=u.max_level(); ++level)
    {
      u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
-      for (unsigned int i=0;i &fe)
  hanging_node_constraints.distribute(v);

  {
-    for (unsigned int i=0;i1e-5)
-          deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
+        unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
+        if (abs(v[index] - 1.0)>1e-5)
+          deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
      }
  }
  deallog << "ok" << std::endl;

@@ -190,7 +190,7 @@ void check()
}

int main(int argc, char *argv[])
-{
+{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  MPILogInitAll log;
-- 
2.39.5
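
Editor's note (not part of the patch; ignored by git am since it follows the signature line): the mg_transfer_prebuilt.cc hunks above re-indent a point-to-point exchange in which each rank packs plain-old-data records into a byte buffer, posts MPI_Isend to its neighbor ranks, and receives by probing for the message size before calling MPI_Recv; the patch also switches the checks of the MPI return codes from Assert to AssertThrow so they remain active in optimized builds. The following standalone C++ sketch illustrates that same probe-then-receive pattern with plain MPI under stated assumptions: the DofPair struct, the two-rank setup, and the buffer sizes are illustrative and are not deal.II code.

// exchange_sketch.cc -- illustrative only; compile with mpicxx and run with 2 ranks.
#include <mpi.h>
#include <cstdio>
#include <vector>

struct DofPair                 // hypothetical POD record, stands in for dof_pair in the patch
{
  unsigned int level;
  unsigned int global_dof_index;
  unsigned int level_dof_index;
};

int main(int argc, char *argv[])
{
  MPI_Init(&argc, &argv);
  int rank = 0, size = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  const int tag = 71;          // same tag value the patch uses, otherwise arbitrary

  if (size >= 2 && rank == 0)
    {
      // Pack POD records and ship them as raw bytes to rank 1.
      std::vector<DofPair> data(3);
      for (unsigned int i = 0; i < data.size(); ++i)
        data[i].level = i;

      MPI_Request request;
      int err = MPI_Isend(data.data(), data.size() * sizeof(DofPair), MPI_BYTE,
                          1, tag, MPI_COMM_WORLD, &request);
      if (err != MPI_SUCCESS)                       // check unconditionally, as AssertThrow does
        MPI_Abort(MPI_COMM_WORLD, err);
      MPI_Wait(&request, MPI_STATUS_IGNORE);
    }
  else if (size >= 2 && rank == 1)
    {
      // Probe first so the receive buffer can be sized to the incoming message.
      MPI_Status status;
      MPI_Probe(0, tag, MPI_COMM_WORLD, &status);
      int len = 0;
      MPI_Get_count(&status, MPI_BYTE, &len);

      std::vector<DofPair> receive(len / sizeof(DofPair));
      int err = MPI_Recv(receive.data(), len, MPI_BYTE, status.MPI_SOURCE,
                         status.MPI_TAG, MPI_COMM_WORLD, &status);
      if (err != MPI_SUCCESS)
        MPI_Abort(MPI_COMM_WORLD, err);
      std::printf("rank 1 received %zu records\n", receive.size());
    }

  MPI_Finalize();
  return 0;
}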