From e0144f03c9f3d0ebd90f262943af24aaa94fca92 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 5 Jul 2015 13:00:25 -0400
Subject: [PATCH] communicate copy_indices

We need to do a parallel communication step for copy_indices in
MGTransfer. Simply make it work for now.
---
 include/deal.II/multigrid/mg_transfer.h            |   4 +-
 .../deal.II/multigrid/mg_transfer.templates.h      | 109 +++++------
 source/multigrid/mg_transfer_prebuilt.cc           | 179 +++++++++++++++---
 3 files changed, 197 insertions(+), 95 deletions(-)

diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h
index 26c4608084..5a4d9f720c 100644
--- a/include/deal.II/multigrid/mg_transfer.h
+++ b/include/deal.II/multigrid/mg_transfer.h
@@ -266,7 +266,7 @@ private:
    * Organization of the data is like for @p copy_indices_mine.
    */
   std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
-  copy_indices_to_me;
+  copy_indices_global_mine;
 
   /**
    * Additional degrees of freedom for the copy_from_mg() function. These are
@@ -276,7 +276,7 @@ private:
    * Organization of the data is like for @p copy_indices_mine.
    */
   std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
-  copy_indices_from_me;
+  copy_indices_level_mine;
 
   /**
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index c9f61fc135..67fbbbd17f 100644
--- a/include/deal.II/multigrid/mg_transfer.templates.h
+++ b/include/deal.II/multigrid/mg_transfer.templates.h
@@ -28,6 +28,9 @@
 #include <algorithm>
 
+// Here you can turn on some cout statements and MPI Barriers for debugging:
+//#define DEBUG_OUTPUT
+
 DEAL_II_NAMESPACE_OPEN
 
 
@@ -142,46 +145,42 @@ MGTransferPrebuilt<VECTOR>::copy_to_mg (
 {
   reinit_vector(mg_dof_handler, component_to_block_map, dst);
   bool first = true;
+#ifdef DEBUG_OUTPUT
   std::cout << "copy_to_mg src " << src.l2_norm() << std::endl;
   MPI_Barrier(MPI_COMM_WORLD);
+#endif
 
   for (unsigned int level=mg_dof_handler.get_tria().n_global_levels(); level != 0;)
     {
       --level;
       VECTOR &dst_level = dst[level];
-
+
+#ifdef DEBUG_OUTPUT
       MPI_Barrier(MPI_COMM_WORLD);
       int myid=-1;
       MPI_Comm_rank (MPI_COMM_WORLD, &myid);
+#endif
 
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
       for (IT i= copy_indices[level].begin();
           i != copy_indices[level].end(); ++i)
-        {
-          dst_level(i->second) = src(i->first);
-          if (i->first == 41)
-            std::cout << "L" << level << " " << i->first << " -> " << i->second << ": " << src(i->first) << " iam=" << myid << std::endl;
-
-        }
+        dst_level(i->second) = src(i->first);
 
-      for (IT i= copy_indices_to_me[level].begin();
-           i != copy_indices_to_me[level].end(); ++i)
-        {
-          dst_level(i->second) = src(i->first);
-          //if (i->first == 446)
-          if (i->first == 41)
-            std::cout << "L" << level << " " << i->first << " --> " << i->second << ": " << src(i->first)<< " iam=" << myid << std::endl;
-        }
+      for (IT i= copy_indices_global_mine[level].begin();
+           i != copy_indices_global_mine[level].end(); ++i)
+        dst_level(i->second) = src(i->first);
 
       dst_level.compress(VectorOperation::insert);
+#ifdef DEBUG_OUTPUT
       MPI_Barrier(MPI_COMM_WORLD);
       std::cout << "copy_to_mg dst " << level << " " << dst_level.l2_norm() << std::endl;
-
+#endif
       if (!first)
         {
          //if (level<2)
          restrict_and_add (level+1, dst[level], dst[level+1]);
+#ifdef DEBUG_OUTPUT
          std::cout << "copy_to_mg restr&add " << level << " " << dst_level.l2_norm() << std::endl;
-
+#endif
        }
 
       first = false;
@@ -198,6 +197,10 @@ MGTransferPrebuilt<VECTOR>::copy_from_mg(
   OutVector &dst,
   const MGLevelObject<VECTOR> &src) const
 {
+#ifdef DEBUG_OUTPUT
+  int myid=-1;
+  MPI_Comm_rank (MPI_COMM_WORLD, &myid);
+#endif
   // For non-DG: degrees of
   // freedom in the refinement
   // face may need special
@@ -209,48 +212,36 @@ MGTransferPrebuilt<VECTOR>::copy_from_mg(
   for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
     {
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
-
+#ifdef DEBUG_OUTPUT
       MPI_Barrier(MPI_COMM_WORLD);
-      int myid=-1;
-      MPI_Comm_rank (MPI_COMM_WORLD, &myid);
       std::cout << "copy_from_mg src " << level << " " << src[level].l2_norm() << std::endl;
       MPI_Barrier(MPI_COMM_WORLD);
-
+#endif
       // First copy all indices local to this process
-      if (constraints==0)
-        for (IT i= copy_indices[level].begin();
-             i != copy_indices[level].end(); ++i)
-          dst(i->first) = src[level](i->second);
-      else
-        for (IT i= copy_indices[level].begin();
-             i != copy_indices[level].end(); ++i)
-          {
-            std::cout << "L" << level << " " << i->first << " <- " << i->second << ": " << src[level](i->second) << " iam=" << myid << std::endl;
-
-            constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
-          }
+      for (IT i= copy_indices[level].begin();
+           i != copy_indices[level].end(); ++i)
+        dst(i->first) = src[level](i->second);
 
       // Do the same for the indices where the level index is local,
       // but the global index is not
-      if (constraints==0)
-        for (IT i= copy_indices_from_me[level].begin();
-             i != copy_indices_from_me[level].end(); ++i)
-          dst(i->first) = src[level](i->second);
-      else
-        for (IT i= copy_indices_from_me[level].begin();
-             i != copy_indices_from_me[level].end(); ++i)
-          {
-            std::cout << "L" << level << " " << i->first << " <- " << i->second << ": " << src[level](i->second) << " iam=" << myid << " from-me"<< std::endl;
-            constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
-          }
+      for (IT i= copy_indices_level_mine[level].begin();
+           i != copy_indices_level_mine[level].end(); ++i)
+        dst(i->first) = src[level](i->second);
+
+#ifdef DEBUG_OUTPUT
+      {
+        dst.compress(VectorOperation::insert);
+        MPI_Barrier(MPI_COMM_WORLD);
+        std::cout << "copy_from_mg level=" << level << " " << dst.l2_norm() << std::endl;
+      }
+#endif
     }
-  if (constraints == 0)
-    dst.compress(VectorOperation::insert);
-  else
-    dst.compress(VectorOperation::add);
+
+  dst.compress(VectorOperation::insert);
+#ifdef DEBUG_OUTPUT
   MPI_Barrier(MPI_COMM_WORLD);
   std::cout << "copy_from_mg " << dst.l2_norm() << std::endl;
+#endif
 }
@@ -273,25 +264,15 @@ MGTransferPrebuilt<VECTOR>::copy_from_mg_add (
   for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
     {
       typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
-      if (constraints==0)
-        for (IT i= copy_indices[level].begin();
-             i != copy_indices[level].end(); ++i)
-          dst(i->first) += src[level](i->second);
-      else
-        for (IT i= copy_indices[level].begin();
-             i != copy_indices[level].end(); ++i)
-          constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+      for (IT i= copy_indices[level].begin();
+           i != copy_indices[level].end(); ++i)
+        dst(i->first) += src[level](i->second);
 
       // Do the same for the indices where the level index is local,
       // but the global index is not
-      if (constraints==0)
-        for (IT i= copy_indices_from_me[level].begin();
-             i != copy_indices_from_me[level].end(); ++i)
-          dst(i->first) += src[level](i->second);
-      else
-        for (IT i= copy_indices_from_me[level].begin();
-             i != copy_indices_from_me[level].end(); ++i)
-          constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+      for (IT i= copy_indices_level_mine[level].begin();
+           i != copy_indices_level_mine[level].end(); ++i)
+        dst(i->first) += src[level](i->second);
     }
   dst.compress(VectorOperation::add);
 }
diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc
index 8e91b77ec1..a9b2f8e93f 100644
--- a/source/multigrid/mg_transfer_prebuilt.cc
+++ b/source/multigrid/mg_transfer_prebuilt.cc
@@ -76,8 +76,8 @@ void MGTransferPrebuilt<VECTOR>::clear ()
   prolongation_matrices.resize(0);
   prolongation_sparsities.resize(0);
   copy_indices.resize(0);
-  copy_indices_to_me.resize(0);
-  copy_indices_from_me.resize(0);
+  copy_indices_global_mine.resize(0);
+  copy_indices_level_mine.resize(0);
   component_to_block_map.resize(0);
   interface_dofs.resize(0);
   constraints = 0;
@@ -256,7 +256,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
       prolongation_matrices[level]->compress(VectorOperation::insert);
     }
 
-  // Now we are filling the variables copy_indices*, which are essentially
+  // * Now we are filling the variables copy_indices*, which are essentially
   // maps from global to mgdof for each level stored as a std::vector of
   // pairs. We need to split this map on each level depending on the ownership
   // of the global and mgdof, so that we later not access non-local elements
@@ -264,11 +264,29 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
   // We keep track in the bitfield dof_touched which global dof has
   // been processed already (on the current level). This is the same as
   // the multigrid running in serial.
-  // Only entering on the finest level gives wrong results (why?)
-
+
+  struct dof_pair
+  {
+    unsigned int level;
+    unsigned int global_dof_index;
+    unsigned int level_dof_index;
+
+    dof_pair(unsigned int level, unsigned int global_dof_index, unsigned int level_dof_index)
+      :
+      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
+    {}
+
+    dof_pair()
+    {}
+  };
+
+  // map cpu_index -> vector of data
+  // that will be copied into copy_indices_level_mine
+  std::vector<dof_pair> send_data_temp;
+
   copy_indices.resize(n_levels);
-  copy_indices_from_me.resize(n_levels);
-  copy_indices_to_me.resize(n_levels);
+  copy_indices_global_mine.resize(n_levels);
+  copy_indices_level_mine.resize(n_levels);
 
   IndexSet globally_relevant;
   DoFTools::extract_locally_relevant_dofs(mg_dof, globally_relevant);
@@ -279,8 +297,8 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
     {
       std::vector<bool> dof_touched(globally_relevant.n_elements(), false);
       copy_indices[level].clear();
-      copy_indices_from_me[level].clear();
-      copy_indices_to_me[level].clear();
+      copy_indices_level_mine[level].clear();
+      copy_indices_global_mine[level].clear();
 
       typename DoFHandler<dim,spacedim>::active_cell_iterator
       level_cell = mg_dof.begin_active(level);
@@ -313,22 +331,125 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
               bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
               bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);
+
               if (global_mine && level_mine)
-                copy_indices[level].push_back(
-                  std::pair<types::global_dof_index, types::global_dof_index> (global_dof_indices[i], level_dof_indices[i]));
-              else if (level_mine)
-                copy_indices_from_me[level].push_back(
-                  std::pair<types::global_dof_index, types::global_dof_index> (global_dof_indices[i], level_dof_indices[i]));
-              else if (global_mine)
-                copy_indices_to_me[level].push_back(
-                  std::pair<types::global_dof_index, types::global_dof_index> (global_dof_indices[i], level_dof_indices[i]));
-//            else
-//              continue;
+                {
+                  copy_indices[level].push_back(
+                    std::pair<types::global_dof_index, types::global_dof_index> (global_dof_indices[i], level_dof_indices[i]));
+                }
+              else if(global_mine)
+                {
+                  copy_indices_global_mine[level].push_back(
+                    std::pair<types::global_dof_index, types::global_dof_index> (global_dof_indices[i], level_dof_indices[i]));
+
+                  //send this to the owner of the level_dof:
+                  send_data_temp.push_back(dof_pair(level, global_dof_indices[i], level_dof_indices[i]));
+                }
+              else
+                {
+                  // somebody will send those to me
+                }
 
               dof_touched[global_idx] = true;
             }
        }
    }
+
+  const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
+    (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+     (&mg_dof.get_tria()));
+  AssertThrow(send_data_temp.size()==0 || tria!=NULL, ExcMessage("parallel Multigrid only works with a distributed Triangulation!"));
+
+  if (tria)
+    {
+      // TODO: This is a gigantic hack. We don't have a list of all our ghost
+      // neighbors, so we communicate with every other process. Searching the
+      // owner for every single DoF becomes quite inefficient. Please fix
+      // this, Timo.
+      std::vector<unsigned int> neighbors;
+      std::map<int, std::vector<dof_pair> > send_data;
+
+      {
+        // TODO: replace this with the minimum ghost neighbor list that should
+        // come from Triangulation
+        int n_proc = Utilities::MPI::n_mpi_processes(tria->get_communicator());
+        int myid = tria->locally_owned_subdomain();
+        for (unsigned int i=0;i<n_proc;++i)
+          if (i!=myid)
+            neighbors.push_back(i);
+      }
+
+      for (std::vector<dof_pair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
+        {
+          for (std::vector<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
+            {
+              if (mg_dof.locally_owned_mg_dofs_per_processor(dofpair->level)[*it].is_element(dofpair->level_dof_index))
+                {
+                  send_data[*it].push_back(*dofpair);
+                  break;
+                }
+            }
+        }
+
+      // * send
+      std::vector<MPI_Request> requests;
+      {
+        for (std::vector<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
+          {
+            requests.push_back(MPI_Request());
+            unsigned int dest = *it;
+            std::vector<dof_pair> & data = send_data[dest];
+            if (data.size())
+              MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+            else
+              MPI_Isend(NULL, 0, MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
+          }
+      }
+
+      // * receive
+      {
+        typename std::vector<dof_pair> receive;
+        for (std::vector<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
+          {
+            MPI_Status status;
+            int len;
+            MPI_Probe(MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
+            MPI_Get_count(&status, MPI_BYTE, &len);
+
+            if (len==0)
+              {
+                int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                                   tria->get_communicator(), &status);
+                Assert(err==MPI_SUCCESS, ExcInternalError());
+                continue;
+              }
+
+            int count = len / sizeof(dof_pair);
+            Assert(count * sizeof(dof_pair) == len, ExcInternalError());
+            receive.resize(count);
+
+            void *ptr = &receive[0];
+            int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+                               tria->get_communicator(), &status);
+            Assert(err==MPI_SUCCESS, ExcInternalError());
+
+            for (unsigned int i=0; i<count; ++i)
+              {
+                copy_indices_level_mine[receive[i].level].push_back(
+                  std::pair<types::global_dof_index, types::global_dof_index> (receive[i].global_dof_index, receive[i].level_dof_index)
+                );
+              }
+          }
+      }
+
+      // * wait
+      if (requests.size() > 0)
+        {
+          MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+          requests.clear();
+        }
+    }
 
   // If we are in debugging mode, we order the copy indices, so we get
   // more reliable output for regression texts
@@ -336,10 +457,10 @@
   std::less<std::pair<types::global_dof_index, types::global_dof_index> > compare;
   for (unsigned int level=0; level<copy_indices.size(); ++level)
     std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
-  for (unsigned int level=0; level<copy_indices_from_me.size(); ++level)
-    std::sort(copy_indices_from_me[level].begin(), copy_indices_from_me[level].end(), compare);
-  for (unsigned int level=0; level<copy_indices_to_me.size(); ++level)
-    std::sort(copy_indices_to_me[level].begin(), copy_indices_to_me[level].end(), compare);
+  for (unsigned int level=0; level<copy_indices_level_mine.size(); ++level)
+    std::sort(copy_indices_level_mine[level].begin(), copy_indices_level_mine[level].end(), compare);
+  for (unsigned int level=0; level<copy_indices_global_mine.size(); ++level)
+    std::sort(copy_indices_global_mine[level].begin(), copy_indices_global_mine[level].end(), compare);
 #endif
 }
@@ ... @@ MGTransferPrebuilt<VECTOR>::print_indices (std::ostream &os) const
       for (unsigned int i=0; i<copy_indices[level].size(); ++i)
         os << "copy_indices[" << level
            << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
     }
 
-  for (unsigned int level = 0; level
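
Note (not part of the patch): the communication added to build_matrices() above boils down to shipping a vector of POD dof_pair structs to each peer as raw bytes and collecting one message back from every other rank. Below is a minimal standalone sketch of that pattern; the struct name dof_pair and the message tag 71 mirror the patch, while the file name, the dummy payload and the printf reporting are made up for illustration and are not deal.II API. Compile with mpicxx and run under mpirun to see each rank report what it received.

// exchange_dof_pairs.cc : minimal sketch, not deal.II code.
// Each rank MPI_Isends a vector of POD structs as MPI_BYTE to every other
// rank, receives one message per peer via MPI_Probe/MPI_Get_count/MPI_Recv,
// and finishes the sends with MPI_Waitall.
#include <mpi.h>
#include <cstdio>
#include <vector>

struct dof_pair                 // plain-old-data, safe to ship as bytes
{
  unsigned int level;
  unsigned int global_dof_index;
  unsigned int level_dof_index;
};

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int myid, n_proc;
  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
  MPI_Comm_size(MPI_COMM_WORLD, &n_proc);

  // One outgoing buffer per destination; here every peer gets one dummy entry.
  std::vector<std::vector<dof_pair> > send_data(n_proc);
  for (int dest=0; dest<n_proc; ++dest)
    if (dest != myid)
      {
        dof_pair p;
        p.level = 0;
        p.global_dof_index = myid;   // pretend this rank owns the global dof
        p.level_dof_index = dest;    // and 'dest' owns the level dof
        send_data[dest].push_back(p);
      }

  // * send: one non-blocking message per peer, possibly of length zero
  std::vector<MPI_Request> requests;
  for (int dest=0; dest<n_proc; ++dest)
    if (dest != myid)
      {
        requests.push_back(MPI_Request());
        std::vector<dof_pair> &data = send_data[dest];
        MPI_Isend(data.empty() ? NULL : &data[0],
                  (int)(data.size()*sizeof(dof_pair)), MPI_BYTE,
                  dest, 71, MPI_COMM_WORLD, &requests.back());
      }

  // * receive: exactly one message is expected from every other rank
  std::vector<dof_pair> receive;
  for (int msg=0; msg<n_proc-1; ++msg)
    {
      MPI_Status status;
      int len;
      MPI_Probe(MPI_ANY_SOURCE, 71, MPI_COMM_WORLD, &status);
      MPI_Get_count(&status, MPI_BYTE, &len);

      receive.resize(len/sizeof(dof_pair));
      MPI_Recv(receive.empty() ? NULL : &receive[0], len, MPI_BYTE,
               status.MPI_SOURCE, status.MPI_TAG, MPI_COMM_WORLD, &status);

      for (unsigned int i=0; i<receive.size(); ++i)
        std::printf("rank %d: level dof %u (global %u, level %u) announced by rank %d\n",
                    myid, receive[i].level_dof_index, receive[i].global_dof_index,
                    receive[i].level, status.MPI_SOURCE);
    }

  // * wait: complete the sends before the buffers go out of scope
  if (!requests.empty())
    MPI_Waitall((int)requests.size(), &requests[0], MPI_STATUSES_IGNORE);

  MPI_Finalize();
  return 0;
}

Sending the struct as MPI_BYTE assumes the same memory layout on every rank, which holds on a homogeneous cluster; a dedicated MPI datatype would be the portable alternative.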