// algorithms on adaptive meshes; if some of the things below seem
// strange, take a look at the @ref mg_paper.
//
- // Our first job is to identify those degrees of freedom on each
- // level that are located on interfaces between adaptively refined
- // levels, and those that lie on the interface but also on the
- // exterior boundary of the domain. As in many other parts of the
- // library, we do this by using Boolean masks, i.e. vectors of
- // Booleans each element of which indicates whether the
- // corresponding degree of freedom index is an interface DoF or
- // not. The <code>MGConstraints</code> already computed the
+ // Our first job is to identify those degrees of freedom on each level
+ // that are located on interfaces between adaptively refined levels, and
+ // those that lie on the interface but also on the exterior boundary of
+ // the domain. The <code>MGConstrainedDoFs</code> object already computed the
// information for us when we called initialize in
- // std::vector<std::vector<bool> > interface_dofs
- // TODO: the replacement for this only exists on the parmg branch right
- // now:
- std::vector<std::vector<bool> > boundary_interface_dofs
- = mg_constrained_dofs.get_refinement_edge_boundary_indices ();
+ // <code>setup_system()</code>. The information is now stored as one
+ // IndexSet per level, accessible through get_refinement_edge_indices()
+ // and get_refinement_edge_boundary_indices().
+
+ // Next, communicate the contents of mg_constrained_dofs between
+ // processors: each processor only knows the status of its locally
+ // owned level DoFs, so we ask the owners of all other locally
+ // relevant level DoFs which of those are refinement edge DoFs and
+ // which of them additionally lie on the boundary.
+ {
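+ // Work level by level. The message tags 1000+l (requests) and
+ // 2000+l (replies) keep the exchanges of different levels apart.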
+ for (unsigned int l=0;l<triangulation.n_global_levels(); ++l)
+ {
+ std::cout << "level " << l << std::endl;
+
+ std::map<unsigned int, IndexSet> to_send;
+
+
+ // determine the DoFs we need to get information for: all locally
+ // relevant level DoFs that we do not own ourselves
+ {
+ IndexSet dofset;
+ DoFTools::extract_locally_relevant_mg_dofs (mg_dof_handler,
+ dofset, l);
+ dofset.subtract_set(mg_dof_handler.locally_owned_mg_dofs(l));
+
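+ // Bucket the remaining DoFs by owning processor: repeatedly take the
+ // first index still in the set, find the processor that owns it,
+ // store the intersection of the set with that processor's owned
+ // range in to_send, and remove those indices from the work set.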
+ unsigned int dest_cpu = 0;
+ while (dofset.n_elements()>0)
+ {
+ types::global_dof_index first_idx = dofset.nth_index_in_set(0);
+
+ while (!mg_dof_handler.locally_owned_mg_dofs_per_processor(l)[dest_cpu]
+ .is_element(first_idx))
+ ++dest_cpu;
+ Assert(mg_dof_handler.locally_owned_mg_dofs_per_processor(l)[dest_cpu]
+ .is_element(first_idx),
+ ExcInternalError());
+ to_send[dest_cpu] = dofset & mg_dof_handler.locally_owned_mg_dofs_per_processor(l)[dest_cpu];
+ dofset.subtract_set(mg_dof_handler.locally_owned_mg_dofs_per_processor(l)[dest_cpu]);
+ }
+ }
+
+
+ // create the list of processors we want to send requests to
+ std::vector<unsigned int> destinations;
+ for (std::map<unsigned int, IndexSet>::iterator it = to_send.begin();
+ it != to_send.end(); ++it)
+ destinations.push_back(it->first);
+
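+ // Given the list of processors we send requests to, determine which
+ // processors will in turn send requests to us.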
+ std::vector<unsigned int> to_receive =
+ Utilities::MPI::compute_point_to_point_communication_pattern(MPI_COMM_WORLD,
+ destinations);
+
+ std::cout << "I want to send " << destinations.size()
+ << " and receive " << to_receive.size() << std::endl;
+
+ // send messages
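+ // Each IndexSet is serialized into a string with block_write(). The
+ // string buffers must stay alive until the MPI_Waitall at the end of
+ // this level's loop body, because MPI_Isend only starts the transfer.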
+ std::vector<MPI_Request> requests (to_send.size() + to_receive.size());
+ unsigned int req_index = 0;
+ std::vector<std::string> buffers(to_send.size());
+ {
+ unsigned int index = 0;
+ for (std::map<unsigned int, IndexSet>::iterator it = to_send.begin();
+ it!=to_send.end();++it, ++index)
+ {
+ std::ostringstream oss;
+ it->second.block_write(oss);
+ buffers[index] = oss.str();
+
+ char *ptr = const_cast<char*>(buffers[index].c_str());
+ MPI_Isend(ptr, buffers[index].length(),
+ MPI_BYTE, it->first,
+ 1000+l, MPI_COMM_WORLD, &requests[req_index]);
+ ++req_index;
+
+ std::cout << "send to " << it->first << std::endl;
+ }
+ }
+
+ // receive
+ unsigned int n_to_receive = to_receive.size();
+ std::vector<char> receive;
+ std::vector<std::string> buffers2(n_to_receive);
+ unsigned int index = 0;
+ while (n_to_receive>0)
+ {
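+ // We do not know the size of the incoming request, so probe for a
+ // message with this level's tag first, query its length, and only
+ // then allocate the buffer and receive it.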
+ MPI_Status status;
+ int len;
+ MPI_Probe(MPI_ANY_SOURCE, 1000+l, MPI_COMM_WORLD, &status);
+ MPI_Get_count(&status, MPI_BYTE, &len);
+ receive.resize(len);
+
+ char *ptr = &receive[0];
+ MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ MPI_COMM_WORLD, &status);
+
+ IndexSet idxset;
+ std::istringstream iss(std::string(ptr, len));
+ idxset.block_read(iss);
+
+ std::cout << "got idxset from " << status.MPI_SOURCE << std::endl;
+
+ // compute and send reply
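+ // The reply consists of the intersections of the requested index set
+ // with our refinement edge and refinement edge boundary sets on this
+ // level, packed into a single buffer and sent back non-blockingly.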
+ std::ostringstream oss;
+ IndexSet reply_ref_edge = mg_constrained_dofs.refinement_edge_indices[l] & idxset;
+ IndexSet reply_ref_edge_bdry = mg_constrained_dofs.refinement_edge_boundary_indices[l] & idxset;
+ reply_ref_edge.block_write(oss);
+ reply_ref_edge_bdry.block_write(oss);
+ buffers2[index] = oss.str();
+ char *ptr2 = const_cast<char*>(buffers2[index].c_str());
+ MPI_Isend(ptr2, buffers2[index].length(),
+ MPI_BYTE, status.MPI_SOURCE,
+ 2000+l, MPI_COMM_WORLD, &requests[req_index]);
+ ++req_index;
+
+ ++index;
+ --n_to_receive;
+ }
+
+ // receive answers
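+ // For every request we sent out, one reply comes back. Unpack the two
+ // index sets and merge them into our own refinement edge information
+ // for this level.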
+ {
+ std::vector<char> receive;
+ for (unsigned int n=0;n<destinations.size();++n)
+ {
+ MPI_Status status;
+ int len;
+ MPI_Probe(MPI_ANY_SOURCE, 2000+l, MPI_COMM_WORLD, &status);
+ MPI_Get_count(&status, MPI_BYTE, &len);
+ receive.resize(len);
+
+ char *ptr = &receive[0];
+ MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ MPI_COMM_WORLD, &status);
+
+ std::istringstream iss(std::string(ptr,len));
+ IndexSet reply_ref_edge;
+ IndexSet reply_ref_edge_bdry;
+ reply_ref_edge.block_read(iss);
+ reply_ref_edge_bdry.block_read(iss);
+ unsigned int c1 = mg_constrained_dofs.refinement_edge_indices[l].n_elements();
+ unsigned int c2 = mg_constrained_dofs.refinement_edge_boundary_indices[l].n_elements();
+
+ mg_constrained_dofs.refinement_edge_indices[l].add_indices(reply_ref_edge);
+ mg_constrained_dofs.refinement_edge_boundary_indices[l].add_indices(reply_ref_edge_bdry);
+
+ std::cout << "new " << mg_constrained_dofs.refinement_edge_indices[l].n_elements()-c1
+ << " and " << mg_constrained_dofs.refinement_edge_boundary_indices[l].n_elements()-c2
+ << std::endl;
+ }
+
+ }
+
+ // finish all requests:
+ if (requests.size() > 0)
+ MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+ }
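+
+ // At this point, every processor knows for each of its locally
+ // relevant level DoFs whether it sits on a refinement edge and
+ // whether it is in addition on the exterior boundary of the domain.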
// The indices just identified will later be used to decide where
// the assembled value has to be added into on each level. We only
// store one of the two interface matrices explicitly; since the
// other one is just its transpose, we will later (in
// the <code>solve()</code> function) be able to just pass
// the transpose matrix where necessary.
+
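+ // the refinement edge DoFs of the level this cell lives on; an entry
+ // (i,j) of the local matrix is kept below only if i is an interface
+ // DoF and j is not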
+ const IndexSet &interface_dofs_on_level
+ = mg_constrained_dofs.get_refinement_edge_indices(cell->level());
+
+
for (unsigned int i=0; i<dofs_per_cell; ++i)
for (unsigned int j=0; j<dofs_per_cell; ++j)
- if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
- || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j]))
+ if (!(interface_dofs_on_level.is_element(local_dof_indices[i]) &&
+       !interface_dofs_on_level.is_element(local_dof_indices[j])))
cell_matrix(i,j) = 0;
boundary_interface_constraints[cell->level()]