try
{
using namespace Step16;
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
- numbers::invalid_unsigned_int);
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
+ numbers::invalid_unsigned_int);
LaplaceProblem<2> laplace_problem(1);
laplace_problem.run ();
boundary_constraints[level].close ();
boundary_interface_constraints[level]
- .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[ (level)]);
+ .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[level]);
boundary_interface_constraints[level].close ();
}
for (unsigned int i=0; i<dofs_per_cell; ++i)
for (unsigned int j=0; j<dofs_per_cell; ++j)
- /** old HEAD:
- if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
- || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
+ /** old HEAD:
+ if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
+ || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
if ( !(interface_dofs_on_level.is_element(local_dof_indices[i])==true &&
interface_dofs_on_level.is_element(local_dof_indices[j])==false))
cell_matrix(i,j) = 0;
PreconditionMG<dim, vector_t, MGTransferPrebuilt<vector_t> >
preconditioner(mg_dof_handler, mg, mg_transfer);
-
+
// With all this together, we can finally
// get about solving the linear system in
// the usual way:
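// For reference, the solve that this comment leads into follows the usual
// step-50 pattern; a minimal sketch (the iteration limit and tolerance are
// assumptions here, not part of this patch):
SolverControl solver_control (500, 1e-8*system_rhs.l2_norm(), false);
SolverCG<vector_t> cg (solver_control);
solution = 0;
cg.solve (system_matrix, solution, system_rhs, preconditioner);
constraints.distribute (solution);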
template <int dim>
void LaplaceProblem<dim>::refine_grid ()
{
-
+
Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
TrilinosWrappers::MPI::Vector temp_solution;
estimated_error_per_cell,
0.3, 0.0);
-
+
triangulation.prepare_coarsening_and_refinement ();
triangulation.execute_coarsening_and_refinement ();
}
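// For context, the estimator/refinement fragments above come from a
// refine_grid() of the usual step-50 shape; a sketch, in which the ghosted
// copy via locally_relevant_set, the quadrature degree, and the choice of
// refine_and_coarsen_fixed_fraction are assumptions (only the fractions
// 0.3/0.0 are visible in the fragment):
temp_solution.reinit (locally_relevant_set, MPI_COMM_WORLD);
temp_solution = solution;
KellyErrorEstimator<dim>::estimate (mg_dof_handler,
                                    QGauss<dim-1>(3),
                                    typename FunctionMap<dim>::type(),
                                    temp_solution,
                                    estimated_error_per_cell);
parallel::distributed::GridRefinement::
  refine_and_coarsen_fixed_fraction (triangulation,
                                     estimated_error_per_cell,
                                     0.3, 0.0);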
std::ofstream visit_master (visit_master_filename.c_str());
data_out.write_visit_record (visit_master, filenames);
- std::cout << "wrote " << pvtu_master_filename << std::endl;
-
+ std::cout << "wrote " << pvtu_master_filename << std::endl;
+
}
}
solve ();
output_results (cycle);
- TrilinosWrappers::MPI::Vector temp = solution;
- system_matrix.residual(temp,solution,system_rhs);
- constraints.set_zero(temp);
- deallog << "residual " << temp.l2_norm() << std::endl;
+ TrilinosWrappers::MPI::Vector temp = solution;
+ system_matrix.residual(temp,solution,system_rhs);
+ constraints.set_zero(temp);
+ deallog << "residual " << temp.l2_norm() << std::endl;
}
}
}
if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
!mg_constrained_dofs->at_refinement_edge(level, i2[k]))
{
- if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
- !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+ if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+ !mg_constrained_dofs->is_boundary_index(level, i2[k]))
||
- (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
- mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+ (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+ mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
i1[j] == i2[k]))
G.add(i1[j], i2[k], M(j,k));
}
if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
!mg_constrained_dofs->at_refinement_edge(level, i2[k]))
{
- if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
- !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+ if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+ !mg_constrained_dofs->is_boundary_index(level, i2[k]))
||
- (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
- mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+ (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+ mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
i1[j] == i2[k]))
G.add(i1[j], i2[k], M(k,j));
}
MGConstrainedDoFs::get_boundary_indices () const
{
if (boundary_indices_old.size()!=boundary_indices.size())
- {
- boundary_indices_old.resize(boundary_indices.size());
- for (unsigned int l=0;l<boundary_indices.size(); ++l)
- {
- std::vector<types::global_dof_index> tmp;
- boundary_indices[l].fill_index_vector(tmp);
- boundary_indices_old[l].insert(tmp.begin(), tmp.end());
- }
- }
+ {
+ boundary_indices_old.resize(boundary_indices.size());
+ for (unsigned int l=0; l<boundary_indices.size(); ++l)
+ {
+ std::vector<types::global_dof_index> tmp;
+ boundary_indices[l].fill_index_vector(tmp);
+ boundary_indices_old[l].insert(tmp.begin(), tmp.end());
+ }
+ }
return boundary_indices_old;
}
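// Purely for illustration: a caller of the compatibility interface above
// consumes the per-level std::set representation like this (the loop and
// the output are assumptions, not patch content):
const std::vector<std::set<types::global_dof_index> > &bi =
  mg_constrained_dofs.get_boundary_indices ();
for (unsigned int level=0; level<bi.size(); ++level)
  deallog << "level " << level << " has "
          << bi[level].size() << " boundary indices" << std::endl;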
i != copy_indices[level].end(); ++i)
dst_level(i->second) = src(i->first);
- for (IT i= copy_indices_global_mine[level].begin();
- i != copy_indices_global_mine[level].end(); ++i)
- dst_level(i->second) = src(i->first);
+ for (IT i= copy_indices_global_mine[level].begin();
+ i != copy_indices_global_mine[level].end(); ++i)
+ dst_level(i->second) = src(i->first);
dst_level.compress(VectorOperation::insert);
#ifdef DEBUG_OUTPUT
template <class DH>
void
extract_locally_relevant_mg_dofs (const DH &dof_handler,
- IndexSet &dof_set,
- unsigned int level)
+ IndexSet &dof_set,
+ unsigned int level)
{
// collect all the locally owned dofs
dof_set = dof_handler.locally_owned_mg_dofs(level);
std::set<types::global_dof_index> global_dof_indices;
typename DH::cell_iterator cell = dof_handler.begin(level),
- endc = dof_handler.end(level);
+ endc = dof_handler.end(level);
for (; cell!=endc; ++cell)
{
types::subdomain_id id = cell->level_subdomain_id();
cell->get_mg_dof_indices(dof_indices);
for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
- it!=dof_indices.end();
- ++it)
+ it!=dof_indices.end();
+ ++it)
if (!dof_set.is_element(*it))
global_dof_indices.insert(*it);
- }
+ }
dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
{
// do not look at artificial cells
if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
- && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+ && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
continue;
bool has_coarser_neighbor = false;
// only process cell pairs if one of them is mine
if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
- &&
+ &&
neighbor->level_subdomain_id()==numbers::artificial_subdomain_id)
// neighbor->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain()
-// &&
+// &&
// cell->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain())
continue;
-
+
// Do refinement face
// from the coarse side
if (neighbor->level() < cell->level())
// We keep track in the bitfield dof_touched which global dof has
// been processed already (on the current level). This is the same as
// the multigrid running in serial.
-
+
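// A minimal sketch of how such a dof_touched bitfield is typically used on
// one level (variable names are assumptions, not the literal patch content):
std::vector<bool> dof_touched (globally_relevant.n_elements(), false);
for (unsigned int i=0; i<dofs_per_cell; ++i)
  {
    const types::global_dof_index idx =
      globally_relevant.index_within_set (global_dof_indices[i]);
    if (dof_touched[idx])
      continue;                      // already handled on this level
    // ... record the (global, level) index pair for this dof ...
    dof_touched[idx] = true;
  }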
struct dof_pair
{
unsigned int level;
dof_pair(unsigned int level, unsigned int global_dof_index, unsigned int level_dof_index)
:
- level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
+ level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
{}
dof_pair()
// map cpu_index -> vector of data
// that will be copied into copy_indices_level_mine
std::vector<dof_pair> send_data_temp;
-
+
copy_indices.resize(n_levels);
copy_indices_global_mine.resize(n_levels);
copy_indices_level_mine.resize(n_levels);
if (global_mine && level_mine)
{
copy_indices[level].push_back(
- std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+ std::make_pair (global_dof_indices[i], level_dof_indices[i]));
}
- else if(global_mine)
+ else if (global_mine)
{
copy_indices_global_mine[level].push_back(
- std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+ std::make_pair (global_dof_indices[i], level_dof_indices[i]));
//send this to the owner of the level_dof:
send_data_temp.push_back(dof_pair(level, global_dof_indices[i], level_dof_indices[i]));
}
}
}
-
+
const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
(dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&mg_dof.get_tria()));
// neighbors, so we communicate with every other process. Searching the
// owner for every single DoF becomes quite inefficient. Please fix
// this, Timo.
+
std::vector<unsigned int> neighbors;
std::map<int, std::vector<dof_pair> > send_data;
// come from Triangulation
int n_proc = Utilities::MPI::n_mpi_processes(tria->get_communicator());
int myid = tria->locally_owned_subdomain();
- for (unsigned int i=0;i<n_proc;++i)
+ for (unsigned int i=0; i<n_proc; ++i)
if (i!=myid)
neighbors.push_back(i);
}
// * find owners of the level dofs and insert into send_data accordingly
- for(typename std::vector<dof_pair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
+ for (typename std::vector<dof_pair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
{
for (std::vector<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
{
{
requests.push_back(MPI_Request());
unsigned int dest = *it;
- std::vector<dof_pair> & data = send_data[dest];
+ std::vector<dof_pair> &data = send_data[dest];
if (data.size())
MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
else
if (len==0)
{
int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria->get_communicator(), &status);
- Assert(err==MPI_SUCCESS, ExcInternalError());
+ tria->get_communicator(), &status);
+ AssertThrow(err==MPI_SUCCESS, ExcInternalError());
continue;
}
void *ptr = &receive[0];
int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
- tria->get_communicator(), &status);
- Assert(err==MPI_SUCCESS, ExcInternalError());
+ tria->get_communicator(), &status);
+ AssertThrow(err==MPI_SUCCESS, ExcInternalError());
for (unsigned int i=0; i<receive.size(); ++i)
{
copy_indices_level_mine[receive[i].level].push_back(
- std::pair<unsigned int, unsigned int> (receive[i].global_dof_index, receive[i].level_dof_index)
- );
+ std::pair<unsigned int, unsigned int> (receive[i].global_dof_index, receive[i].level_dof_index)
+ );
}
}
}
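// For context: the `len`, `status`, and `receive` used in the MPI_Recv calls
// above normally come from a probe step along these lines (the tag 71 matches
// the MPI_Isend above; everything else is an assumption):
MPI_Status status;
MPI_Probe (MPI_ANY_SOURCE, 71, tria->get_communicator(), &status);
int len;
MPI_Get_count (&status, MPI_BYTE, &len);
std::vector<dof_pair> receive (len / sizeof(dof_pair));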
using namespace std;
- std::string id_to_string(const CellId &id)
- {
- std::ostringstream ss;
- ss << id;
- return ss.str();
- }
+std::string id_to_string(const CellId &id)
+{
+ std::ostringstream ss;
+ ss << id;
+ return ss.str();
+}
template <int dim>
void setup_tria(parallel::distributed::Triangulation<dim> &tr)
{
GridGenerator::hyper_cube(tr);
tr.refine_global(2);
-
+
for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
cell != tr.end(); ++cell)
{
if (id_to_string(cell->id()) == "0_2:11")
- cell->set_refine_flag();
+ cell->set_refine_flag();
}
tr.execute_coarsening_and_refinement();
}
dofh.distribute_mg_dofs(fe);
MGConstrainedDoFs mg_constrained_dofs_ref;
- { // reorder
- parallel::distributed::Triangulation<dim> tr(MPI_COMM_SELF,
- Triangulation<dim>::none,
- parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
- setup_tria(tr);
-
- DoFHandler<dim> dofhref(tr);
- dofhref.distribute_dofs(fe);
- dofhref.distribute_mg_dofs(fe);
+ {
+ // reorder
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_SELF,
+ Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+ setup_tria(tr);
+
+ DoFHandler<dim> dofhref(tr);
+ dofhref.distribute_dofs(fe);
+ dofhref.distribute_mg_dofs(fe);
+
+ //std::map<std::string,std::vector<types::global_dof_index> > dofmap;
+ std::map<std::string,std::vector<types::global_dof_index> > mgdofmap;
+
+ for (typename DoFHandler<dim>::level_cell_iterator cell = dofhref.begin();
+ cell != dofhref.end(); ++cell)
+ {
+ if (!cell->is_locally_owned_on_level())
+ continue;
+
+ std::vector<types::global_dof_index> &d = mgdofmap[id_to_string(cell->id())];
+ d.resize(fe.dofs_per_cell);
+ cell->get_mg_dof_indices(d);
+ }
+
+ for (typename DoFHandler<dim>::level_cell_iterator cell = dofh.begin();
+ cell != dofh.end(); ++cell)
+ {
+ if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+ continue;
+
+ std::vector<types::global_dof_index> &renumbered = mgdofmap[id_to_string(cell->id())];
+ cell->set_mg_dof_indices(renumbered);
+ cell->update_cell_dof_indices_cache();
+ }
+
+ typename FunctionMap<dim>::type dirichlet_boundary;
+ ZeroFunction<dim> homogeneous_dirichlet_bc (1);
+ dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
+ mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
+ }
- //std::map<std::string,std::vector<types::global_dof_index> > dofmap;
- std::map<std::string,std::vector<types::global_dof_index> > mgdofmap;
- for (typename DoFHandler<dim>::level_cell_iterator cell = dofhref.begin();
- cell != dofhref.end(); ++cell)
- {
- if (!cell->is_locally_owned_on_level())
- continue;
- std::vector<types::global_dof_index> &d = mgdofmap[id_to_string(cell->id())];
- d.resize(fe.dofs_per_cell);
- cell->get_mg_dof_indices(d);
- }
-
- for (typename DoFHandler<dim>::level_cell_iterator cell = dofh.begin();
- cell != dofh.end(); ++cell)
- {
- if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
- continue;
-
- std::vector<types::global_dof_index> &renumbered = mgdofmap[id_to_string(cell->id())];
- cell->set_mg_dof_indices(renumbered);
- cell->update_cell_dof_indices_cache();
- }
-
- typename FunctionMap<dim>::type dirichlet_boundary;
- ZeroFunction<dim> homogeneous_dirichlet_bc (1);
- dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
- mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
- }
-
-
-
MGConstrainedDoFs mg_constrained_dofs;
typename FunctionMap<dim>::type dirichlet_boundary;
IndexSet rei = mg_constrained_dofs.get_refinement_edge_indices (level);
deallog << "get_refinement_edge_indices:" << std::endl;
rei.print(deallog);
-
+
IndexSet bi = mg_constrained_dofs.get_boundary_indices (level);
deallog << "get_boundary_indices:" << std::endl;
bi.print(deallog);
IndexSet relevant;
DoFTools::extract_locally_relevant_mg_dofs (dofh,
- relevant, level);
+ relevant, level);
deallog << "relevant:" << std::endl;
relevant.print(deallog);
// the indexsets should be the same when run in parallel (on the
// relevant subset):
deallog << ((rei == (relevant & mg_constrained_dofs_ref.get_refinement_edge_indices(level)))
- ?"ok ":"FAIL ")
- << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
- ?"ok ":"FAIL ")
- << std::endl;
-
+ ?"ok ":"FAIL ")
+ << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
+ ?"ok ":"FAIL ")
+ << std::endl;
+
}
}
}
int main(int argc, char *argv[])
-{
+{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
using namespace std;
- std::string id_to_string(const CellId &id)
- {
- std::ostringstream ss;
- ss << id;
- return ss.str();
- }
+std::string id_to_string(const CellId &id)
+{
+ std::ostringstream ss;
+ ss << id;
+ return ss.str();
+}
template <int dim>
void setup_tria(parallel::distributed::Triangulation<dim> &tr)
{
GridGenerator::hyper_cube(tr);
tr.refine_global(2);
-
+
for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
cell != tr.end(); ++cell)
{
if (id_to_string(cell->id()) == "0_2:03"
- || id_to_string(cell->id()) == "0_2:00"
- || id_to_string(cell->id()) == "0_2:01"
- || id_to_string(cell->id()) == "0_2:12")
- cell->set_refine_flag();
+ || id_to_string(cell->id()) == "0_2:00"
+ || id_to_string(cell->id()) == "0_2:01"
+ || id_to_string(cell->id()) == "0_2:12")
+ cell->set_refine_flag();
}
tr.execute_coarsening_and_refinement();
for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
cell != tr.end(); ++cell)
{
if (id_to_string(cell->id()) == "0_3:032"
- || id_to_string(cell->id()) == "0_3:000")
- cell->set_refine_flag();
+ || id_to_string(cell->id()) == "0_3:000")
+ cell->set_refine_flag();
}
tr.execute_coarsening_and_refinement();
-
+
for (typename parallel::distributed::Triangulation<dim>::cell_iterator cell = tr.begin();
cell != tr.end(); ++cell)
{
deallog << "cell=" << cell->id()
- << " level_subdomain_id=" << cell->level_subdomain_id()
- << std::endl;
+ << " level_subdomain_id=" << cell->level_subdomain_id()
+ << std::endl;
}
}
{
deallog << fe.get_name() << std::endl;
- parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
Triangulation<dim>::none,
parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
setup_tria(tr);
DataOut<dim> data_out;
Vector<float> subdomain (tr.n_active_cells());
for (unsigned int i=0; i<subdomain.size(); ++i)
- subdomain(i) = tr.locally_owned_subdomain();
+ subdomain(i) = tr.locally_owned_subdomain();
data_out.attach_triangulation (tr);
data_out.add_data_vector (subdomain, "subdomain");
data_out.build_patches (0);
const std::string filename = ("solution." +
- Utilities::int_to_string
- (tr.locally_owned_subdomain(), 4) +
- ".vtu");
+ Utilities::int_to_string
+ (tr.locally_owned_subdomain(), 4) +
+ ".vtu");
std::ofstream output (filename.c_str());
data_out.write_vtu (output);
}
ConstraintMatrix hanging_node_constraints;
IndexSet locally_relevant_set;
DoFTools::extract_locally_relevant_dofs (dofh,
- locally_relevant_set);
+ locally_relevant_set);
hanging_node_constraints.reinit (locally_relevant_set);
DoFTools::make_hanging_node_constraints (dofh, hanging_node_constraints);
hanging_node_constraints.close();
for (unsigned int level=u.min_level(); level<=u.max_level(); ++level)
{
u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
- for (unsigned int i=0;i<dofh.locally_owned_mg_dofs(level).n_elements();++i)
- {
- unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
- u[level][index] = 1.0;//1000+level*100+index;
- }
+ for (unsigned int i=0; i<dofh.locally_owned_mg_dofs(level).n_elements(); ++i)
+ {
+ unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
+ u[level][index] = 1.0;//1000+level*100+index;
+ }
u[level].compress(VectorOperation::insert);
}
-
+
vector_t v;
v.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
v = 0.;
hanging_node_constraints.distribute(v);
{
- for (unsigned int i=0;i<dofh.locally_owned_dofs().n_elements();++i)
+ for (unsigned int i=0; i<dofh.locally_owned_dofs().n_elements(); ++i)
{
- unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
- if (abs(v[index] - 1.0)>1e-5)
- deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
+ unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
+          if (std::abs(v[index] - 1.0)>1e-5)
+ deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
}
}
deallog << "ok" << std::endl;
}
int main(int argc, char *argv[])
-{
+{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;