https://gitweb.dealii.org/ - dealii.git/commitdiff
astyle
author    Timo Heister <timo.heister@gmail.com>
          Sun, 4 Oct 2015 19:05:19 +0000 (15:05 -0400)
committer Timo Heister <timo.heister@gmail.com>
          Sun, 4 Oct 2015 19:06:13 +0000 (15:06 -0400)
examples/step-16/step-16.cc
examples/step-50/step-50.cc
include/deal.II/meshworker/simple.h
include/deal.II/multigrid/mg_constrained_dofs.h
include/deal.II/multigrid/mg_transfer.templates.h
source/dofs/dof_tools.cc
source/multigrid/mg_tools.cc
source/multigrid/mg_transfer_prebuilt.cc
tests/multigrid/constrained_dofs_01.cc
tests/multigrid/transfer_04.cc

examples/step-16/step-16.cc
index 921c1c7fb1594bdaa37cd5d5d8d69f8c73151002..b94d2d72820ac639f89cd84830f68a8a909e1116 100644
@@ -658,8 +658,8 @@ int main (int argc, char *argv[])
   try
     {
       using namespace Step16;
-  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
-                                                      numbers::invalid_unsigned_int);
+      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
+                                                          numbers::invalid_unsigned_int);
 
       LaplaceProblem<2> laplace_problem(1);
       laplace_problem.run ();

examples/step-50/step-50.cc
index 8573b59089250127592fc57ee3ae78ecce5cac12..205ef37210a6e941861c1392e09778cfa4792033 100644
@@ -626,7 +626,7 @@ namespace Step50
         boundary_constraints[level].close ();
 
         boundary_interface_constraints[level]
-         .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[ (level)]);
+        .add_lines (mg_constrained_dofs.get_refinement_edge_boundary_indices()[ (level)]);
         boundary_interface_constraints[level].close ();
       }
 
@@ -719,9 +719,9 @@ namespace Step50
 
           for (unsigned int i=0; i<dofs_per_cell; ++i)
             for (unsigned int j=0; j<dofs_per_cell; ++j)
-             /** old HEAD:
-                 if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
-                 || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
+              /** old HEAD:
+              if (!mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[i])
+              || mg_constrained_dofs.at_refinement_edge(cell->level(),local_dof_indices[j])) */
               if ( !(interface_dofs_on_level.is_element(local_dof_indices[i])==true &&
                      interface_dofs_on_level.is_element(local_dof_indices[j])==false))
                 cell_matrix(i,j) = 0;
@@ -864,7 +864,7 @@ namespace Step50
     PreconditionMG<dim, vector_t, MGTransferPrebuilt<vector_t> >
     preconditioner(mg_dof_handler, mg, mg_transfer);
 
-    
+
     // With all this together, we can finally
     // get about solving the linear system in
     // the usual way:
@@ -923,7 +923,7 @@ namespace Step50
   template <int dim>
   void LaplaceProblem<dim>::refine_grid ()
   {
-    
+
     Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
 
     TrilinosWrappers::MPI::Vector temp_solution;
@@ -941,7 +941,7 @@ namespace Step50
                                        estimated_error_per_cell,
                                        0.3, 0.0);
 
-    
+
     triangulation.prepare_coarsening_and_refinement ();
     triangulation.execute_coarsening_and_refinement ();
   }
@@ -1005,8 +1005,8 @@ namespace Step50
         std::ofstream visit_master (visit_master_filename.c_str());
         data_out.write_visit_record (visit_master, filenames);
 
-       std::cout << "wrote " << pvtu_master_filename << std::endl;
-       
+        std::cout << "wrote " << pvtu_master_filename << std::endl;
+
       }
   }
 
@@ -1059,10 +1059,10 @@ namespace Step50
         solve ();
         output_results (cycle);
 
-       TrilinosWrappers::MPI::Vector temp = solution;
-       system_matrix.residual(temp,solution,system_rhs);
-       constraints.set_zero(temp);
-       deallog << "residual " << temp.l2_norm() << std::endl;
+        TrilinosWrappers::MPI::Vector temp = solution;
+        system_matrix.residual(temp,solution,system_rhs);
+        constraints.set_zero(temp);
+        deallog << "residual " << temp.l2_norm() << std::endl;
       }
   }
 }

include/deal.II/meshworker/simple.h
index ca95d81cf70413b7a4dc895fe5fbb0af3045308f..e4ceda1cb9ee53f90e55bdf031afad243830edb6 100644
@@ -951,11 +951,11 @@ namespace MeshWorker
             if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
                 !mg_constrained_dofs->at_refinement_edge(level, i2[k]))
               {
-               if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                    !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+                if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                     !mg_constrained_dofs->is_boundary_index(level, i2[k]))
                     ||
-                   (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                    mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+                    (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                     mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
                      i1[j] == i2[k]))
                   G.add(i1[j], i2[k], M(j,k));
               }
@@ -981,11 +981,11 @@ namespace MeshWorker
             if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
                 !mg_constrained_dofs->at_refinement_edge(level, i2[k]))
               {
-               if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                    !mg_constrained_dofs->is_boundary_index(level, i2[k]))
+                if ((!mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                     !mg_constrained_dofs->is_boundary_index(level, i2[k]))
                     ||
-                   (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
-                    mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
+                    (mg_constrained_dofs->is_boundary_index(level, i1[j]) &&
+                     mg_constrained_dofs->is_boundary_index(level, i2[k]) &&
                      i1[j] == i2[k]))
                   G.add(i1[j], i2[k], M(k,j));
               }
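
The condition assembled in the two hunks above is easier to read as a standalone predicate. A minimal sketch (the free-function form and its name are hypothetical; the MGConstrainedDoFs queries are the ones used in the diff):

#include <deal.II/multigrid/mg_constrained_dofs.h>

using namespace dealii;

// Returns true if the (row, col) entry belongs in the refinement-edge
// ("interface") matrix on the given level.
bool add_interface_entry (const MGConstrainedDoFs       &dofs,
                          const unsigned int             level,
                          const types::global_dof_index  row,
                          const types::global_dof_index  col)
{
  // Couple only a refinement-edge row with a non-edge column ...
  if (!dofs.at_refinement_edge (level, row) ||
      dofs.at_refinement_edge (level, col))
    return false;

  // ... and keep boundary DoFs out, except for the diagonal entry of a
  // boundary DoF (both indices boundary and equal).
  const bool row_at_boundary = dofs.is_boundary_index (level, row);
  const bool col_at_boundary = dofs.is_boundary_index (level, col);
  return (!row_at_boundary && !col_at_boundary) ||
         (row_at_boundary && col_at_boundary && row == col);
}
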
include/deal.II/multigrid/mg_constrained_dofs.h
index 4f4725d6c9999971b75213aefdca85bdf9fecbff..9b4129bc11f7ff06e7145a993e2a96387980ad62 100644
@@ -247,15 +247,15 @@ const std::vector<std::set<types::global_dof_index> > &
 MGConstrainedDoFs::get_boundary_indices () const
 {
   if (boundary_indices_old.size()!=boundary_indices.size())
-  {
-    boundary_indices_old.resize(boundary_indices.size());
-    for (unsigned int l=0;l<boundary_indices.size(); ++l)
-      {
-        std::vector<types::global_dof_index> tmp;
-        boundary_indices[l].fill_index_vector(tmp);
-        boundary_indices_old[l].insert(tmp.begin(), tmp.end());
-      }
-  }
+    {
+      boundary_indices_old.resize(boundary_indices.size());
+      for (unsigned int l=0; l<boundary_indices.size(); ++l)
+        {
+          std::vector<types::global_dof_index> tmp;
+          boundary_indices[l].fill_index_vector(tmp);
+          boundary_indices_old[l].insert(tmp.begin(), tmp.end());
+        }
+    }
   return boundary_indices_old;
 }
 
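The rebraced get_boundary_indices() lazily converts the IndexSet-based representation into the legacy per-level std::set one. A minimal self-contained sketch of that conversion (helper name hypothetical):

#include <deal.II/base/index_set.h>
#include <deal.II/base/types.h>

#include <set>
#include <vector>

// Flatten an IndexSet into a std::set, as get_boundary_indices() does
// for each level.
std::set<dealii::types::global_dof_index>
index_set_to_set (const dealii::IndexSet &index_set)
{
  std::vector<dealii::types::global_dof_index> tmp;
  index_set.fill_index_vector (tmp);  // expand compressed ranges to indices
  return std::set<dealii::types::global_dof_index> (tmp.begin(), tmp.end());
}

In the function above the result is cached in boundary_indices_old, guarded by the size comparison, so repeated calls do not redo the conversion.
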
include/deal.II/multigrid/mg_transfer.templates.h
index 5f56a9be9c5f2e9a8254dd08064b3804bbe7bf1a..57fc78eba6871802a24ab24ee01ee121995c7466 100644
@@ -195,9 +195,9 @@ MGTransferPrebuilt<VECTOR>::copy_to_mg (
            i != copy_indices[level].end(); ++i)
         dst_level(i->second) = src(i->first);
 
-       for (IT i= copy_indices_global_mine[level].begin();
-            i != copy_indices_global_mine[level].end(); ++i)
-         dst_level(i->second) = src(i->first);
+      for (IT i= copy_indices_global_mine[level].begin();
+           i != copy_indices_global_mine[level].end(); ++i)
+        dst_level(i->second) = src(i->first);
 
       dst_level.compress(VectorOperation::insert);
 #ifdef DEBUG_OUTPUT

source/dofs/dof_tools.cc
index c57dc684d526160fce3a20b3802203578d759bf6..ae287fb9f37e22a478747da48ab6f77ede885b5c 100644
@@ -976,8 +976,8 @@ namespace DoFTools
   template <class DH>
   void
   extract_locally_relevant_mg_dofs (const DH &dof_handler,
-                                 IndexSet &dof_set,
-                                 unsigned int level)
+                                    IndexSet &dof_set,
+                                    unsigned int level)
   {
     // collect all the locally owned dofs
     dof_set = dof_handler.locally_owned_mg_dofs(level);
@@ -989,7 +989,7 @@ namespace DoFTools
     std::set<types::global_dof_index> global_dof_indices;
 
     typename DH::cell_iterator cell = dof_handler.begin(level),
-                                      endc = dof_handler.end(level);
+                               endc = dof_handler.end(level);
     for (; cell!=endc; ++cell)
       {
         types::subdomain_id id = cell->level_subdomain_id();
@@ -1003,11 +1003,11 @@ namespace DoFTools
         cell->get_mg_dof_indices(dof_indices);
 
         for (std::vector<types::global_dof_index>::iterator it=dof_indices.begin();
-            it!=dof_indices.end();
-            ++it)
+             it!=dof_indices.end();
+             ++it)
           if (!dof_set.is_element(*it))
             global_dof_indices.insert(*it);
-        }
+      }
 
     dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
 
source/multigrid/mg_tools.cc
index 44f3faf42ca6d928e1ce79c7a4db0023e9e63191..81c8df6ac72e4cddbf2ed209f34d5edfc6451227 100644
@@ -1513,7 +1513,7 @@ namespace MGTools
       {
         // do not look at artificial cells
         if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
-          && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+            && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
           continue;
 
         bool has_coarser_neighbor = false;
@@ -1531,13 +1531,13 @@ namespace MGTools
 
                 // only process cell pairs if one of them is mine
                 if (mg_dof_handler.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
-                    && 
+                    &&
                     neighbor->level_subdomain_id()==numbers::artificial_subdomain_id)
 //                    neighbor->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain()
-//                   && 
+//                   &&
 //                   cell->level_subdomain_id()!=mg_dof_handler.get_tria().locally_owned_subdomain())
                   continue;
-                
+
                 // Do refinement face
                 // from the coarse side
                 if (neighbor->level() < cell->level())

source/multigrid/mg_transfer_prebuilt.cc
index c237e01cc0c4922747617e5fee844b63992c58fb..3e05337ae759e7e3e418ee17a28073568fc9c982 100644
@@ -284,7 +284,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
   // We keep track in the bitfield dof_touched which global dof has
   // been processed already (on the current level). This is the same as
   // the multigrid running in serial.
-  
+
   struct dof_pair
   {
     unsigned int level;
@@ -293,7 +293,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
 
     dof_pair(unsigned int level, unsigned int global_dof_index, unsigned int level_dof_index)
       :
-        level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
+      level(level), global_dof_index(global_dof_index), level_dof_index(level_dof_index)
     {}
 
     dof_pair()
@@ -303,7 +303,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
   // map cpu_index -> vector of data
   // that will be copied into copy_indices_level_mine
   std::vector<dof_pair> send_data_temp;
-  
+
   copy_indices.resize(n_levels);
   copy_indices_global_mine.resize(n_levels);
   copy_indices_level_mine.resize(n_levels);
@@ -355,12 +355,12 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
               if (global_mine && level_mine)
                 {
                   copy_indices[level].push_back(
-                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+                    std::make_pair (global_dof_indices[i], level_dof_indices[i]));
                 }
-              else if(global_mine)
+              else if (global_mine)
                 {
                   copy_indices_global_mine[level].push_back(
-                  std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+                    std::make_pair (global_dof_indices[i], level_dof_indices[i]));
 
                   //send this to the owner of the level_dof:
                   send_data_temp.push_back(dof_pair(level, global_dof_indices[i], level_dof_indices[i]));
@@ -374,7 +374,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
             }
         }
     }
-  
+
   const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria =
     (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
      (&mg_dof.get_tria()));
@@ -386,6 +386,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
       // neighbors, so we communicate with every other process. Searching the
       // owner for every single DoF becomes quite inefficient. Please fix
       // this, Timo.
+
       std::vector<unsigned int> neighbors;
       std::map<int, std::vector<dof_pair> > send_data;
 
@@ -394,13 +395,13 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
         // come from Triangulation
         int n_proc = Utilities::MPI::n_mpi_processes(tria->get_communicator());
         int myid = tria->locally_owned_subdomain();
-        for (unsigned int i=0;i<n_proc;++i)
+        for (unsigned int i=0; i<n_proc; ++i)
           if (i!=myid)
             neighbors.push_back(i);
       }
 
       // * find owners of the level dofs and insert into send_data accordingly
-      for(typename std::vector<dof_pair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
+      for (typename std::vector<dof_pair>::iterator dofpair=send_data_temp.begin(); dofpair != send_data_temp.end(); ++dofpair)
         {
           for (std::vector<unsigned int>::iterator it = neighbors.begin(); it != neighbors.end(); ++it)
             {
@@ -419,7 +420,7 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
           {
             requests.push_back(MPI_Request());
             unsigned int dest = *it;
-            std::vector<dof_pair> & data = send_data[dest];
+            std::vector<dof_pair> &data = send_data[dest];
             if (data.size())
               MPI_Isend(&data[0], data.size()*sizeof(data[0]), MPI_BYTE, dest, 71, tria->get_communicator(), &*requests.rbegin());
             else
@@ -440,8 +441,8 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
             if (len==0)
               {
                 int err = MPI_Recv(NULL, 0, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                       tria->get_communicator(), &status);
-                Assert(err==MPI_SUCCESS, ExcInternalError());
+                                   tria->get_communicator(), &status);
+                AssertThrow(err==MPI_SUCCESS, ExcInternalError());
                 continue;
               }
 
@@ -451,14 +452,14 @@ void MGTransferPrebuilt<VECTOR>::build_matrices (
 
             void *ptr = &receive[0];
             int err = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
-                                   tria->get_communicator(), &status);
-            Assert(err==MPI_SUCCESS, ExcInternalError());
+                               tria->get_communicator(), &status);
+            AssertThrow(err==MPI_SUCCESS, ExcInternalError());
 
             for (unsigned int i=0; i<receive.size(); ++i)
               {
                 copy_indices_level_mine[receive[i].level].push_back(
-                      std::pair<unsigned int, unsigned int> (receive[i].global_dof_index, receive[i].level_dof_index)
-                      );
+                  std::pair<unsigned int, unsigned int> (receive[i].global_dof_index, receive[i].level_dof_index)
+                );
               }
           }
       }
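
The receive side touched by the Assert -> AssertThrow change follows the usual probe-then-receive pattern for POD payloads shipped as raw bytes. A minimal standalone sketch, assuming messages on tag 71 and a hypothetical caller-supplied message count (the real code derives it from its neighbor list):

#include <deal.II/base/exceptions.h>

#include <mpi.h>

#include <vector>

// The three-index payload serialized as bytes in the diff above.
struct dof_pair
{
  unsigned int level;
  unsigned int global_dof_index;
  unsigned int level_dof_index;
};

// Probe for a message of unknown length, size the buffer, then receive.
void receive_dof_pairs (MPI_Comm               comm,
                        const unsigned int     n_messages,
                        std::vector<dof_pair> &result)
{
  for (unsigned int m = 0; m < n_messages; ++m)
    {
      MPI_Status status;
      MPI_Probe (MPI_ANY_SOURCE, 71, comm, &status); // wait for any sender

      int len;
      MPI_Get_count (&status, MPI_BYTE, &len);

      if (len == 0)
        {
          // An empty message still has to be matched by a receive.
          const int err = MPI_Recv (NULL, 0, MPI_BYTE, status.MPI_SOURCE,
                                    status.MPI_TAG, comm, &status);
          AssertThrow (err == MPI_SUCCESS, dealii::ExcInternalError());
          continue;
        }

      std::vector<dof_pair> receive (len / sizeof (dof_pair));
      const int err = MPI_Recv (&receive[0], len, MPI_BYTE, status.MPI_SOURCE,
                                status.MPI_TAG, comm, &status);
      AssertThrow (err == MPI_SUCCESS, dealii::ExcInternalError());

      result.insert (result.end(), receive.begin(), receive.end());
    }
}

Using AssertThrow rather than Assert keeps the MPI error check active in release builds, which is the one substantive (non-whitespace) change in this hunk.
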
tests/multigrid/constrained_dofs_01.cc
index b9d7be377e664a7c9b58898a77c6570086a22c6d..59c2ab358f77a2346389f99db8b767ef6321a37e 100644
 
 using namespace std;
 
-  std::string id_to_string(const CellId &id)
-  {
-    std::ostringstream ss;
-    ss << id;
-    return ss.str();
-  }
+std::string id_to_string(const CellId &id)
+{
+  std::ostringstream ss;
+  ss << id;
+  return ss.str();
+}
 
 template <int dim>
 void setup_tria(parallel::distributed::Triangulation<dim> &tr)
 {
   GridGenerator::hyper_cube(tr);
   tr.refine_global(2);
-  
+
   for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
        cell != tr.end(); ++cell)
     {
       if (id_to_string(cell->id()) == "0_2:11")
-       cell->set_refine_flag();
+        cell->set_refine_flag();
     }
   tr.execute_coarsening_and_refinement();
 }
@@ -83,49 +83,50 @@ void check_fe(FiniteElement<dim> &fe)
   dofh.distribute_mg_dofs(fe);
 
   MGConstrainedDoFs                    mg_constrained_dofs_ref;
-  { // reorder
-  parallel::distributed::Triangulation<dim> tr(MPI_COMM_SELF,
-                                               Triangulation<dim>::none,
-                                               parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
-  setup_tria(tr);
-
-  DoFHandler<dim> dofhref(tr);
-  dofhref.distribute_dofs(fe);
-  dofhref.distribute_mg_dofs(fe);
+  {
+    // reorder
+    parallel::distributed::Triangulation<dim> tr(MPI_COMM_SELF,
+                                                 Triangulation<dim>::none,
+                                                 parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+    setup_tria(tr);
+
+    DoFHandler<dim> dofhref(tr);
+    dofhref.distribute_dofs(fe);
+    dofhref.distribute_mg_dofs(fe);
+
+    //std::map<std::string,std::vector<types::global_dof_index> > dofmap;
+    std::map<std::string,std::vector<types::global_dof_index> > mgdofmap;
+
+    for (typename DoFHandler<dim>::level_cell_iterator cell = dofhref.begin();
+         cell != dofhref.end(); ++cell)
+      {
+        if (!cell->is_locally_owned_on_level())
+          continue;
+
+        std::vector<types::global_dof_index> &d = mgdofmap[id_to_string(cell->id())];
+        d.resize(fe.dofs_per_cell);
+        cell->get_mg_dof_indices(d);
+      }
+
+    for (typename DoFHandler<dim>::level_cell_iterator cell = dofh.begin();
+         cell != dofh.end(); ++cell)
+      {
+        if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+          continue;
+
+        std::vector<types::global_dof_index> &renumbered = mgdofmap[id_to_string(cell->id())];
+        cell->set_mg_dof_indices(renumbered);
+        cell->update_cell_dof_indices_cache();
+      }
+
+    typename FunctionMap<dim>::type      dirichlet_boundary;
+    ZeroFunction<dim>                    homogeneous_dirichlet_bc (1);
+    dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
+    mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
+  }
 
-  //std::map<std::string,std::vector<types::global_dof_index> > dofmap;
-  std::map<std::string,std::vector<types::global_dof_index> > mgdofmap;
 
-  for (typename DoFHandler<dim>::level_cell_iterator cell = dofhref.begin();
-       cell != dofhref.end(); ++cell)
-    {
-      if (!cell->is_locally_owned_on_level())
-       continue;
 
-      std::vector<types::global_dof_index> &d = mgdofmap[id_to_string(cell->id())];
-      d.resize(fe.dofs_per_cell);
-      cell->get_mg_dof_indices(d);
-    }
-
-  for (typename DoFHandler<dim>::level_cell_iterator cell = dofh.begin();
-       cell != dofh.end(); ++cell)
-    {
-      if (cell->level_subdomain_id()==numbers::artificial_subdomain_id)
-       continue;
-      
-      std::vector<types::global_dof_index> &renumbered = mgdofmap[id_to_string(cell->id())];
-      cell->set_mg_dof_indices(renumbered);
-      cell->update_cell_dof_indices_cache();
-    }
-  
-  typename FunctionMap<dim>::type      dirichlet_boundary;
-  ZeroFunction<dim>                    homogeneous_dirichlet_bc (1);
-  dirichlet_boundary[0] = &homogeneous_dirichlet_bc;
-  mg_constrained_dofs_ref.initialize(dofhref, dirichlet_boundary);
-  }
-  
-  
-  
   MGConstrainedDoFs                    mg_constrained_dofs;
 
   typename FunctionMap<dim>::type      dirichlet_boundary;
@@ -141,25 +142,25 @@ void check_fe(FiniteElement<dim> &fe)
       IndexSet rei = mg_constrained_dofs.get_refinement_edge_indices (level);
       deallog << "get_refinement_edge_indices:" << std::endl;
       rei.print(deallog);
-      
+
       IndexSet bi = mg_constrained_dofs.get_boundary_indices (level);
       deallog << "get_boundary_indices:" << std::endl;
       bi.print(deallog);
 
       IndexSet relevant;
       DoFTools::extract_locally_relevant_mg_dofs (dofh,
-                relevant, level);
+                                                  relevant, level);
       deallog << "relevant:" << std::endl;
       relevant.print(deallog);
 
       // the indexsets should be the same when run in parallel (on the
       // relevant subset):
       deallog << ((rei == (relevant & mg_constrained_dofs_ref.get_refinement_edge_indices(level)))
-                 ?"ok ":"FAIL ")
-             << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
-                 ?"ok ":"FAIL ")
-             << std::endl;
-      
+                  ?"ok ":"FAIL ")
+              << ((bi == (relevant & mg_constrained_dofs_ref.get_boundary_indices(level)))
+                  ?"ok ":"FAIL ")
+              << std::endl;
+
 
     }
 }
@@ -180,7 +181,7 @@ void check()
 }
 
 int main(int argc, char *argv[])
-{ 
+{
   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
   MPILogInitAll log;
 
tests/multigrid/transfer_04.cc
index f8a53a58f8d2db044b0a771ba4018f82446b7133..7912c169e52eb0646e5fb7e8fb43a1cdb59ca199 100644
 
 using namespace std;
 
-  std::string id_to_string(const CellId &id)
-  {
-    std::ostringstream ss;
-    ss << id;
-    return ss.str();
-  }
+std::string id_to_string(const CellId &id)
+{
+  std::ostringstream ss;
+  ss << id;
+  return ss.str();
+}
 
 template <int dim>
 void setup_tria(parallel::distributed::Triangulation<dim> &tr)
 {
   GridGenerator::hyper_cube(tr);
   tr.refine_global(2);
-  
+
   for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
        cell != tr.end(); ++cell)
     {
       if (id_to_string(cell->id()) == "0_2:03"
-         || id_to_string(cell->id()) == "0_2:00"
-         || id_to_string(cell->id()) == "0_2:01"
-         || id_to_string(cell->id()) == "0_2:12")
-       cell->set_refine_flag();
+          || id_to_string(cell->id()) == "0_2:00"
+          || id_to_string(cell->id()) == "0_2:01"
+          || id_to_string(cell->id()) == "0_2:12")
+        cell->set_refine_flag();
     }
   tr.execute_coarsening_and_refinement();
   for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
        cell != tr.end(); ++cell)
     {
       if (id_to_string(cell->id()) == "0_3:032"
-      || id_to_string(cell->id()) == "0_3:000")
-       cell->set_refine_flag();
+          || id_to_string(cell->id()) == "0_3:000")
+        cell->set_refine_flag();
     }
   tr.execute_coarsening_and_refinement();
-  
+
 
   for (typename parallel::distributed::Triangulation<dim>::cell_iterator cell = tr.begin();
        cell != tr.end(); ++cell)
     {
       deallog << "cell=" << cell->id()
-             << " level_subdomain_id=" << cell->level_subdomain_id()
-             << std::endl;
+              << " level_subdomain_id=" << cell->level_subdomain_id()
+              << std::endl;
     }
 }
 
@@ -103,7 +103,7 @@ void check_fe(FiniteElement<dim> &fe)
 {
   deallog << fe.get_name() << std::endl;
 
-    parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+  parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
                                                Triangulation<dim>::none,
                                                parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
   setup_tria(tr);
@@ -113,14 +113,14 @@ void check_fe(FiniteElement<dim> &fe)
       DataOut<dim> data_out;
       Vector<float> subdomain (tr.n_active_cells());
       for (unsigned int i=0; i<subdomain.size(); ++i)
-       subdomain(i) = tr.locally_owned_subdomain();
+        subdomain(i) = tr.locally_owned_subdomain();
       data_out.attach_triangulation (tr);
       data_out.add_data_vector (subdomain, "subdomain");
       data_out.build_patches (0);
       const std::string filename = ("solution." +
-                                   Utilities::int_to_string
-                                   (tr.locally_owned_subdomain(), 4) +
-                                   ".vtu");
+                                    Utilities::int_to_string
+                                    (tr.locally_owned_subdomain(), 4) +
+                                    ".vtu");
       std::ofstream output (filename.c_str());
       data_out.write_vtu (output);
     }
@@ -136,7 +136,7 @@ void check_fe(FiniteElement<dim> &fe)
   ConstraintMatrix hanging_node_constraints;
   IndexSet locally_relevant_set;
   DoFTools::extract_locally_relevant_dofs (dofh,
-                                             locally_relevant_set);
+                                           locally_relevant_set);
   hanging_node_constraints.reinit (locally_relevant_set);
   DoFTools::make_hanging_node_constraints (dofh, hanging_node_constraints);
   hanging_node_constraints.close();
@@ -149,14 +149,14 @@ void check_fe(FiniteElement<dim> &fe)
   for (unsigned int level=u.min_level(); level<=u.max_level(); ++level)
     {
       u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
-      for (unsigned int i=0;i<dofh.locally_owned_mg_dofs(level).n_elements();++i)
-       {
-         unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
-         u[level][index] = 1.0;//1000+level*100+index;
-       }
+      for (unsigned int i=0; i<dofh.locally_owned_mg_dofs(level).n_elements(); ++i)
+        {
+          unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
+          u[level][index] = 1.0;//1000+level*100+index;
+        }
       u[level].compress(VectorOperation::insert);
     }
-  
+
   vector_t v;
   v.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
   v = 0.;
@@ -164,11 +164,11 @@ void check_fe(FiniteElement<dim> &fe)
   hanging_node_constraints.distribute(v);
 
   {
-    for (unsigned int i=0;i<dofh.locally_owned_dofs().n_elements();++i)
+    for (unsigned int i=0; i<dofh.locally_owned_dofs().n_elements(); ++i)
       {
-       unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
-       if (abs(v[index] - 1.0)>1e-5)
-         deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
+        unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
+        if (abs(v[index] - 1.0)>1e-5)
+          deallog << "ERROR: index=" << index << " is equal to " << v[index] << std::endl;
       }
   }
   deallog << "ok" << std::endl;
@@ -190,7 +190,7 @@ void check()
 }
 
 int main(int argc, char *argv[])
-{ 
+{
   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
   MPILogInitAll log;
 
