--- /dev/null
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/matrix_tools.h>
+#include <fstream>
+#include <deal.II/base/conditional_ostream.h>
+#include <cmath>
+#include <deal.II/lac/generic_linear_algebra.h>
+
+#include "../tests.h"
+
+// test VectorTools::interpolate_to_different_mesh in parallel
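+//
+// two parallel::distributed::Triangulation objects are refined with the
+// same randomly generated flags, a known function is interpolated on one
+// mesh, transferred to the other via interpolate_to_different_mesh(), and
+// the global L2 error against the exact function is reported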
+
+using namespace dealii;
+
+namespace LA
+{
+ using namespace dealii::LinearAlgebraTrilinos;
+}
+
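+// scalar test function u(p) = 1 + x^2 used as the interpolation target;
+// it is quadratic, so it is not represented exactly by the FE_Q(1) space
+// used below and the reported L2 error is nonzero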
+template <int dim>
+class SomeFunction : public Function<dim>
+{
+public:
+ double value (const Point<dim> &p,
+ const unsigned int) const
+ {
+ return 1+p(0)*p(0);
+ }
+};
+
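+// distribute DoFs for the given element and size two vectors: 'vec' holds
+// only the locally owned entries, while 'lr_vec' also stores the ghost
+// (locally relevant) entries needed for read access on ghost cells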
+template<int dim>
+void setup(DoFHandler<dim> & dh,
+ FE_Q<dim> & fe,
+ LA::MPI::Vector & vec,
+ LA::MPI::Vector & lr_vec)
+{
+ dh.distribute_dofs (fe);
+ vec.reinit(dh.locally_owned_dofs(), MPI_COMM_WORLD);
+ IndexSet locally_relevant;
+ DoFTools::extract_locally_relevant_dofs (dh, locally_relevant);
+ lr_vec.reinit(locally_relevant, MPI_COMM_WORLD);
+}
+
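+// write one .vtu file per MPI rank plus a .pvtu master record that ties
+// the per-rank files together (unused by default, see the commented-out
+// calls in test())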
+template<int dim>
+void output(DoFHandler<dim> & dh, LA::MPI::Vector & v, unsigned int loop, const std::string filename_)
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ DataOut<dim> data_out;
+ data_out.add_data_vector (dh, v, "1");
+ data_out.build_patches (1);
+ std::ostringstream filename;
+ filename << filename_
+ << Utilities::int_to_string (loop, 2)
+ << "."
+ << Utilities::int_to_string (myid,2)
+ << ".vtu";
+
+ std::ofstream output (filename.str().c_str());
+ data_out.write_vtu (output);
+  // only rank 0 writes the master record referencing all per-rank files
+  if (myid == 0)
+ {
+ std::vector<std::string> filenames;
+ for (unsigned int i=0; i<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
+ filenames.push_back (filename_ +
+ Utilities::int_to_string (loop, 2) +
+ "." +
+ Utilities::int_to_string(i, 2) +
+ ".vtu");
+ const std::string
+ pvtu_master_filename = (filename_ +
+ Utilities::int_to_string (loop, 2) +
+ ".pvtu");
+ std::ofstream pvtu_master (pvtu_master_filename.c_str());
+ data_out.write_pvtu_record (pvtu_master, filenames);
+ }
+}
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
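+  // automatic repartitioning is disabled so that both triangulations keep
+  // identical partitions; ownership of matching cells then stays in sync
+  // between the two meshes, which is what lets interpolate_to_different_mesh()
+  // work here (both meshes are repartitioned explicitly, and identically, below)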
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim,dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(2);
+ parallel::distributed::Triangulation<dim> tr2(MPI_COMM_WORLD,
+ dealii::Triangulation<dim,dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::hyper_cube(tr2);
+ tr2.refine_global(2);
+
+ FE_Q<dim> fe(1);
+ DoFHandler<dim> dh(tr);
+ DoFHandler<dim> dh2(tr2);
+
+ SomeFunction<dim> func;
+
+
+ for (unsigned int loop = 0; loop < 5; ++loop)
+ {
+      // randomly flag cells for refinement/coarsening; refine flags are
+      // encoded with dim bits per cell, hence the factor of dim below
+ std::vector<bool> r_flags(tr.n_active_cells()*dim, false);
+ std::vector<bool> c_flags(tr.n_active_cells(), false);
+
+ for (unsigned int i=0;i<c_flags.size();++i)
+ {
+ int roll = Testing::rand()%4;
+ if (roll >= 2)
+ {
+ for (unsigned int j=0;j<dim;++j)
+ r_flags[i*dim+j] = true;
+ }
+ else if (roll == 1)
+ c_flags[i] = true;
+ }
+
+ tr.load_coarsen_flags(c_flags);
+ tr.load_refine_flags(r_flags);
+
+ tr.execute_coarsening_and_refinement ();
+ deallog << "locally owned cells: " << tr.n_locally_owned_active_cells()
+ << " / "
+ << tr.n_global_active_cells()
+ << std::endl;
+
+
+ LA::MPI::Vector vec1;
+ LA::MPI::Vector lr_vec1;
+ setup(dh, fe, vec1, lr_vec1);
+
+ LA::MPI::Vector vec2;
+ LA::MPI::Vector lr_vec2;
+ setup(dh2, fe, vec2, lr_vec2);
+
+      // interpolate func on the old mesh (dh2 is one refinement step behind dh):
+ VectorTools::interpolate(dh2, func, vec2);
+ lr_vec2 = vec2;
+
+ // interpolate from vec2 to vec1
+ VectorTools::interpolate_to_different_mesh(dh2, lr_vec2, dh, vec1);
+ lr_vec1 = vec1;
+
+ {
+ Vector<double> local_errors (tr.n_active_cells());
+ VectorTools::integrate_difference (dh, lr_vec1, func, local_errors,
+ QGauss<dim>(3), VectorTools::L2_norm);
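+        // reduce to a global L2 error: square the per-rank norm, sum over
+        // all ranks, and take the square root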
+ double total_local_error = local_errors.l2_norm();
+ const double total_global_error
+ = std::sqrt (Utilities::MPI::sum (total_local_error * total_local_error, MPI_COMM_WORLD));
+ if (myid == 0)
+ deallog << "err: " << total_global_error << std::endl;
+ }
+
+ //output(dh, lr_vec1, loop, "solutionA-");
+ //output(dh2, lr_vec2, loop, "solutionB-");
+
+      // also update tr2 to match tr by replaying the same flags
+ tr2.load_coarsen_flags(c_flags);
+ tr2.load_refine_flags(r_flags);
+ tr2.execute_coarsening_and_refinement ();
+
+ // repartition both in the same way
+ tr.repartition();
+ tr2.repartition();
+
+      // consistency check: both meshes received identical refinement flags
+      // and repartition calls, so their checksums should match
+ const unsigned int checksum = tr.get_checksum ();
+ const unsigned int checksum2 = tr2.get_checksum ();
+ if (myid == 0)
+ deallog << "Checksum: "
+ << checksum << " " << checksum2
+ << std::endl;
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+ test<2>();
+}
--- /dev/null
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/solver_control.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/matrix_tools.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <fstream>
+#include <deal.II/base/conditional_ostream.h>
+#include <cmath>
+#include <deal.II/lac/generic_linear_algebra.h>
+#include <deal.II/lac/sparsity_tools.h>
+
+#include "../tests.h"
+
+// test VectorTools::interpolate_to_different_mesh in parallel
+// this is a slightly modified version of an example by Sam Cox from the
+// mailing list
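+//
+// a Laplace problem is solved on an adaptively refined mesh, the solution
+// is interpolated onto a second mesh (which still has the structure of the
+// previous cycle), and the second mesh is then refined with the
+// refine/coarsen flags recorded on the first one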
+
+using namespace dealii;
+
+namespace LA
+{
+ using namespace dealii::LinearAlgebraTrilinos;
+}
+
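+// Laplace solver on a parallel::distributed::Triangulation, together with
+// a second triangulation that lags the first during interpolation and is
+// brought up to date at the end of each cycle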
+template <int dim>
+class SeventhProblem
+{
+public:
+ SeventhProblem (unsigned int prob_number);
+ ~SeventhProblem ();
+ void run (unsigned int cycle);
+private:
+ void setup_system ();
+ void setup_second_system ();
+ void assemble_system ();
+ void solve ();
+ MPI_Comm mpi_communicator;
+  typename parallel::distributed::Triangulation<dim>::Settings settings;
+ parallel::distributed::Triangulation<dim> triangulation;
+ DoFHandler<dim> dof_handler;
+ FE_Q<dim> fe;
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+ ConstraintMatrix constraints;
+ TrilinosWrappers::SparseMatrix system_matrix;
+ TrilinosWrappers::MPI::Vector locally_relevant_solution;
+ TrilinosWrappers::MPI::Vector interpolated_locally_relevant_solution;
+ TrilinosWrappers::MPI::Vector system_rhs;
+ parallel::distributed::Triangulation<dim> second_triangulation;
+ DoFHandler<dim> second_dof_handler;
+ FE_Q<dim> second_fe;
+ IndexSet second_locally_owned_dofs;
+ IndexSet second_locally_relevant_dofs;
+ TrilinosWrappers::MPI::Vector second_locally_relevant_solution;
+ ConditionalOStream pcout;
+ unsigned int prob_number;
+};
+template <int dim>
+SeventhProblem<dim>::SeventhProblem (unsigned int prob_number)
+:
+mpi_communicator (MPI_COMM_WORLD),
+settings (parallel::distributed::Triangulation<dim>::no_automatic_repartitioning),
+triangulation (mpi_communicator,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening),
+ settings),
+dof_handler (triangulation),
+fe (2),
+second_triangulation (mpi_communicator,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening),
+ settings),
+second_dof_handler (second_triangulation),
+second_fe (2),
+pcout (deallog.get_file_stream(),
+ (Utilities::MPI::this_mpi_process(mpi_communicator)
+ == 0)),
+prob_number(prob_number)
+{}
+
+template <int dim>
+SeventhProblem<dim>::~SeventhProblem ()
+{
+ dof_handler.clear ();
+ second_dof_handler.clear ();
+}
+
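+// distribute DoFs, build the constraints (hanging nodes plus a zero
+// Dirichlet boundary), and set up the distributed sparsity pattern and
+// system matrix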
+template <int dim>
+void SeventhProblem<dim>::setup_system ()
+{
+ dof_handler.distribute_dofs (fe);
+ locally_owned_dofs = dof_handler.locally_owned_dofs ();
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+ locally_relevant_solution.reinit (locally_owned_dofs,
+ locally_relevant_dofs, mpi_communicator);
+ system_rhs.reinit (locally_owned_dofs, mpi_communicator);
+ system_rhs = 0;
+ constraints.clear ();
+ constraints.reinit (locally_relevant_dofs);
+ DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ ZeroFunction<dim>(),
+ constraints);
+ constraints.close ();
+ CompressedSimpleSparsityPattern csp (locally_relevant_dofs);
+ DoFTools::make_sparsity_pattern (dof_handler, csp,
+ constraints, false);
+ SparsityTools::distribute_sparsity_pattern (csp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ mpi_communicator,
+ locally_relevant_dofs);
+ system_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ csp,
+ mpi_communicator);
+}
+
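+// size the ghosted vectors on the second mesh; the interpolated values
+// end up in interpolated_locally_relevant_solution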
+template <int dim>
+void SeventhProblem<dim>::setup_second_system ()
+{
+  second_dof_handler.distribute_dofs (second_fe);
+ second_locally_owned_dofs = second_dof_handler.locally_owned_dofs ();
+ DoFTools::extract_locally_relevant_dofs (second_dof_handler,
+ second_locally_relevant_dofs);
+ second_locally_relevant_solution.reinit (second_locally_owned_dofs,
+ second_locally_relevant_dofs, mpi_communicator);
+ interpolated_locally_relevant_solution.reinit(second_locally_owned_dofs,
+ second_locally_relevant_dofs, mpi_communicator);
+}
+
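+// standard Laplace assembly on locally owned cells; the right-hand side
+// is +1 above the curve y = 0.5 + 0.25*sin(4*pi*x) and -1 below it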
+template <int dim>
+void SeventhProblem<dim>::assemble_system ()
+{
+ const QGauss<dim> quadrature_formula(3);
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points |
+ update_JxW_values);
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+ std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->is_locally_owned())
+ {
+ cell_matrix = 0;
+ cell_rhs = 0;
+ fe_values.reinit (cell);
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ {
+ const double
+ rhs_value
+ = (fe_values.quadrature_point(q_point)[1]
+ >
+ 0.5+0.25*std::sin(4.0 * numbers::PI *
+ fe_values.quadrature_point(q_point)[0])
+ ? 1 : -1);
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+ fe_values.shape_grad(j,q_point) *
+ fe_values.JxW(q_point));
+ cell_rhs(i) += (rhs_value *
+ fe_values.shape_value(i,q_point) *
+ fe_values.JxW(q_point));
+ }
+ }
+ cell->get_dof_indices (local_dof_indices);
+ constraints.distribute_local_to_global (cell_matrix,
+ cell_rhs,
+ local_dof_indices,
+ system_matrix,
+ system_rhs);
+ }
+ system_matrix.compress (VectorOperation::add);
+ system_rhs.compress (VectorOperation::add);
+}
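+
+// solve with CG preconditioned by algebraic multigrid, then distribute the
+// constraints and copy the result into the ghosted solution vector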
+template <int dim>
+void SeventhProblem<dim>::solve ()
+{
+ LA::MPI::Vector
+ completely_distributed_solution (locally_owned_dofs, mpi_communicator);
+ SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+  TrilinosWrappers::SolverCG solver (solver_control);
+ TrilinosWrappers::PreconditionAMG preconditioner;
+ TrilinosWrappers::PreconditionAMG::AdditionalData data;
+ preconditioner.initialize(system_matrix, data);
+ solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+ preconditioner);
+ pcout << " Solved in " << solver_control.last_step()
+ << " iterations." << std::endl;
+ constraints.distribute (completely_distributed_solution);
+ locally_relevant_solution = completely_distributed_solution;
+}
+
+template <int dim>
+void SeventhProblem<dim>::run (unsigned int cycle)
+{
+ if (cycle == 0)
+ {
+ GridGenerator::hyper_cube (triangulation);
+ triangulation.refine_global (1);
+ GridGenerator::hyper_cube (second_triangulation);
+ second_triangulation.refine_global (1);
+ setup_system();
+ }
+ else
+ {
+
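+      // estimate the error, flag cells for refinement/coarsening, and
+      // record the flags so that the second mesh can replay exactly the
+      // same changes at the end of this cycle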
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+ KellyErrorEstimator<dim>::estimate (dof_handler,
+ QGauss<dim-1>(3),
+ typename FunctionMap<dim>::type(),
+ locally_relevant_solution,
+ estimated_error_per_cell);
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.5, 0.3);
+ std::vector<bool> r_flags;
+ std::vector<bool> c_flags;
+ triangulation.prepare_coarsening_and_refinement();
+ triangulation.save_refine_flags(r_flags);
+ triangulation.save_coarsen_flags(c_flags);
+
+ triangulation.execute_coarsening_and_refinement ();
+
+ setup_system();
+ pcout << " Number of active cells: "
+ << triangulation.n_global_active_cells()
+ << std::endl
+ << " Number of degrees of freedom: "
+ << dof_handler.n_dofs()
+ << std::endl;
+ assemble_system ();
+ solve ();
+
+ setup_second_system();
+ second_locally_relevant_solution = locally_relevant_solution;
+
+ VectorTools::interpolate_to_different_mesh(dof_handler, locally_relevant_solution,
+ second_dof_handler, interpolated_locally_relevant_solution);
+ second_triangulation.load_coarsen_flags(c_flags);
+ second_triangulation.load_refine_flags(r_flags);
+ second_triangulation.execute_coarsening_and_refinement();
+ }
+}
+
+void seventh_grid()
+{
+
+ ConditionalOStream pcout(deallog.get_file_stream(),
+ (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)
+ == 0));
+
+ pcout << "7th Starting" << std::endl;
+ SeventhProblem<2> lap(1);
+ const unsigned int n_cycles = 5;
+ for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
+ {
+ pcout << "Cycle " << cycle << ':' << std::endl;
+ lap.run(cycle);
+ }
+ pcout << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+ deallog.depth_file(0);
+
+ seventh_grid ();
+}