@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
}
// Matrices
--- /dev/null
+Improved: MGTransferPrebuilt now supports
+PETScWrappers::MPI::Vector and
+PETScWrappers::MPI::SparseMatrix.
+<br>
+(Alexander Knieps, 2018/05/21)
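
A minimal usage sketch of the new support, mirroring the test added at the
end of this patch; the DoFHandler `dofh` and the parallel Triangulation
`tria` (with a multigrid hierarchy and distributed level DoFs) are assumed
to be set up already:

  typedef PETScWrappers::MPI::Vector vector_t;

  MGTransferPrebuilt<vector_t> transfer;
  transfer.build_matrices(dofh);

  // Level vectors are partitioned by the locally owned level DoFs.
  MGLevelObject<vector_t> u(0, tria.n_global_levels()-1);
  for (unsigned int level = u.min_level(); level <= u.max_level(); ++level)
    u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);

  // Collect the level contributions into a global PETSc vector.
  vector_t v;
  v.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
  transfer.copy_from_mg(dofh, v, u);
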
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/vector_memory.h>
typedef ::dealii::SparsityPattern Sparsity;
typedef ::dealii::SparseMatrix<typename VectorType::value_type> Matrix;
+ static const bool requires_distributed_sparsity_pattern = false;
+
template <typename SparsityPatternType, typename DoFHandlerType>
static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const SparsityPatternType &sp, const DoFHandlerType &)
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
+ static const bool requires_distributed_sparsity_pattern = false;
+
template <typename SparsityPatternType, typename DoFHandlerType>
static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
+ static const bool requires_distributed_sparsity_pattern = false;
+
template <typename SparsityPatternType, typename DoFHandlerType>
static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
+ static const bool requires_distributed_sparsity_pattern = false;
+
template <typename SparsityPatternType, typename DoFHandlerType>
static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
{
typedef ::dealii::SparsityPattern Sparsity;
typedef ::dealii::SparseMatrix<Number> Matrix;
+ static const bool requires_distributed_sparsity_pattern = false;
+
template <typename SparsityPatternType, typename DoFHandlerType>
static void reinit(Matrix &, Sparsity &, int, const SparsityPatternType &, const DoFHandlerType &)
{
};
#endif
+
+#ifdef DEAL_II_WITH_PETSC
+ template <>
+ struct MatrixSelector<dealii::PETScWrappers::MPI::Vector>
+ {
+ typedef ::dealii::DynamicSparsityPattern Sparsity;
+ typedef ::dealii::PETScWrappers::MPI::SparseMatrix Matrix;
+
+ static const bool requires_distributed_sparsity_pattern = true;
+
+ template <typename SparsityPatternType, typename DoFHandlerType>
+ static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, const DoFHandlerType &dh)
+ {
+ const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension> *dist_tria =
+ dynamic_cast<const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension>*>
+ (&(dh.get_triangulation()));
+ MPI_Comm communicator = dist_tria != nullptr ?
+ dist_tria->get_communicator() :
+ MPI_COMM_SELF;
+ // Reinit PETSc matrix
+ matrix.reinit(dh.locally_owned_mg_dofs(level+1),
+ dh.locally_owned_mg_dofs(level),
+ sp, communicator);
+ }
+
+ };
+#endif
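
A rough sketch of how the new requires_distributed_sparsity_pattern flag is
consumed by MGTransferPrebuilt::build_matrices() (simplified from the code
further down in this patch; `dsp`, `communicator`, `mg_dof`, `level` and
`n_locally_owned_mg_dofs_per_processor` are the local variables used there):

  if (internal::MatrixSelector<VectorType>::requires_distributed_sparsity_pattern)
    {
      // PETSc cannot complete a partially known sparsity pattern on its own,
      // so the DynamicSparsityPattern is exchanged between processes first.
      SparsityTools::distribute_sparsity_pattern(dsp,
                                                 n_locally_owned_mg_dofs_per_processor,
                                                 communicator,
                                                 dsp.row_index_set());
    }

  internal::MatrixSelector<VectorType>::reinit(*prolongation_matrices[level],
                                               *prolongation_sparsities[level],
                                               level, dsp, mg_dof);
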
}
/*
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector_view.h>
#include <deal.II/grid/tria_iterator.h>
}
}
#endif
+
+#ifdef DEAL_II_WITH_PETSC
+ /**
+   * Adjust vectors on all levels to the correct size. Here, we just count
+   * the number of degrees of freedom on each level and @p reinit each
+   * level vector to this length.
+ */
+ template <int dim, int spacedim>
+ void
+ reinit_vector (const dealii::DoFHandler<dim,spacedim> &mg_dof,
+ const std::vector<unsigned int> &,
+ MGLevelObject<PETScWrappers::MPI::Vector> &v)
+ {
+ const dealii::parallel::Triangulation<dim,spacedim> *tria =
+ (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
+ (&mg_dof.get_triangulation()));
+ AssertThrow(tria!=nullptr, ExcMessage("multigrid with parallel PETSc vectors only works with a parallel Triangulation!"));
+
+ for (unsigned int level=v.min_level();
+ level<=v.max_level(); ++level)
+ {
+ v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria->get_communicator());
+ }
+ }
+#endif
}
//if (preset_nonzero_locations == true)
if (local_rows.n_elements()>0)
{
- Assert(local_columns.n_elements()>0, ExcInternalError());
// MatMPIAIJSetPreallocationCSR
// can be used to allocate the sparsity
// pattern of a matrix
MGLevelGlobalTransfer<TrilinosWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, TrilinosWrappers::MPI::Vector&,
const MGLevelObject<TrilinosWrappers::MPI::Vector>&) const;
#endif
+
+#ifdef DEAL_II_WITH_PETSC
+ template void
+ MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_to_mg (
+ const DoFHandler<deal_II_dimension>&, MGLevelObject<PETScWrappers::MPI::Vector>&, const PETScWrappers::MPI::Vector&) const;
+ template void
+ MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_from_mg (const DoFHandler<deal_II_dimension>&, PETScWrappers::MPI::Vector&,
+ const MGLevelObject<PETScWrappers::MPI::Vector>&) const;
+ template void
+ MGLevelGlobalTransfer<PETScWrappers::MPI::Vector>::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, PETScWrappers::MPI::Vector&,
+ const MGLevelObject<PETScWrappers::MPI::Vector>&) const;
+#endif
}
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/block_sparse_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_tools.h>
}
}
+#ifdef DEAL_II_WITH_MPI
+ if (internal::MatrixSelector<VectorType>::requires_distributed_sparsity_pattern)
+ {
+      // Since PETSc matrices do not offer the functionality to fill up
+      // incomplete sparsity patterns on their own, the sparsity pattern
+      // must be manually distributed.
+
+ // Retrieve communicator from triangulation if it is parallel
+ const parallel::Triangulation<dim,spacedim> *dist_tria =
+ dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
+ (&(mg_dof.get_triangulation()));
+
+ MPI_Comm communicator = dist_tria != nullptr ?
+ dist_tria->get_communicator() :
+ MPI_COMM_SELF;
+
+      // Compute the number of locally owned MG DoFs per processor, needed to distribute the sparsity pattern
+ const std::vector<::dealii::IndexSet> &locally_owned_mg_dofs_per_processor = mg_dof.locally_owned_mg_dofs_per_processor(level+1);
+ std::vector<::dealii::types::global_dof_index> n_locally_owned_mg_dofs_per_processor(locally_owned_mg_dofs_per_processor.size(), 0);
+
+ for (size_t index = 0; index < n_locally_owned_mg_dofs_per_processor.size(); ++index)
+ {
+ n_locally_owned_mg_dofs_per_processor[index] = locally_owned_mg_dofs_per_processor[index].n_elements();
+ }
+
+ // Distribute sparsity pattern
+ ::dealii::SparsityTools::distribute_sparsity_pattern(
+ dsp,
+ n_locally_owned_mg_dofs_per_processor,
+ communicator,
+ dsp.row_index_set()
+ );
+ }
+#endif
+
internal::MatrixSelector<VectorType>::reinit(*prolongation_matrices[level],
*prolongation_sparsities[level],
level,
// ---------------------------------------------------------------------
-// check mg transfer in parallel
+// check mg transfer in parallel for Trilinos vectors
#include "../tests.h"
#include <deal.II/base/function.h>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2006 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// check mg transfer in parallel for PETSc vectors
+
+#include "../tests.h"
+#include <deal.II/base/function.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/block_vector.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/function_map.h>
+#include <deal.II/fe/fe_dgq.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_raviart_thomas.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_constrained_dofs.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+
+#include <algorithm>
+
+using namespace std;
+
+template <int dim>
+void setup_tria(parallel::distributed::Triangulation<dim> &tr)
+{
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global(1);
+
+ for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
+ cell != tr.end(); ++cell)
+ {
+ if (cell->id().to_string() != "0_1:3")
+ cell->set_refine_flag();
+ }
+ tr.execute_coarsening_and_refinement();
+
+ for (typename parallel::distributed::Triangulation<dim>::active_cell_iterator cell = tr.begin_active();
+ cell != tr.end(); ++cell)
+ {
+ if (cell->id().to_string() == "0_2:00"
+ || cell->id().to_string() == "0_2:01"
+ || cell->id().to_string() == "0_2:02")
+ cell->set_refine_flag();
+ }
+ tr.execute_coarsening_and_refinement();
+
+
+ for (typename parallel::distributed::Triangulation<dim>::cell_iterator cell = tr.begin();
+ cell != tr.end(); ++cell)
+ {
+ deallog << "cell=" << cell->id()
+ << " level_subdomain_id=" << cell->level_subdomain_id()
+ << std::endl;
+ }
+}
+
+
+template <int dim>
+void check_fe(FiniteElement<dim> &fe)
+{
+ deallog << fe.get_name() << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ Triangulation<dim>::limit_level_difference_at_vertices,
+ parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+ setup_tria(tr);
+
+ if (false)
+ {
+ DataOut<dim> data_out;
+ Vector<float> subdomain (tr.n_active_cells());
+ for (unsigned int i=0; i<subdomain.size(); ++i)
+ subdomain(i) = tr.locally_owned_subdomain();
+ data_out.attach_triangulation (tr);
+ data_out.add_data_vector (subdomain, "subdomain");
+ data_out.build_patches (0);
+ const std::string filename = ("solution." +
+ Utilities::int_to_string
+ (tr.locally_owned_subdomain(), 4) +
+ ".vtu");
+ std::ofstream output (filename.c_str());
+ data_out.write_vtu (output);
+
+ }
+
+
+ Functions::ZeroFunction<dim> zero;
+ typename FunctionMap<dim>::type fmap;
+ fmap.insert(std::make_pair(0, &zero));
+
+ DoFHandler<dim> dofh(tr);
+ dofh.distribute_dofs(fe);
+ dofh.distribute_mg_dofs(fe);
+ typedef PETScWrappers::MPI::Vector vector_t;
+ MGTransferPrebuilt<vector_t> transfer;
+ transfer.build_matrices(dofh);
+ transfer.print_indices(deallog.get_file_stream());
+
+ MGLevelObject<vector_t> u(0, tr.n_global_levels()-1);
+ for (unsigned int level=u.min_level(); level<=u.max_level(); ++level)
+ {
+ u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
+ for (unsigned int i=0; i<dofh.locally_owned_mg_dofs(level).n_elements(); ++i)
+ {
+ unsigned int index = dofh.locally_owned_mg_dofs(level).nth_index_in_set(i);
+ u[level][index] = 1;//1000+level*100+index;
+ }
+ u[level].compress(VectorOperation::insert);
+ }
+
+ vector_t v;
+ v.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
+ v = 0.;
+ transfer.copy_from_mg(dofh, v, u);
+
+ {
+ for (unsigned int i=0; i<dofh.locally_owned_dofs().n_elements(); ++i)
+ {
+ unsigned int index = dofh.locally_owned_dofs().nth_index_in_set(i);
+ deallog << v[index] << " ";
+ }
+ }
+ //v.print(deallog.get_file_stream());
+ deallog << "ok" << std::endl;
+}
+
+
+template <int dim>
+void check()
+{
+ FE_Q<dim> q1(1);
+ FE_Q<dim> q2(2);
+// FE_DGQ<dim> dq1(1);
+
+  FESystem<dim> s1(q1, 2, q2, 1);
+
+ check_fe(q1);
+ // check_fe(q2);
+ //check_fe(s1);
+}
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ check<2> ();
+ //check<3> ();
+}
--- /dev/null
+
+DEAL:0::FE_Q<2>(1)
+DEAL:0::cell=0_0: level_subdomain_id=0
+DEAL:0::cell=0_1:0 level_subdomain_id=0
+DEAL:0::cell=0_1:1 level_subdomain_id=0
+DEAL:0::cell=0_1:2 level_subdomain_id=0
+DEAL:0::cell=0_1:3 level_subdomain_id=0
+DEAL:0::cell=0_2:00 level_subdomain_id=0
+DEAL:0::cell=0_2:01 level_subdomain_id=0
+DEAL:0::cell=0_2:02 level_subdomain_id=0
+DEAL:0::cell=0_2:03 level_subdomain_id=0
+DEAL:0::cell=0_2:10 level_subdomain_id=0
+DEAL:0::cell=0_2:11 level_subdomain_id=0
+DEAL:0::cell=0_2:12 level_subdomain_id=0
+DEAL:0::cell=0_2:13 level_subdomain_id=0
+DEAL:0::cell=0_2:20 level_subdomain_id=0
+DEAL:0::cell=0_2:21 level_subdomain_id=0
+DEAL:0::cell=0_2:22 level_subdomain_id=0
+DEAL:0::cell=0_2:23 level_subdomain_id=0
+DEAL:0::cell=0_3:000 level_subdomain_id=0
+DEAL:0::cell=0_3:001 level_subdomain_id=0
+DEAL:0::cell=0_3:002 level_subdomain_id=0
+DEAL:0::cell=0_3:003 level_subdomain_id=0
+DEAL:0::cell=0_3:010 level_subdomain_id=0
+DEAL:0::cell=0_3:011 level_subdomain_id=0
+DEAL:0::cell=0_3:012 level_subdomain_id=0
+DEAL:0::cell=0_3:013 level_subdomain_id=0
+DEAL:0::cell=0_3:020 level_subdomain_id=0
+DEAL:0::cell=0_3:021 level_subdomain_id=0
+DEAL:0::cell=0_3:022 level_subdomain_id=0
+DEAL:0::cell=0_3:023 level_subdomain_id=0
+copy_indices[1] 0 3
+copy_indices[1] 1 5
+copy_indices[1] 2 7
+copy_indices[1] 3 8
+copy_indices[2] 0 8
+copy_indices[2] 1 14
+copy_indices[2] 2 20
+copy_indices[2] 4 3
+copy_indices[2] 5 5
+copy_indices[2] 6 7
+copy_indices[2] 7 4
+copy_indices[2] 8 9
+copy_indices[2] 9 10
+copy_indices[2] 10 11
+copy_indices[2] 11 12
+copy_indices[2] 12 13
+copy_indices[2] 13 6
+copy_indices[2] 14 15
+copy_indices[2] 15 16
+copy_indices[2] 16 17
+copy_indices[2] 17 18
+copy_indices[2] 18 19
+copy_indices[3] 4 8
+copy_indices[3] 5 14
+copy_indices[3] 6 20
+copy_indices[3] 7 11
+copy_indices[3] 13 18
+copy_indices[3] 19 0
+copy_indices[3] 20 1
+copy_indices[3] 21 2
+copy_indices[3] 22 3
+copy_indices[3] 23 4
+copy_indices[3] 24 5
+copy_indices[3] 25 6
+copy_indices[3] 26 7
+copy_indices[3] 27 9
+copy_indices[3] 28 10
+copy_indices[3] 29 12
+copy_indices[3] 30 13
+copy_indices[3] 31 15
+copy_indices[3] 32 16
+copy_indices[3] 33 17
+copy_indices[3] 34 19
+DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
--- /dev/null
+
+DEAL:0::FE_Q<2>(1)
+DEAL:0::cell=0_0: level_subdomain_id=0
+DEAL:0::cell=0_1:0 level_subdomain_id=0
+DEAL:0::cell=0_1:1 level_subdomain_id=1
+DEAL:0::cell=0_1:2 level_subdomain_id=1
+DEAL:0::cell=0_1:3 level_subdomain_id=1
+DEAL:0::cell=0_2:00 level_subdomain_id=0
+DEAL:0::cell=0_2:01 level_subdomain_id=0
+DEAL:0::cell=0_2:02 level_subdomain_id=0
+DEAL:0::cell=0_2:03 level_subdomain_id=1
+DEAL:0::cell=0_2:10 level_subdomain_id=1
+DEAL:0::cell=0_2:11 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:12 level_subdomain_id=1
+DEAL:0::cell=0_2:13 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:20 level_subdomain_id=1
+DEAL:0::cell=0_2:21 level_subdomain_id=1
+DEAL:0::cell=0_2:22 level_subdomain_id=4294967294
+DEAL:0::cell=0_2:23 level_subdomain_id=4294967294
+DEAL:0::cell=0_3:000 level_subdomain_id=0
+DEAL:0::cell=0_3:001 level_subdomain_id=0
+DEAL:0::cell=0_3:002 level_subdomain_id=0
+DEAL:0::cell=0_3:003 level_subdomain_id=0
+DEAL:0::cell=0_3:010 level_subdomain_id=0
+DEAL:0::cell=0_3:011 level_subdomain_id=0
+DEAL:0::cell=0_3:012 level_subdomain_id=0
+DEAL:0::cell=0_3:013 level_subdomain_id=0
+DEAL:0::cell=0_3:020 level_subdomain_id=0
+DEAL:0::cell=0_3:021 level_subdomain_id=0
+DEAL:0::cell=0_3:022 level_subdomain_id=0
+DEAL:0::cell=0_3:023 level_subdomain_id=0
+copy_indices[2] 8 3
+copy_indices[2] 11 4
+copy_indices[2] 14 5
+copy_indices[2] 18 6
+copy_indices[2] 20 7
+copy_indices[3] 0 0
+copy_indices[3] 1 1
+copy_indices[3] 2 2
+copy_indices[3] 3 3
+copy_indices[3] 4 4
+copy_indices[3] 5 5
+copy_indices[3] 6 6
+copy_indices[3] 7 7
+copy_indices[3] 8 8
+copy_indices[3] 9 9
+copy_indices[3] 10 10
+copy_indices[3] 11 11
+copy_indices[3] 12 12
+copy_indices[3] 13 13
+copy_indices[3] 14 14
+copy_indices[3] 15 15
+copy_indices[3] 16 16
+copy_indices[3] 17 17
+copy_indices[3] 18 18
+copy_indices[3] 19 19
+copy_indices[3] 20 20
+copy_ifrom [1] 21 3
+DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
+
+DEAL:1::FE_Q<2>(1)
+DEAL:1::cell=0_0: level_subdomain_id=0
+DEAL:1::cell=0_1:0 level_subdomain_id=0
+DEAL:1::cell=0_1:1 level_subdomain_id=1
+DEAL:1::cell=0_1:2 level_subdomain_id=1
+DEAL:1::cell=0_1:3 level_subdomain_id=1
+DEAL:1::cell=0_2:00 level_subdomain_id=0
+DEAL:1::cell=0_2:01 level_subdomain_id=0
+DEAL:1::cell=0_2:02 level_subdomain_id=0
+DEAL:1::cell=0_2:03 level_subdomain_id=1
+DEAL:1::cell=0_2:10 level_subdomain_id=1
+DEAL:1::cell=0_2:11 level_subdomain_id=1
+DEAL:1::cell=0_2:12 level_subdomain_id=1
+DEAL:1::cell=0_2:13 level_subdomain_id=1
+DEAL:1::cell=0_2:20 level_subdomain_id=1
+DEAL:1::cell=0_2:21 level_subdomain_id=1
+DEAL:1::cell=0_2:22 level_subdomain_id=1
+DEAL:1::cell=0_2:23 level_subdomain_id=1
+DEAL:1::cell=0_3:000 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:001 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:002 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:003 level_subdomain_id=0
+DEAL:1::cell=0_3:010 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:011 level_subdomain_id=0
+DEAL:1::cell=0_3:012 level_subdomain_id=0
+DEAL:1::cell=0_3:013 level_subdomain_id=0
+DEAL:1::cell=0_3:020 level_subdomain_id=4294967294
+DEAL:1::cell=0_3:021 level_subdomain_id=0
+DEAL:1::cell=0_3:022 level_subdomain_id=0
+DEAL:1::cell=0_3:023 level_subdomain_id=0
+copy_indices[1] 22 5
+copy_indices[1] 23 7
+copy_indices[1] 24 8
+copy_indices[2] 21 8
+copy_indices[2] 22 14
+copy_indices[2] 23 20
+copy_indices[2] 25 9
+copy_indices[2] 26 10
+copy_indices[2] 27 11
+copy_indices[2] 28 12
+copy_indices[2] 29 13
+copy_indices[2] 30 15
+copy_indices[2] 31 16
+copy_indices[2] 32 17
+copy_indices[2] 33 18
+copy_indices[2] 34 19
+copy_ito [1] 21 3
+DEAL:1::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok
+