* Return the partitioning of the domain space of this matrix, i.e., the
* partitioning of the vectors this matrix has to be multiplied with.
*/
- std::vector< size_type > locally_domain_sizes() const;
+ std::vector< size_type > locally_owned_domain_sizes() const;
/**
* Return the partitioning of the range space of this matrix, i.e., the
* partitioning of the vectors that result from matrix-vector
* products.
*/
- std::vector< size_type > locally_range_sizes() const;
+ std::vector< size_type > locally_owned_range_sizes() const;
/**
* Make the clear() function in the base class visible, though it is
PETScWrappers::BlockVector &v,
bool omit_zeroing_entries)
{
- v.reinit(matrix.locally_range_sizes(), omit_zeroing_entries);
+ v.reinit(matrix.locally_owned_range_sizes(), omit_zeroing_entries);
}
template <typename Matrix>
PETScWrappers::BlockVector &v,
bool omit_zeroing_entries)
{
- v.reinit(matrix.locally_domain_sizes(), omit_zeroing_entries);
+ v.reinit(matrix.locally_owned_domain_sizes(), omit_zeroing_entries);
}
};
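For context, the renamed size functions are what drive the two reinit helpers above: a block vector can be sized directly from the matrix partitioning. A minimal usage sketch, not part of this patch, assuming an already assembled PETScWrappers::BlockSparseMatrix a:

    PETScWrappers::BlockVector u, v;
    // size u like the vectors a is applied to, v like the vectors it produces
    u.reinit(a.locally_owned_domain_sizes(), /*omit_zeroing_entries=*/false);
    v.reinit(a.locally_owned_range_sizes(), /*omit_zeroing_entries=*/false);
    a.vmult(v, u);  // the block sizes are now compatible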
std::vector<BlockSparseMatrix::size_type >
BlockSparseMatrix::
- locally_domain_sizes() const
+ locally_owned_domain_sizes() const
{
std::vector< size_type > index_sets;
std::vector<BlockSparseMatrix::size_type >
BlockSparseMatrix::
- locally_range_sizes() const
+ locally_owned_range_sizes() const
{
std::vector< size_type > index_sets;
#if DEAL_II_PETSC_VERSION_LT(3,3,0)
Assert(false,ExcNotImplemented());
#else
- PetscInt n_rows, n_cols, min, max, size;
+ PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
PetscErrorCode ierr;
- IS rows, cols;
ierr = MatGetSize (matrix, &n_rows, &n_cols);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- ierr = MatGetOwnershipIS(matrix, &rows, &cols);
+ ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- ierr = ISGetMinMax(rows, &min, &max);
+ ierr = MatGetOwnershipRangeColumn(matrix, &min, &max);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- IndexSet indices(n_rows);
- indices.add_range(min, max);
+ Assert(n_loc_cols==max-min, ExcMessage("PETSc is requiring non-contiguous memory allocation."));
- ierr = ISGetLocalSize(rows, &size);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- Assert(size==max-min+1, ExcMessage("PETSc is requiring non contiguous memory allocation."));
+ IndexSet indices(n_cols);
+ indices.add_range(min, max);
+ indices.compress();
return indices;
#endif
#if DEAL_II_PETSC_VERSION_LT(3,3,0)
Assert(false,ExcNotImplemented());
#else
- PetscInt n_rows, n_cols, min, max, size;
+ PetscInt n_rows, n_cols, n_loc_rows, n_loc_cols, min, max;
PetscErrorCode ierr;
- IS rows, cols;
ierr = MatGetSize (matrix, &n_rows, &n_cols);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- ierr = MatGetOwnershipIS(matrix, &rows, &cols);
+ ierr = MatGetLocalSize(matrix, &n_loc_rows, &n_loc_cols);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- ierr = ISGetMinMax(cols, &min, &max);
+ ierr = MatGetOwnershipRange(matrix, &min, &max);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- IndexSet indices(n_cols);
- indices.add_range(min, max);
+ Assert(n_loc_rows==max-min, ExcMessage("PETSc is requiring non-contiguous memory allocation."));
- ierr = ISGetLocalSize(cols, &size);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- Assert(size==max-min+1, ExcMessage("PETSc is requiring non contiguous memory allocation."));
+ IndexSet indices(n_rows);
+ indices.add_range(min, max);
+ indices.compress();
return indices;
#endif
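A note on the PETSc calls used in the two bodies above (presumably the locally owned domain/range index functions of the matrix wrapper; their signatures are not part of these hunks): MatGetOwnershipRange and MatGetOwnershipRangeColumn return a half-open interval, i.e. max is one past the last locally owned index, which is why the new consistency check compares the local size against max-min, while the old IS-based code needed max-min+1 because ISGetMinMax returns the largest index itself. A hedged sketch of how the returned index sets could be consumed, with the matrix a and the member names locally_owned_range_indices()/locally_owned_domain_indices() assumed rather than taken from the hunks:

    // Sketch only; a is an assembled parallel PETSc matrix wrapper.
    IndexSet owned_rows = a.locally_owned_range_indices();   // row partitioning (range space)
    IndexSet owned_cols = a.locally_owned_domain_indices();  // column partitioning (domain space)
    Assert(owned_rows.is_contiguous(), ExcInternalError());  // guaranteed by the checks above
    deallog << owned_rows.n_elements() << " locally owned rows, "
            << owned_cols.n_elements() << " locally owned columns" << std::endl;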
int main(int argc, char *argv[])
{
+ typedef PETScWrappers::MPI::SparseMatrix::size_type size_type;
+
Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
initlog();
deallog << std::setprecision(10);
{
- PETScWrappers::SparseMatrix a;
+ PETScWrappers::SparseMatrix a(2,2,2);
+ for (unsigned int i = 0; i<2; ++i)
+ for (unsigned int j = 0; j<2; ++j)
+ a.add (i,j, 2);
+ a.compress (VectorOperation::add);
+
+ PETScWrappers::Vector v(2);
+ for (unsigned int i = 0; i<2; ++i)
+ v[i] = 1;
+ PETScWrappers::Vector u(v);
auto op_a = linear_operator<PETScWrappers::Vector>(a);
+ op_a.vmult(u,v);
+ deallog << "SparseMatrix -> OK" << std::endl;
}
{
- PETScWrappers::MPI::SparseMatrix a;
- auto op_a = linear_operator<PETScWrappers::MPI::Vector>(a);
+ unsigned int np = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ if (4%np==0 && np<=4)
+ {
+ PETScWrappers::MPI::SparseMatrix a (MPI_COMM_WORLD, 4, 4, 4/np, 4/np, 1);
+ for (unsigned int i = 0; i<4; ++i)
+ for (unsigned int j = 0; j<4; ++j)
+ a.add (i,i, 1);
+ a.compress (VectorOperation::add);
+ auto op_a = linear_operator<PETScWrappers::MPI::Vector>(a);
+
+ PETScWrappers::MPI::Vector u,v;
+ op_a.reinit_domain_vector(u, true);
+ op_a.reinit_range_vector(v, true);
+ for (auto i : u.locally_owned_elements()) u[i] = 1;
+ for (auto i : v.locally_owned_elements()) v[i] = 1;
+
+ op_a.vmult(v,u);
+ }
+ deallog << "SparseMatrix MPI -> OK" << std::endl;
}
{
PETScWrappers::BlockSparseMatrix a;
auto op_a = linear_operator<PETScWrappers::BlockVector>(a);
+ deallog << "BlockSparseMatrix -> OK" << std::endl;
}
{
PETScWrappers::MPI::BlockSparseMatrix a;
auto op_a = linear_operator<PETScWrappers::MPI::BlockVector>(a);
+ deallog << "BlockSparseMatrix MPI -> OK" << std::endl;
}
deallog << "OK" << std::endl;
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
DEAL::OK
--- /dev/null
+
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
+DEAL::OK
--- /dev/null
+
+DEAL::SparseMatrix -> OK
+DEAL::SparseMatrix MPI -> OK
+DEAL::BlockSparseMatrix -> OK
+DEAL::BlockSparseMatrix MPI -> OK
+DEAL::OK