for (TrilinosWrappers::types::int_type i=0; i<n_columns; ++i)
std::cout << col_index_ptr[i] << " ";
std::cout << std::endl << std::endl;
- std::cout << "Matrix row has the following indices:" << std::endl;
+ std::cout << "Matrix row "
+           << (row_partitioner().MyGID(static_cast<TrilinosWrappers::types::int_type>(row))
+               == false ? "(nonlocal part)" : "")
+           << " has the following indices:" << std::endl;
std::vector<TrilinosWrappers::types::int_type> indices;
const Epetra_CrsGraph* graph =
(nonlocal_matrix.get() != 0 &&
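// A minimal standalone sketch of the ownership test behind the diagnostic
// message above: Epetra_BlockMap::MyGID() returns true only when the calling
// process owns the given global row, so a 'false' result means the entry ends
// up in the nonlocal part of the matrix. The linear map of 100 rows and the
// row index 42 are assumptions made only for this example.
#include <Epetra_MpiComm.h>
#include <Epetra_Map.h>
#include <mpi.h>
#include <iostream>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  {
    Epetra_MpiComm comm(MPI_COMM_WORLD);
    Epetra_Map     row_map(100, 0, comm);   // 100 global rows, index base 0

    const int row = 42;                     // arbitrary global row index
    std::cout << "Matrix row " << row
              << (row_map.MyGID(row) ? "" : " (nonlocal part)")
              << " on process " << comm.MyPID() << std::endl;
  }
  MPI_Finalize();
  return 0;
}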
{
// Refinement edges are taken care of by coarser
// cells
-
- // TODO: in the distributed case, we miss out the
- // constraints when the neighbor cell is coarser, but
- // only the current cell is owned locally!
- if (cell->neighbor_is_coarser(face))
+ if (cell->neighbor_is_coarser(face) &&
+ neighbor->subdomain_id() == cell->subdomain_id())
continue;
const unsigned int n_dofs_on_neighbor
// around
if (!cell->neighbor(face)->active()
||
- (cell->neighbor(face)->subdomain_id() !=
- cell->subdomain_id()))
+ (neighbor->subdomain_id() != cell->subdomain_id()))
{
constraints.add_entries_local_to_global
(dofs_on_other_cell, dofs_on_this_cell,
sparsity, keep_constrained_dofs);
- if (cell->neighbor(face)->subdomain_id() !=
- cell->subdomain_id())
+ if (neighbor->subdomain_id() != cell->subdomain_id())
constraints.add_entries_local_to_global
(dofs_on_other_cell, sparsity, keep_constrained_dofs);
}
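// The hunk above skips a coarser neighbor only if it belongs to the same
// subdomain: in that case the coarser cell's own loop adds the face couplings.
// If the coarser neighbor is a ghost cell, the locally owned (finer) cell has
// to add the entries itself or they would be missing in the distributed case.
// The standalone helper below restates that predicate; plain ints stand in
// for deal.II's types::subdomain_id, an assumption made to keep it minimal.
#include <cassert>

// True if the face towards a coarser neighbor may be skipped, i.e. the
// neighbor is coarser *and* owned by the same subdomain as the current cell.
bool skip_coarser_neighbor(const bool neighbor_is_coarser,
                           const int  cell_subdomain,
                           const int  neighbor_subdomain)
{
  return neighbor_is_coarser && (neighbor_subdomain == cell_subdomain);
}

int main()
{
  // Coarser neighbor on the same process: the coarser cell handles the face.
  assert(skip_coarser_neighbor(true, 0, 0) == true);
  // Coarser ghost neighbor: the current cell must add the couplings itself.
  assert(skip_coarser_neighbor(true, 0, 1) == false);
  // A neighbor on the same refinement level is never skipped by this test.
  assert(skip_coarser_neighbor(false, 0, 1) == false);
  return 0;
}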
Assert (status.MPI_TAG==124, ExcInternalError());
MPI_Get_count(&status, MPI_BYTE, &len);
- Assert( len%sizeof(unsigned int)==0, ExcInternalError());
+ Assert( len%sizeof(size_type)==0, ExcInternalError());
recv_buf.resize(len/sizeof(size_type));
}
}
- // complete all sends, so that we can
- // safely destroy the buffers.
+ // complete all sends, so that we can safely destroy the buffers.
MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
}
}
}
- // complete all sends, so that we can
- // safely destroy the buffers.
+ // complete all sends, so that we can safely destroy the buffers.
MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
}
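// A self-contained restatement of the receive/cleanup pattern above: probe
// for a message with the expected tag, size the receive buffer from the byte
// count (which must be a multiple of the element size), receive, and only
// then MPI_Waitall on the outstanding sends so the send buffers can be
// destroyed safely. Tag 124 and the size_t payload are assumptions for the
// example; run it with exactly two MPI processes.
#include <mpi.h>
#include <cassert>
#include <cstddef>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (size == 2)
    {
      const int other = 1 - rank;
      std::vector<std::size_t> send_buf(3 + rank, rank);   // unequal lengths

      MPI_Request request;
      MPI_Isend(send_buf.data(),
                static_cast<int>(send_buf.size() * sizeof(std::size_t)),
                MPI_BYTE, other, 124, MPI_COMM_WORLD, &request);

      // Probe first so the receive buffer can be sized exactly.
      MPI_Status status;
      MPI_Probe(other, 124, MPI_COMM_WORLD, &status);
      assert(status.MPI_TAG == 124);

      int len;
      MPI_Get_count(&status, MPI_BYTE, &len);
      assert(len % sizeof(std::size_t) == 0);

      std::vector<std::size_t> recv_buf(len / sizeof(std::size_t));
      MPI_Recv(recv_buf.data(), len, MPI_BYTE, other, 124,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      // Complete the send before send_buf goes out of scope.
      MPI_Waitall(1, &request, MPI_STATUSES_IGNORE);
    }

  MPI_Finalize();
  return 0;
}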
Utilities::MPI::internal::mpi_type_id(&my_elements),
communicator->Comm());
+ AssertDimension(std::accumulate(owned_per_proc.begin(),
+                                 owned_per_proc.end(), size_type()),
+                 sparsity_pattern.n_rows());
+
SparsityTools::distribute_sparsity_pattern
(const_cast<CompressedSimpleSparsityPattern&>(sparsity_pattern),
owned_per_proc, communicator->Comm(), sparsity_pattern.row_index_set());
+
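// A standalone sketch of the consistency check added above: every process
// contributes the number of rows it owns, and the gathered per-process counts
// must add up to the global number of rows before the sparsity pattern is
// distributed. Ten rows per process is an assumption for the example, and
// assert() stands in for deal.II's AssertDimension.
#include <mpi.h>
#include <cassert>
#include <numeric>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int n_procs;
  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);

  const unsigned long n_locally_owned_rows = 10;        // example value
  const unsigned long n_global_rows = 10UL * n_procs;   // what n_rows() reports

  // Gather the locally owned row count of every process.
  std::vector<unsigned long> owned_per_proc(n_procs);
  MPI_Allgather(&n_locally_owned_rows, 1, MPI_UNSIGNED_LONG,
                owned_per_proc.data(), 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);

  // The counts must account for every row of the global sparsity pattern.
  assert(std::accumulate(owned_per_proc.begin(), owned_per_proc.end(), 0ul)
         == n_global_rows);

  MPI_Finalize();
  return 0;
}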
void
SparseMatrix::reinit (const SparsityPattern &sparsity_pattern)
{
template void
SparseMatrix::reinit (const Epetra_Map &,
const Epetra_Map &,
- const CompressedSimpleSparsityPattern &,
+ const CompressedSetSparsityPattern &,
const bool);
}