ArrayView<ElementType>
make_array_view (std::vector<ElementType> &vector)
{
- return ArrayView<ElementType> (&vector[0], vector.size());
+ return ArrayView<ElementType> (vector.data(), vector.size());
}
ArrayView<const ElementType>
make_array_view (const std::vector<ElementType> &vector)
{
- return ArrayView<const ElementType> (&vector[0], vector.size());
+ return ArrayView<const ElementType> (vector.data(), vector.size());
}
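// ---------------------------------------------------------------------
// Aside (not part of the patch): the motivation for all of the
// &v[0] -> v.data() conversions in this series. Forming &v[0] on an
// empty std::vector is undefined behavior because operator[] requires a
// valid index, whereas v.data() is well-defined for every vector (for
// size()==0 it returns a pointer that must not be dereferenced). A
// minimal compilable sketch of the distinction:
#include <vector>

void data_versus_subscript_sketch()
{
  std::vector<char> empty;
  char *p = empty.data();  // OK: possibly nullptr, must not be dereferenced
  // char *q = &empty[0];  // undefined behavior on an empty vector
  (void)p;
}
// ---------------------------------------------------------------------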
decompressing_stream.push(boost::iostreams::gzip_decompressor());
decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
- decompressing_stream.write (&buffer[0], buffer.size());
+ decompressing_stream.write (buffer.data(), buffer.size());
}
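// ---------------------------------------------------------------------
// Aside (not part of the patch): the Boost.Iostreams idiom used above,
// as a self-contained sketch. Bytes written to the filtering_ostream
// pass through gzip_decompressor() and are appended to the destination
// string by back_inserter; the filter flushes when the stream is
// destroyed.
#include <boost/iostreams/device/back_inserter.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <string>
#include <vector>

std::string gunzip_sketch(const std::vector<char> &buffer)
{
  std::string decompressed;
  {
    boost::iostreams::filtering_ostream stream;
    stream.push(boost::iostreams::gzip_decompressor());
    stream.push(boost::iostreams::back_inserter(decompressed));
    stream.write(buffer.data(), buffer.size());
  } // destruction flushes the decompressor into 'decompressed'
  return decompressed;
}
// ---------------------------------------------------------------------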
// then restore the object from the buffer
receive.resize(len);
- char *ptr = &receive[0];
+ char *ptr = receive.data();
ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
tria->get_communicator(), &status);
AssertThrowMPI(ierr);
// when we leave this function.
if (requests.size())
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
#endif // DEAL_II_WITH_MPI
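// ---------------------------------------------------------------------
// Aside (not part of the patch): a minimal sketch of the nonblocking
// send + MPI_Waitall pattern above, with hypothetical names. Note that
// requests.data() would be valid even for an empty vector, but the call
// is guarded regardless. Assumes MPI has been initialized.
#include <mpi.h>
#include <cstddef>
#include <vector>

void waitall_sketch(MPI_Comm comm,
                    const std::vector<int> &destinations,
                    std::vector<char>      &buffer)
{
  std::vector<MPI_Request> requests(destinations.size());
  for (std::size_t i = 0; i < destinations.size(); ++i)
    MPI_Isend(buffer.data(), static_cast<int>(buffer.size()), MPI_BYTE,
              destinations[i], /*tag*/ 42, comm, &requests[i]);

  if (requests.size() > 0)
    MPI_Waitall(static_cast<int>(requests.size()), requests.data(),
                MPI_STATUSES_IGNORE);
}
// ---------------------------------------------------------------------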
{
buffer.resize(bytes_for_buffer());
- char *ptr = &buffer[0];
+ char *ptr = buffer.data();
unsigned int n_dofs = dof_values.size ();
std::memcpy(ptr, &n_dofs, sizeof(unsigned int));
std::memcpy(ptr,&quadrant,sizeof(typename dealii::internal::p4est::types<dim>::quadrant));
ptr += sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
- Assert (ptr == &buffer[0]+buffer.size(),
+ Assert (ptr == buffer.data()+buffer.size(),
ExcInternalError());
}
void unpack_data (const std::vector<char> &buffer)
{
- const char *ptr = &buffer[0];
+ const char *ptr = buffer.data();
unsigned int n_dofs;
memcpy(&n_dofs, ptr, sizeof(unsigned int));
ptr += sizeof(unsigned int);
std::memcpy(&quadrant,ptr,sizeof(typename dealii::internal::p4est::types<dim>::quadrant));
ptr += sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
- Assert (ptr == &buffer[0]+buffer.size(),
+ Assert (ptr == buffer.data()+buffer.size(),
ExcInternalError());
}
};
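// ---------------------------------------------------------------------
// Aside (not part of the patch): the pack_data()/unpack_data() pair
// above walks a raw char buffer with std::memcpy and asserts that
// exactly buffer.size() bytes were produced or consumed. A minimal
// round-trip sketch of that idiom for a single field:
#include <cassert>
#include <cstring>
#include <vector>

void pack_unpack_sketch()
{
  const unsigned int n_dofs = 42;

  // pack: advance a write pointer field by field
  std::vector<char> buffer(sizeof(unsigned int));
  char *ptr = buffer.data();
  std::memcpy(ptr, &n_dofs, sizeof(unsigned int));
  ptr += sizeof(unsigned int);
  assert(ptr == buffer.data() + buffer.size());

  // unpack: mirror the same walk with a read pointer
  unsigned int restored = 0;
  std::memcpy(&restored, buffer.data(), sizeof(unsigned int));
  assert(restored == n_dofs);
}
// ---------------------------------------------------------------------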
AssertThrowMPI(ierr);
receive.resize (len);
- char *buf = &receive[0];
+ char *buf = receive.data();
ierr = MPI_Recv (buf, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG, communicator, &status);
AssertThrowMPI(ierr);
if (requests.size () > 0)
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
// call of ARPACK dsaupd/dnaupd routine
if (additional_data.symmetric)
dsaupd_(&ido, bmat, &n, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0],
- &workd[0], &workl[0], &lworkl, &info);
+ resid.data(), &ncv, v.data(), &ldv, iparam.data(), ipntr.data(),
+ workd.data(), workl.data(), &lworkl, &info);
else
dnaupd_(&ido, bmat, &n, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0],
- &workd[0], &workl[0], &lworkl, &info);
+ resid.data(), &ncv, v.data(), &ldv, iparam.data(), ipntr.data(),
+ workd.data(), workl.data(), &lworkl, &info);
if (ido == 99)
break;
if (additional_data.symmetric)
{
std::vector<double> z (ldz*nev, 0.);
- dseupd_(&rvec, &howmany, &select[0], &eigenvalues_real[0],
- &z[0], &ldz, &sigmar, bmat, &n, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ dseupd_(&rvec, &howmany, select.data(), eigenvalues_real.data(),
+ z.data(), &ldz, &sigmar, bmat, &n, which, &nev, &tol,
+ resid.data(), &ncv, v.data(), &ldv,
+ iparam.data(), ipntr.data(), workd.data(), workl.data(), &lworkl, &info);
}
else
{
std::vector<double> workev (3*ncv, 0.);
- dneupd_(&rvec, &howmany, &select[0], &eigenvalues_real[0],
- &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai,
- &workev[0], bmat, &n, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ dneupd_(&rvec, &howmany, select.data(), eigenvalues_real.data(),
+ eigenvalues_im.data(), v.data(), &ldz, &sigmar, &sigmai,
+ workev.data(), bmat, &n, which, &nev, &tol,
+ resid.data(), &ncv, v.data(), &ldv,
+ iparam.data(), ipntr.data(), workd.data(), workl.data(), &lworkl, &info);
}
if (info == 1)
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ set (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (values.n() == values.m(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- set (indices[i], indices.size(), &indices[0], &values(i,0),
+ set (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- set (row, col_indices.size(), &col_indices[0], &values[0],
+ set (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- add (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ add (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (values.n() == values.m(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- add (indices[i], indices.size(), &indices[0], &values(i,0),
+ add (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- add (row, col_indices.size(), &col_indices[0], &values[0],
+ add (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
{
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(), values.size()));
- add (indices.size(), &indices[0], &values[0]);
+ add (indices.size(), indices.data(), values.data());
}
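// ---------------------------------------------------------------------
// Aside (not part of the patch): all of the set()/add() overloads in
// this file follow the same scheme -- check that the index and value
// arrays match, then forward the vectors' raw pointers to the low-level
// kernel. A simplified, hypothetical model of that delegation:
#include <cassert>
#include <cstddef>
#include <vector>

struct MatrixModel
{
  // low-level kernel working on raw arrays
  void add(std::size_t n, const std::size_t *indices, const double *values);

  // convenience overload forwarding via .data()
  void add(const std::vector<std::size_t> &indices,
           const std::vector<double>      &values)
  {
    assert(indices.size() == values.size());
    add(indices.size(), indices.data(), values.data());
  }
};
// ---------------------------------------------------------------------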
val_ptr += chunk_size * chunk_size;
}
}
- Assert(std::size_t(colnum_ptr-&colnums[0]) == rowstart[end_row],
+ Assert(std::size_t(colnum_ptr - colnums) == rowstart[end_row],
ExcInternalError());
- Assert(std::size_t(val_ptr-&values[0]) ==
+ Assert(std::size_t(val_ptr - values) ==
rowstart[end_row] * chunk_size * chunk_size,
ExcInternalError());
}
val.get()),
grain_size);
else if (matrix_size > 0)
- std::memset (&val[0], 0, matrix_size*sizeof(number));
+ std::memset (val.get(), 0, matrix_size*sizeof(number));
return *this;
}
// around the matrix. since we have the invariant that padding elements are
// zero, nothing bad can happen here
const size_type chunk_size = cols->get_chunk_size();
- return std::count_if(&val[0],
- &val[cols->sparsity_pattern.n_nonzero_elements () *
- chunk_size * chunk_size],
+ return std::count_if(val.get(),
+ val.get() + cols->sparsity_pattern.n_nonzero_elements ()
+ * chunk_size * chunk_size,
std::bind(std::not_equal_to<double>(), std::placeholders::_1, 0));
}
// copy everything, including padding elements
const size_type chunk_size = cols->get_chunk_size();
- std::copy (&matrix.val[0],
- &matrix.val[cols->sparsity_pattern.n_nonzero_elements()
- * chunk_size * chunk_size],
- &val[0]);
+ std::copy (matrix.val.get(),
+ matrix.val.get() + cols->sparsity_pattern.n_nonzero_elements()
+ * chunk_size * chunk_size,
+ val.get());
return *this;
}
// add everything, including padding elements
const size_type chunk_size = cols->get_chunk_size();
- number *val_ptr = &val[0];
+ number *val_ptr = val.get();
- const somenumber *matrix_ptr = &matrix.val[0];
+ const somenumber *matrix_ptr = matrix.val.get();
- const number *const end_ptr = &val[cols->sparsity_pattern.n_nonzero_elements()
- * chunk_size * chunk_size];
+ const number *const end_ptr = val.get() + cols->sparsity_pattern.n_nonzero_elements()
+ * chunk_size * chunk_size;
while (val_ptr != end_ptr)
*val_ptr++ += factor * (*matrix_ptr++);
//
// padding elements are zero, so we can add them up as well
real_type norm_sqr = 0;
- for (const number *ptr = &val[0]; ptr != &val[max_len]; ++ptr)
+ for (const number *ptr = val.get(); ptr != val.get() + max_len; ++ptr)
norm_sqr += numbers::NumberTraits<number>::abs_square(*ptr);
return std::sqrt (norm_sqr);
// Use the LAPACK function getrf for
// calculating the LU factorization.
- getrf(&nn, &nn, &this->values[0], &nn, &ipiv[0], &info);
+ getrf(&nn, &nn, &this->values[0], &nn, ipiv.data(), &info);
Assert(info >= 0, ExcInternalError());
Assert(info == 0, LACExceptions::ExcSingular());
// Use the LAPACK function getri for
// calculating the actual inverse using
// the LU factorization.
- getri(&nn, &this->values[0], &nn, &ipiv[0], &inv_work[0], &nn, &info);
+ getri(&nn, &this->values[0], &nn, ipiv.data(), inv_work.data(), &nn, &info);
Assert(info >= 0, ExcInternalError());
Assert(info == 0, LACExceptions::ExcSingular());
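// ---------------------------------------------------------------------
// Aside (not part of the patch): getrf + getri is LAPACK's standard
// in-place inversion -- LU factorization first, then the inverse built
// from the factors. A minimal sketch against the raw Fortran interface
// (column-major storage; prototypes declared by hand):
#include <vector>

extern "C" void dgetrf_(const int *m, const int *n, double *a,
                        const int *lda, int *ipiv, int *info);
extern "C" void dgetri_(const int *n, double *a, const int *lda,
                        const int *ipiv, double *work, const int *lwork,
                        int *info);

void invert_sketch(std::vector<double> &a, const int n) // a holds n*n entries
{
  std::vector<int>    ipiv(n);
  std::vector<double> work(n); // LWORK >= N is the documented minimum
  int                 info = 0;
  dgetrf_(&n, &n, a.data(), &n, ipiv.data(), &info);
  if (info == 0)
    dgetri_(&n, a.data(), &n, ipiv.data(), work.data(), &n, &info);
}
// ---------------------------------------------------------------------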
// first wait for the receive to complete
if (compress_requests.size() > 0 && n_import_targets > 0)
{
- const int ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
+ const int ierr = MPI_Waitall (n_import_targets, compress_requests.data(),
MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
Threads::Mutex::ScopedLock lock (mutex);
const int ierr = MPI_Waitall (update_ghost_values_requests.size(),
- &update_ghost_values_requests[0],
+ update_ghost_values_requests.data(),
MPI_STATUSES_IGNORE);
AssertThrowMPI (ierr);
}
if (update_ghost_values_requests.size()>0)
{
const int ierr = MPI_Testall (update_ghost_values_requests.size(),
- &update_ghost_values_requests[0],
+ update_ghost_values_requests.data(),
&flag, MPI_STATUSES_IGNORE);
AssertThrowMPI (ierr);
Assert (flag == 1,
}
if (compress_requests.size()>0)
{
- const int ierr = MPI_Testall (compress_requests.size(), &compress_requests[0],
+ const int ierr = MPI_Testall (compress_requests.size(), compress_requests.data(),
&flag, MPI_STATUSES_IGNORE);
AssertThrowMPI (ierr);
Assert (flag == 1,
AssertDimension (c_indices.size(), values.n());
for (size_type i=0; i<row_indices.size(); ++i)
- add (r_indices[i], c_indices.size(), &c_indices[0], &values(i,0),
+ add (r_indices[i], c_indices.size(), c_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (values.n() == values.m(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- add (indices[i], indices.size(), &indices[0], &values(i,0),
+ add (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
Assert(column_indices.size() != 0, ExcNotInitialized());
AssertDimension (col_indices.size(), values.size());
- add (row, col_indices.size(), &col_indices[0], &values[0],
+ add (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
ExcDimensionMismatch(resid.size(),local_indices.size()));
vec.extract_subvector_to (local_indices.begin(),
local_indices.end(),
- &resid[0]);
+ resid.data());
}
// call of ARPACK pdnaupd routine
if (additional_data.symmetric)
pdsaupd_(&mpi_communicator_fortran,&ido, bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0],
- &workd[0], &workl[0], &lworkl, &info);
+ resid.data(), &ncv, v.data(), &ldv, iparam.data(), ipntr.data(),
+ workd.data(), workl.data(), &lworkl, &info);
else
pdnaupd_(&mpi_communicator_fortran,&ido, bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv, &iparam[0], &ipntr[0],
- &workd[0], &workl[0], &lworkl, &info);
+ resid.data(), &ncv, v.data(), &ldv, iparam.data(), ipntr.data(),
+ workd.data(), workl.data(), &lworkl, &info);
AssertThrow (info == 0, PArpackExcInfoPdnaupd(info));
// compute Y = OP * X
{
src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_x );
+ local_indices.data(),
+ workd.data()+shift_x );
src.compress (VectorOperation::add);
if (mode == 3)
// store M*X in X
tmp.extract_subvector_to (local_indices.begin(),
local_indices.end(),
- &workd[0]+shift_x);
+ workd.data()+shift_x);
inverse.vmult(dst,tmp);
}
else if (mode == 1)
// B*X
src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_b_x );
+ local_indices.data(),
+ workd.data()+shift_b_x );
src.compress (VectorOperation::add);
// solving linear system
// compute Y = B * X
{
src.add (nloc,
- &local_indices[0],
- &workd[0]+shift_x );
+ local_indices.data(),
+ workd.data()+shift_x );
src.compress (VectorOperation::add);
// Multiplication with mass matrix M
// store the result
dst.extract_subvector_to (local_indices.begin(),
local_indices.end(),
- &workd[0]+shift_y);
+ workd.data()+shift_y);
} // end of pd*aupd_ loop
// 1 - compute eigenvectors,
// call of ARPACK pdneupd routine
if (additional_data.symmetric)
- pdseupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
- &z[0], &ldz, &sigmar,
+ pdseupd_(&mpi_communicator_fortran, &rvec, howmany, select.data(), eigenvalues_real.data(),
+ z.data(), &ldz, &sigmar,
bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ resid.data(), &ncv, v.data(), &ldv,
+ iparam.data(), ipntr.data(), workd.data(), workl.data(), &lworkl, &info);
else
- pdneupd_(&mpi_communicator_fortran, &rvec, howmany, &select[0], &eigenvalues_real[0],
- &eigenvalues_im[0], &v[0], &ldz, &sigmar, &sigmai,
- &workev[0], bmat, &n_inside_arpack, which, &nev, &tol,
- &resid[0], &ncv, &v[0], &ldv,
- &iparam[0], &ipntr[0], &workd[0], &workl[0], &lworkl, &info);
+ pdneupd_(&mpi_communicator_fortran, &rvec, howmany, select.data(), eigenvalues_real.data(),
+ eigenvalues_im.data(), v.data(), &ldz, &sigmar, &sigmai,
+ workev.data(), bmat, &n_inside_arpack, which, &nev, &tol,
+ resid.data(), &ncv, v.data(), &ldv,
+ iparam.data(), ipntr.data(), workd.data(), workl.data(), &lworkl, &info);
if (info == 1)
{
Assert (i*nloc + nloc <= (int)v.size(), dealii::ExcInternalError() );
eigenvectors[i]->add (nloc,
- &local_indices[0],
+ local_indices.data(),
&v[i*nloc] );
eigenvectors[i]->compress (VectorOperation::add);
}
tmp = 0.0;
tmp.add (nloc,
- &local_indices[0],
- &resid[0]);
+ local_indices.data(),
+ resid.data());
solver_control.check ( iparam[2], tmp.l2_norm() );
}
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- set (indices[i], indices.size(), &indices[0], &values(i,0),
+ set (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ set (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- set (row, col_indices.size(), &col_indices[0], &values[0],
+ set (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
}
Assert(n_columns <= (int)n_cols, ExcInternalError());
- col_index_ptr = &column_indices[0];
- col_value_ptr = &column_values[0];
+ col_index_ptr = column_indices.data();
+ col_value_ptr = column_values.data();
}
const PetscErrorCode ierr = MatSetValues (matrix, 1, &petsc_i, n_columns,
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- add (indices[i], indices.size(), &indices[0], &values(i,0),
+ add (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- add (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ add (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- add (row, col_indices.size(), &col_indices[0], &values[0],
+ add (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
}
Assert(n_columns <= (int)n_cols, ExcInternalError());
- col_index_ptr = &column_indices[0];
- col_value_ptr = &column_values[0];
+ col_index_ptr = column_indices.data();
+ col_value_ptr = column_values.data();
}
const PetscErrorCode ierr = MatSetValues (matrix, 1, &petsc_i, n_columns,
const std::vector<Number2> &values)
{
AssertDimension (indices.size(), values.size());
- add (indices.size(), &indices[0], &values[0]);
+ add (indices.size(), indices.data(), values.data());
}
{
// Copy the vector from the device to a temporary vector on the host
std::vector<Number> tmp(n_elements);
- cudaError_t error_code = cudaMemcpy(&tmp[0], cuda_vec.get_values(),
+ cudaError_t error_code = cudaMemcpy(tmp.data(), cuda_vec.get_values(),
n_elements*sizeof(Number),
cudaMemcpyDeviceToHost);
AssertCuda(error_code);
// One could still build a vector that is rich in the directions of all guesses,
// by taking a linear combination of them. (TODO: make function virtual?)
- const PetscErrorCode ierr = EPSSetInitialSpace (eps, vecs.size(), &vecs[0]);
+ const PetscErrorCode ierr = EPSSetInitialSpace (eps, vecs.size(), vecs.data());
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- set (indices[i], indices.size(), &indices[0], &values(i,0),
+ set (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ set (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- set (row, col_indices.size(), &col_indices[0], &values[0],
+ set (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- add (indices[i], indices.size(), &indices[0], &values(i,0),
+ add (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- add (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ add (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- add (row, col_indices.size(), &col_indices[0], &values[0],
+ add (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
Assert (cols != nullptr, ExcNotInitialized());
Assert (val != nullptr, ExcNotInitialized());
- number *val_ptr = &val[0];
- const number *const end_ptr = &val[cols->n_nonzero_elements()];
+ number *val_ptr = val.get();
+ const number *const end_ptr = val.get() + cols->n_nonzero_elements();
while (val_ptr != end_ptr)
*val_ptr++ *= factor;
const number factor_inv = number(1.) / factor;
- number *val_ptr = &val[0];
- const number *const end_ptr = &val[cols->n_nonzero_elements()];
+ number *val_ptr = val.get();
+ const number *const end_ptr = val.get() + cols->n_nonzero_elements();
while (val_ptr != end_ptr)
*val_ptr++ *= factor_inv;
val.get()),
grain_size);
else if (matrix_size > 0)
- std::memset (&val[0], 0, matrix_size*sizeof(number));
+ std::memset (val.get(), 0, matrix_size*sizeof(number));
return *this;
}
Assert (val != nullptr, ExcNotInitialized());
Assert (cols == matrix.cols, ExcDifferentSparsityPatterns());
- std::copy (&matrix.val[0], &matrix.val[cols->n_nonzero_elements()],
- &val[0]);
+ std::copy (matrix.val.get(), matrix.val.get() + cols->n_nonzero_elements(),
+ val.get());
return *this;
}
Assert (val != nullptr, ExcNotInitialized());
Assert (cols == matrix.cols, ExcDifferentSparsityPatterns());
- number *val_ptr = &val[0];
- const somenumber *matrix_ptr = &matrix.val[0];
- const number *const end_ptr = &val[cols->n_nonzero_elements()];
+ number *val_ptr = val.get();
+ const somenumber *matrix_ptr = matrix.val.get();
+ const number *const end_ptr = val.get() + cols->n_nonzero_elements();
while (val_ptr != end_ptr)
*val_ptr++ += factor * number(*matrix_ptr++);
// now the innermost loop that goes over all the elements in row
// 'col' of matrix B. Cache the elements, and then write them into C
// at once
- numberC *new_ptr = &new_entries[0];
+ numberC *new_ptr = new_entries.data();
const numberB *B_val_ptr =
&B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
const numberB *const end_cols = &B.val[sp_B.rowstart[col+1]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
*new_ptr++ = numberC(A_val) * numberC(*B_val_ptr) * numberC(use_vector ? V(col) : 1);
- C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+ C.add (i, new_ptr-new_entries.data(), new_cols, new_entries.data(),
false, true);
}
}
// now the innermost loop that goes over all the elements in row
// 'col' of matrix B. Cache the elements, and then write them into C
// at once
- numberC *new_ptr = &new_entries[0];
+ numberC *new_ptr = new_entries.data();
const numberB *B_val_ptr =
&B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
*new_ptr++ = numberC(A_val) * numberC(*B_val_ptr) * numberC(use_vector ? V(i) : 1);
- C.add (row, new_ptr-&new_entries[0], new_cols, &new_entries[0],
+ C.add (row, new_ptr-new_entries.data(), new_cols, new_entries.data(),
false, true);
}
}
// reference to rows or columns
real_type norm_sqr = 0;
const size_type n_rows = m();
- for (const number *ptr = &val[0];
- ptr != &val[cols->rowstart[n_rows]]; ++ptr)
+ for (const number *ptr = val.get();
+ ptr != val.get() + cols->rowstart[n_rows]; ++ptr)
norm_sqr += numbers::NumberTraits<number>::abs_square(*ptr);
return std::sqrt (norm_sqr);
// bracketed in [...]
out << '[' << max_len << "][";
// then write out real data
- out.write (reinterpret_cast<const char *>(&val[0]),
- reinterpret_cast<const char *>(&val[max_len])
- - reinterpret_cast<const char *>(&val[0]));
+ out.write (reinterpret_cast<const char *>(val.get()),
+ reinterpret_cast<const char *>(val.get() + max_len)
+ - reinterpret_cast<const char *>(val.get()));
out << ']';
AssertThrow (out, ExcIO());
val.reset (new number[max_len]);
// then read data
- in.read (reinterpret_cast<char *>(&val[0]),
- reinterpret_cast<char *>(&val[max_len])
- - reinterpret_cast<char *>(&val[0]));
+ in.read (reinterpret_cast<char *>(val.get()),
+ reinterpret_cast<char *>(val.get() + max_len)
+ - reinterpret_cast<char *>(val.get()));
in >> c;
AssertThrow (c == ']', ExcIO());
}
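// ---------------------------------------------------------------------
// Aside (not part of the patch): the binary dump/restore pair above
// streams the raw value array by reinterpreting it as bytes. A minimal
// sketch of the writing half with a std::vector:
#include <fstream>
#include <vector>

void binary_write_sketch(const std::vector<double> &val, const char *filename)
{
  std::ofstream out(filename, std::ios::binary);
  out.write(reinterpret_cast<const char *>(val.data()),
            val.size() * sizeof(double));
}
// ---------------------------------------------------------------------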
DEAL_II_CHECK_INPUT(in,'[',c);
// then read data
- in.read(reinterpret_cast<char *>(&row_info[0]),
+ in.read(reinterpret_cast<char *>(row_info.data()),
sizeof(RowInfo) * row_info.size());
DEAL_II_CHECK_INPUT(in,']',c);
DEAL_II_CHECK_INPUT(in,'[',c);
- in.read(reinterpret_cast<char *>(&data[0]),
+ in.read(reinterpret_cast<char *>(data.data()),
sizeof(Entry) * data.size());
DEAL_II_CHECK_INPUT(in,']',c);
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- set (indices[i], indices.size(), &indices[0], &values(i,0),
+ set (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
- set (indices.size(), &indices[0], &values[0]);
+ set (indices.size(), indices.data(), values.data());
}
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
- set (indices.size(), &indices[0], values.begin());
+ set (indices.size(), indices.data(), values.begin());
}
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
- add (indices.size(), &indices[0], &values[0]);
+ add (indices.size(), indices.data(), values.data());
}
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(),values.size()));
- add (indices.size(), &indices[0], values.begin());
+ add (indices.size(), indices.data(), values.begin());
}
int info;
// call lapack_templates.h wrapper:
stev ("N", &n,
- &diagonal[0], &subdiagonal[0],
- &Z[0], &ldz, &work[0],
+ diagonal.data(), subdiagonal.data(),
+ Z.data(), &ldz, work.data(),
&info);
Assert (info == 0,
// make sure we allocate an even number of elements,
// access to the new last element is needed in do_sum()
large_array.resize(2*((n_chunks+1)/2));
- array_ptr = &large_array[0];
+ array_ptr = large_array.data();
}
else
array_ptr = &small_array[0];
std::vector<Number> old(array_host.size());
old.swap(array_host);
- transpose(n, m, &old[0], &array_host[0]);
+ transpose(n, m, old.data(), array_host.data());
}
cudaError_t error_code = cudaMalloc(array_device, n*sizeof(Number1));
AssertCuda(error_code);
- error_code = cudaMemcpy(*array_device, &array_host[0], n*sizeof(Number1),
+ error_code = cudaMemcpy(*array_device, array_host.data(), n*sizeof(Number1),
cudaMemcpyHostToDevice);
AssertCuda(error_code);
}
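// ---------------------------------------------------------------------
// Aside (not part of the patch): a minimal sketch of the host-to-device
// allocate-and-copy step above, with the source pointer taken from the
// host vector via .data():
#include <cuda_runtime.h>
#include <vector>

template <typename Number>
Number *copy_to_device_sketch(const std::vector<Number> &host)
{
  Number *device = nullptr;
  cudaMalloc(&device, host.size() * sizeof(Number));
  cudaMemcpy(device, host.data(), host.size() * sizeof(Number),
             cudaMemcpyHostToDevice);
  return device;
}
// ---------------------------------------------------------------------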
for (unsigned int i=0; i<dofs_per_cell; ++i)
lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]];
- memcpy(&local_to_global_host[cell_id*padding_length], &lexicographic_dof_indices[0],
+ memcpy(&local_to_global_host[cell_id*padding_length], lexicographic_dof_indices.data(),
dofs_per_cell*sizeof(unsigned int));
fe_values.reinit(cell);
if (update_flags & update_quadrature_points)
{
const std::vector<Point<dim>> &q_points = fe_values.get_quadrature_points();
- memcpy(&q_points_host[cell_id*padding_length], &q_points[0],
+ memcpy(&q_points_host[cell_id*padding_length], q_points.data(),
q_points_per_cell*sizeof(Point<dim>));
}
{
const std::vector<DerivativeForm<1,dim,dim>> &inv_jacobians =
fe_values.get_inverse_jacobians();
- memcpy(&inv_jacobian_host[cell_id*padding_length*dim*dim], &inv_jacobians[0],
+ memcpy(&inv_jacobian_host[cell_id*padding_length*dim*dim], inv_jacobians.data(),
q_points_per_cell*sizeof(DerivativeForm<1,dim,dim>));
}
}
sizeof(dealii::types::global_dof_index));
AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(constrained_dofs, &constrained_dofs_host[0],
+ cuda_error = cudaMemcpy(constrained_dofs, constrained_dofs_host.data(),
n_constrained_dofs * sizeof(dealii::types::global_dof_index),
cudaMemcpyHostToDevice);
AssertCuda(cuda_error);
{
AssertIndexRange (row, constraint_pool_row_index.size()-1);
return constraint_pool_data.empty() ? nullptr :
- &constraint_pool_data[0] + constraint_pool_row_index[row];
+ constraint_pool_data.data() + constraint_pool_row_index[row];
}
{
AssertIndexRange (row, constraint_pool_row_index.size()-1);
return constraint_pool_data.empty() ? nullptr :
- &constraint_pool_data[0] + constraint_pool_row_index[row+1];
+ constraint_pool_data.data() + constraint_pool_row_index[row+1];
}
#endif
std::pair<unsigned int,unsigned int> return_range;
return_range.first =
- std::lower_bound(&fe_indices[0] + range.first,
- &fe_indices[0] + range.second, fe_index)
- -&fe_indices[0] ;
+ std::lower_bound(fe_indices.data() + range.first,
+ fe_indices.data() + range.second, fe_index)
+ -fe_indices.data() ;
return_range.second =
- std::lower_bound(&fe_indices[0] + return_range.first,
- &fe_indices[0] + range.second,
- fe_index + 1)-&fe_indices[0];
+ std::lower_bound(fe_indices.data() + return_range.first,
+ fe_indices.data() + range.second,
+ fe_index + 1)-fe_indices.data();
Assert(return_range.first >= range.first &&
return_range.second <= range.second, ExcInternalError());
return return_range;
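// ---------------------------------------------------------------------
// Aside (not part of the patch): the subrange computation above relies
// on fe_indices being sorted, so std::lower_bound over the contiguous
// storage yields the half-open [first, last) range of entries with the
// given index. A standalone sketch of the same computation:
#include <algorithm>
#include <utility>
#include <vector>

std::pair<unsigned int, unsigned int>
fe_index_subrange_sketch(const std::vector<unsigned int> &fe_indices, // sorted
                         const std::pair<unsigned int, unsigned int> range,
                         const unsigned int fe_index)
{
  const unsigned int first =
    std::lower_bound(fe_indices.data() + range.first,
                     fe_indices.data() + range.second, fe_index) -
    fe_indices.data();
  const unsigned int last =
    std::lower_bound(fe_indices.data() + first,
                     fe_indices.data() + range.second, fe_index + 1) -
    fe_indices.data();
  return std::make_pair(first, last);
}
// ---------------------------------------------------------------------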
char *compressed_data = new char[compressed_data_length];
int err = compress2 ((Bytef *) compressed_data,
&compressed_data_length,
- (const Bytef *) &data[0],
+ (const Bytef *) data.data(),
data.size() * sizeof(T),
get_zlib_compression_level(flags.compression_level));
(void)err;
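// ---------------------------------------------------------------------
// Aside (not part of the patch): compress2 fills a caller-provided
// buffer and updates the length argument with the compressed size. A
// minimal sketch using compressBound() to size the destination:
#include <vector>
#include <zlib.h>

std::vector<char> compress_sketch(const std::vector<char> &data)
{
  uLongf            dest_len = compressBound(data.size());
  std::vector<char> dest(dest_len);
  compress2(reinterpret_cast<Bytef *>(dest.data()), &dest_len,
            reinterpret_cast<const Bytef *>(data.data()), data.size(),
            Z_BEST_COMPRESSION);
  dest.resize(dest_len); // shrink to the actual compressed size
  return dest;
}
// ---------------------------------------------------------------------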
{
if (flags.data_binary)
{
- stream.write(reinterpret_cast<const char *>(&values[0]),
+ stream.write(reinterpret_cast<const char *>(values.data()),
values.size()*sizeof(data));
}
else
// And finally, write the node data
data_filter.fill_node_data(node_data_vec);
- status = H5Dwrite(node_dataset, H5T_NATIVE_DOUBLE, node_memory_dataspace, node_file_dataspace, plist_id, &node_data_vec[0]);
+ status = H5Dwrite(node_dataset, H5T_NATIVE_DOUBLE, node_memory_dataspace, node_file_dataspace, plist_id, node_data_vec.data());
AssertThrow(status >= 0, ExcIO());
node_data_vec.clear();
// And the cell data
data_filter.fill_cell_data(global_node_cell_offsets[0], cell_data_vec);
- status = H5Dwrite(cell_dataset, H5T_NATIVE_UINT, cell_memory_dataspace, cell_file_dataspace, plist_id, &cell_data_vec[0]);
+ status = H5Dwrite(cell_dataset, H5T_NATIVE_UINT, cell_memory_dataspace, cell_file_dataspace, plist_id, cell_data_vec.data());
AssertThrow(status >= 0, ExcIO());
cell_data_vec.clear();
const unsigned int n = interpolation_points.size();
cspline = gsl_spline_alloc (gsl_interp_cspline, n);
// gsl_spline_init returns something but it seems nobody knows what
- gsl_spline_init (cspline, &interpolation_points[0], &interpolation_values[0], n);
+ gsl_spline_init (cspline, interpolation_points.data(), interpolation_values.data(), n);
}
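// ---------------------------------------------------------------------
// Aside (not part of the patch): gsl_spline_init copies the abscissa
// and ordinate arrays into the spline object, so handing it the
// vectors' .data() pointers is safe. A minimal usage sketch:
#include <gsl/gsl_spline.h>
#include <vector>

double cspline_eval_sketch(const std::vector<double> &x, // strictly increasing
                           const std::vector<double> &y,
                           const double               t)
{
  gsl_interp_accel *acc    = gsl_interp_accel_alloc();
  gsl_spline       *spline = gsl_spline_alloc(gsl_interp_cspline, x.size());
  gsl_spline_init(spline, x.data(), y.data(), x.size());
  const double result = gsl_spline_eval(spline, t, acc);
  gsl_spline_free(spline);
  gsl_interp_accel_free(acc);
  return result;
}
// ---------------------------------------------------------------------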
TrilinosWrappers::types::int_type(n_elements()),
(n_elements() > 0
?
- reinterpret_cast<TrilinosWrappers::types::int_type *>(&indices[0])
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(indices.data())
:
nullptr),
0,
// processors in this case, which is more expensive than the reduction
// operation above in MPI_Allreduce)
std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
- const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
- &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ const int ierr = MPI_Allgather (my_destinations.data(), max_n_destinations, MPI_UNSIGNED,
+ all_destinations.data(), max_n_destinations, MPI_UNSIGNED,
mpi_comm);
AssertThrowMPI(ierr);
std::vector<char> all_hostnames(max_hostname_size *
MPI::n_mpi_processes(MPI_COMM_WORLD));
- const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
- &all_hostnames[0], max_hostname_size, MPI_CHAR,
+ const int ierr = MPI_Allgather (hostname_array.data(), max_hostname_size, MPI_CHAR,
+ all_hostnames.data(), max_hostname_size, MPI_CHAR,
MPI_COMM_WORLD);
AssertThrowMPI(ierr);
unsigned int n_local_processes=0;
unsigned int nth_process_on_host = 0;
for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
- if (std::string (&all_hostnames[0] + i*max_hostname_size) == hostname)
+ if (std::string (all_hostnames.data() + i*max_hostname_size) == hostname)
{
++n_local_processes;
if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
// Allow non-zero start index for the vector. Send this data to all
// processors
first_index[0] = local_range_data.first;
- int ierr = MPI_Bcast(&first_index[0], 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ int ierr = MPI_Bcast(first_index.data(), 1, DEAL_II_DOF_INDEX_MPI_TYPE,
0, communicator);
AssertThrowMPI(ierr);
for (unsigned int i=0; i<n_ghost_targets; i++)
send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;
- const int ierr = MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
+ const int ierr = MPI_Alltoall (send_buffer.data(), 1, MPI_INT, receive_buffer.data(), 1,
MPI_INT, communicator);
AssertThrowMPI(ierr);
if (import_requests.size()>0)
{
const int ierr = MPI_Waitall (import_requests.size(),
- &import_requests[0],
+ import_requests.data(),
MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
{
Assert (values.size() > 0, ExcZero());
- value(x,values.size()-1,&values[0]);
+ value(x,values.size()-1,values.data());
}
{
Assert (values.size() > 0, ExcZero());
- value(x,values.size()-1,&values[0]);
+ value(x,values.size()-1,values.data());
}
{
buffer.resize(bytes_for_buffer());
- char *ptr = &buffer[0];
+ char *ptr = buffer.data();
const unsigned int num_cells = tree_index.size();
std::memcpy(ptr, &num_cells, sizeof(unsigned int));
ptr += sizeof(unsigned int);
std::memcpy(ptr,
- &tree_index[0],
+ tree_index.data(),
num_cells*sizeof(unsigned int));
ptr += num_cells*sizeof(unsigned int);
std::memcpy(ptr,
- &quadrants[0],
+ quadrants.data(),
num_cells * sizeof(typename dealii::internal::p4est::
types<dim>::quadrant));
ptr += num_cells*sizeof(typename dealii::internal::p4est::types<dim>::
quadrant);
std::memcpy(ptr,
- &vertex_indices[0],
+ vertex_indices.data(),
vertex_indices.size() * sizeof(unsigned int));
ptr += vertex_indices.size() * sizeof(unsigned int);
std::memcpy(ptr,
- &vertices[0],
+ vertices.data(),
vertices.size() * sizeof(dealii::Point<spacedim>));
ptr += vertices.size() * sizeof(dealii::Point<spacedim>);
- Assert (ptr == &buffer[0]+buffer.size(),
+ Assert (ptr == buffer.data()+buffer.size(),
ExcInternalError());
}
void unpack_data (const std::vector<char> &buffer)
{
- const char *ptr = &buffer[0];
+ const char *ptr = buffer.data();
unsigned int cells;
memcpy(&cells, ptr, sizeof(unsigned int));
ptr += sizeof(unsigned int);
tree_index.resize(cells);
- memcpy(&tree_index[0],ptr,sizeof(unsigned int)*cells);
+ memcpy(tree_index.data(),ptr,sizeof(unsigned int)*cells);
ptr += sizeof(unsigned int)*cells;
quadrants.resize(cells);
- memcpy(&quadrants[0],ptr,
+ memcpy(quadrants.data(),ptr,
sizeof(typename dealii::internal::p4est::types<dim>::quadrant)*cells);
ptr += sizeof(typename dealii::internal::p4est::types<dim>::quadrant)*cells;
for (unsigned int c=0; c<cells; ++c)
first_vertices[c] = &vertices[first_indices[c]];
- Assert (ptr == &buffer[0]+buffer.size(),
+ Assert (ptr == buffer.data() + buffer.size(),
ExcInternalError());
}
};
AssertThrowMPI(ierr);
receive.resize(len);
- char *ptr = &receive[0];
+ char *ptr = receive.data();
ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
this->get_communicator(), &status);
AssertThrowMPI(ierr);
// safely destroy the buffers.
if (requests.size() > 0)
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
const int ierr = MPI_Allgather (&send_value,
1,
MPI_UNSIGNED,
- &number_cache.n_locally_owned_active_cells[0],
+ number_cache.n_locally_owned_active_cells.data(),
1,
MPI_UNSIGNED,
this->mpi_communicator);
if (requests.size() > 0)
{
- ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
// set rcounts based on new_numbers:
int cur_count = new_numbers_copy.size ();
int ierr = MPI_Allgather (&cur_count, 1, MPI_INT,
- &rcounts[0], 1, MPI_INT,
+ rcounts.data(), 1, MPI_INT,
tr->get_communicator ());
AssertThrowMPI(ierr);
Assert(((int)new_numbers_copy.size()) ==
rcounts[Utilities::MPI::this_mpi_process (tr->get_communicator ())],
ExcInternalError());
- ierr = MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (),
+ ierr = MPI_Allgatherv (new_numbers_copy.data(), new_numbers_copy.size (),
DEAL_II_DOF_INDEX_MPI_TYPE,
- &gathered_new_numbers[0], &rcounts[0],
- &displacements[0],
+ gathered_new_numbers.data(), rcounts.data(),
+ displacements.data(),
DEAL_II_DOF_INDEX_MPI_TYPE,
tr->get_communicator ());
AssertThrowMPI(ierr);
// know how to serialize itself. Consequently, first copy it over
// to an array of bytes, and then serialize that
std::vector<char> quadrants_as_chars (sizeof(quadrants[0]) * quadrants.size());
- std::memcpy(&quadrants_as_chars[0],
- &quadrants[0],
+ std::memcpy(quadrants_as_chars.data(),
+ quadrants.data(),
quadrants_as_chars.size());
// now serialize everything
&dof_numbers_and_indices;
quadrants.resize (quadrants_as_chars.size() / sizeof(quadrants[0]));
- std::memcpy(&quadrants[0],
- &quadrants_as_chars[0],
+ std::memcpy(quadrants.data(),
+ quadrants_as_chars.data(),
quadrants_as_chars.size());
}
decompressing_stream.push(boost::iostreams::gzip_decompressor());
decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
- decompressing_stream.write (&buffer[0], buffer.size());
+ decompressing_stream.write (buffer.data(), buffer.size());
}
// then restore the object from the buffer
AssertThrowMPI(ierr);
receive.resize(len);
- char *ptr = &receive[0];
+ char *ptr = receive.data();
ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
tria.get_communicator(), &status);
AssertThrowMPI(ierr);
AssertThrowMPI(ierr);
receive.resize(len);
- char *ptr = &receive[0];
+ char *ptr = receive.data();
ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
tria.get_communicator(), &status);
AssertThrowMPI(ierr);
// buffers.
if (requests.size() > 0)
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
if (reply_requests.size() > 0)
{
- const int ierr = MPI_Waitall(reply_requests.size(), &reply_requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(reply_requests.size(), reply_requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
}
const int ierr = MPI_Allgather ( &n_locally_owned_dofs,
1, DEAL_II_DOF_INDEX_MPI_TYPE,
- &n_locally_owned_dofs_per_processor[0],
+ n_locally_owned_dofs_per_processor.data(),
1, DEAL_II_DOF_INDEX_MPI_TYPE,
triangulation->get_communicator());
AssertThrowMPI(ierr);
my_data.resize(max_size);
std::vector<char> buffer(max_size*n_cpus);
- const int ierr = MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
- &buffer[0], max_size, MPI_BYTE,
+ const int ierr = MPI_Allgather(my_data.data(), max_size, MPI_BYTE,
+ buffer.data(), max_size, MPI_BYTE,
triangulation->get_communicator());
AssertThrowMPI(ierr);
minimum_degree_ordering
(G,
- make_iterator_property_map(&degree[0], id, degree[0]),
- &inverse_perm[0],
- &perm[0],
- make_iterator_property_map(&supernode_sizes[0], id, supernode_sizes[0]),
+ make_iterator_property_map(degree.begin(), id, degree[0]),
+ inverse_perm.data(),
+ perm.data(),
+ make_iterator_property_map(supernode_sizes.begin(), id, supernode_sizes[0]),
delta, id);
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- const int ierr = MPI_Allgather ( &local_dof_count[0],
+ const int ierr = MPI_Allgather ( local_dof_count.data(),
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
+ all_dof_counts.data(),
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
tria->get_communicator());
AssertThrowMPI(ierr);
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- const int ierr = MPI_Allgather ( &local_dof_count[0],
+ const int ierr = MPI_Allgather ( local_dof_count.data(),
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
+ all_dof_counts.data(),
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
tria->get_communicator());
AssertThrowMPI(ierr);
{
std::vector<types::global_dof_index> local_dof_count = dofs_per_component;
- const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_component[0], n_target_components,
+ const int ierr = MPI_Allreduce (local_dof_count.data(), dofs_per_component.data(), n_target_components,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM, tria->get_communicator());
AssertThrowMPI (ierr);
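// ---------------------------------------------------------------------
// Aside (not part of the patch): the idiom above copies the local
// counts aside and lets MPI_Allreduce sum them across all ranks back
// into the original vector (send and receive buffers must be distinct).
// A minimal sketch:
#include <mpi.h>
#include <vector>

void sum_over_ranks_sketch(std::vector<long> &counts, MPI_Comm comm)
{
  const std::vector<long> local = counts;
  MPI_Allreduce(local.data(), counts.data(),
                static_cast<int>(counts.size()), MPI_LONG, MPI_SUM, comm);
}
// ---------------------------------------------------------------------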
(&dof_handler.get_triangulation())))
{
std::vector<types::global_dof_index> local_dof_count = dofs_per_block;
- const int ierr = MPI_Allreduce (&local_dof_count[0], &dofs_per_block[0],
+ const int ierr = MPI_Allreduce (local_dof_count.data(), dofs_per_block.data(),
n_target_blocks,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM, tria->get_communicator());
this->dofs_per_face));
// TODO: Something goes wrong there. The error of the least squares fit
// is too large ...
- // FETools::compute_face_embedding_matrices(*this, &face_embeddings[0], 0, 0);
+ // FETools::compute_face_embedding_matrices(*this, face_embeddings.data(), 0, 0);
this->interface_constraints.reinit((1<<(dim-1)) * this->dofs_per_face,
this->dofs_per_face);
unsigned int target_row=0;
// processors and shifting the indices accordingly
const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
std::vector<types::global_vertex_index> indices(n_cpu);
- int ierr = MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, &indices[0],
+ int ierr = MPI_Allgather(&next_index, 1, DEAL_II_DOF_INDEX_MPI_TYPE, indices.data(),
indices.size(), DEAL_II_DOF_INDEX_MPI_TYPE, triangulation.get_communicator());
AssertThrowMPI(ierr);
- const types::global_vertex_index shift = std::accumulate(&indices[0],
+ const types::global_vertex_index shift = std::accumulate(indices.data(),
for (unsigned int i=0; i<n_points; ++i)
if ((surrounding_points[i][d]-minP[d]) > periodicity[d]/2.0)
modified_points[i][d] -= periodicity[d];
- surrounding_points_start = &modified_points[0];
+ surrounding_points_start = modified_points.data();
}
// Now perform the interpolation
AssertDimension(w.size(), this->n_rows());
// Compute V^T v
work.resize(std::max(mm,nn));
- gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.val, &one, &null, &work[0], &one);
+ gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.val, &one, &null, work.data(), &one);
// Multiply by singular values
for (size_type i=0; i<wr.size(); ++i)
work[i] *= wr[i];
// Multiply with U
- gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, &work[0], &one, &beta, w.val, &one);
+ gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, work.data(), &one, &beta, w.val, &one);
break;
}
case inverse_svd:
AssertDimension(v.size(), this->n_rows());
// Compute U^T v
work.resize(std::max(mm,nn));
- gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.val, &one, &null, &work[0], &one);
+ gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.val, &one, &null, work.data(), &one);
// Multiply by singular values
for (size_type i=0; i<wr.size(); ++i)
work[i] *= wr[i];
// Multiply with V
- gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, &work[0], &one, &beta, w.val, &one);
+ gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, work.data(), &one, &beta, w.val, &one);
break;
}
default:
// Compute U^T v
work.resize(std::max(mm,nn));
- gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.val, &one, &null, &work[0], &one);
+ gemv("T", &mm, &mm, &alpha, &svd_u->values[0], &mm, v.val, &one, &null, work.data(), &one);
// Multiply by singular values
for (size_type i=0; i<wr.size(); ++i)
work[i] *= wr[i];
// Multiply with V
- gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, &work[0], &one, &beta, w.val, &one);
+ gemv("T", &nn, &nn, &alpha, &svd_vt->values[0], &nn, work.data(), &one, &beta, w.val, &one);
break;
}
case inverse_svd:
// Compute V^T v
work.resize(std::max(mm,nn));
- gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.val, &one, &null, &work[0], &one);
+ gemv("N", &nn, &nn, &alpha, &svd_vt->values[0], &nn, v.val, &one, &null, work.data(), &one);
// Multiply by singular values
for (size_type i=0; i<wr.size(); ++i)
work[i] *= wr[i];
// Multiply with U
- gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, &work[0], &one, &beta, w.val, &one);
+ gemv("N", &mm, &mm, &alpha, &svd_u->values[0], &mm, work.data(), &one, &beta, w.val, &one);
break;
}
default:
number *values = const_cast<number *> (&this->values[0]);
ipiv.resize(mm);
int info = 0;
- getrf(&mm, &nn, values, &mm, &ipiv[0], &info);
+ getrf(&mm, &nn, values, &mm, ipiv.data(), &info);
Assert(info >= 0, ExcInternalError());
std::max(1,N) :
0;
work.resize(lwork);
- return lansy (&type, &LAPACKSupport::L, &N, values, &lda, &work[0]);
+ return lansy (&type, &LAPACKSupport::L, &N, values, &lda, work.data());
}
else
{
std::max(1,M) :
0;
work.resize(lwork);
- return lange (&type, &M, &N, values, &lda, &work[0]);
+ return lange (&type, &M, &N, values, &lda, work.data());
}
}
// use the same uplo as in Cholesky
pocon (&LAPACKSupport::L, &N, values, &lda,
&a_norm, &rcond,
- &work[0], &iwork[0], &info);
+ work.data(), iwork.data(), &info);
Assert(info >= 0, ExcInternalError());
work.resize(1);
int lwork = -1;
gesdd(&LAPACKSupport::A, &mm, &nn, values, &mm,
- &wr[0], mu, &mm, mvt, &nn,
- &work[0], &lwork, &ipiv[0], &info);
+ wr.data(), mu, &mm, mvt, &nn,
+ work.data(), &lwork, ipiv.data(), &info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("gesdd", info));
// Resize the work array. Add one to the size computed by LAPACK to be on
// the safe side.
work.resize(lwork);
// Do the actual SVD.
gesdd(&LAPACKSupport::A, &mm, &nn, values, &mm,
- &wr[0], mu, &mm, mvt, &nn,
- &work[0], &lwork, &ipiv[0], &info);
+ wr.data(), mu, &mm, mvt, &nn,
+ work.data(), &lwork, ipiv.data(), &info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("gesdd", info));
work.resize(0);
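// ---------------------------------------------------------------------
// Aside (not part of the patch): the two gesdd calls above use LAPACK's
// workspace-query convention -- a first call with lwork == -1 only
// reports the optimal workspace size in work[0]; after resizing, the
// second call does the real computation. The same convention, sketched
// with the simpler dsyev (symmetric eigenvalues):
#include <vector>

extern "C" void dsyev_(const char *jobz, const char *uplo, const int *n,
                       double *a, const int *lda, double *w, double *work,
                       const int *lwork, int *info);

void workspace_query_sketch(std::vector<double> &a, // n*n, column-major
                            std::vector<double> &w, // size n
                            const int            n)
{
  int    info  = 0;
  int    lwork = -1;
  double wkopt = 0;
  dsyev_("N", "L", &n, a.data(), &n, w.data(), &wkopt, &lwork, &info);
  lwork = static_cast<int>(wkopt);
  std::vector<double> work(lwork);
  dsyev_("N", "L", &n, a.data(), &n, w.data(), work.data(), &lwork, &info);
}
// ---------------------------------------------------------------------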
ipiv.resize(mm);
inv_work.resize (mm);
- getri(&mm, values, &mm, &ipiv[0], &inv_work[0], &mm, &info);
+ getri(&mm, values, &mm, ipiv.data(), inv_work.data(), &mm, &info);
}
else
{
const number *values = &this->values[0];
int info = 0;
- getrs(trans, &nn, &one, values, &nn, &ipiv[0],
+ getrs(trans, &nn, &one, values, &nn, ipiv.data(),
v.begin(), &nn, &info);
Assert(info == 0, ExcInternalError());
const number *values = &this->values[0];
int info = 0;
- getrs(trans, &nn, &kk, values, &nn, &ipiv[0], &B.values[0], &nn, &info);
+ getrs(trans, &nn, &kk, values, &nn, ipiv.data(), &B.values[0], &nn, &info);
Assert(info == 0, ExcInternalError());
}
work.resize(1);
geev(jobvl, jobvr, &nn, values, &nn,
- &wr[0], &wi[0],
- &vl[0], &nn, &vr[0], &nn,
- &work[0], &lwork, &info);
+ wr.data(), wi.data(),
+ vl.data(), &nn, vr.data(), &nn,
+ work.data(), &lwork, &info);
// geev returns info=0 on success. Since we only queried the optimal size
// for work, everything else would not be acceptable.
Assert (info == 0, ExcInternalError());
// Finally compute the eigenvalues.
geev(jobvl, jobvr, &nn, values, &nn,
- &wr[0], &wi[0],
- &vl[0], &nn, &vr[0], &nn,
- &work[0], &lwork, &info);
+ wr.data(), wi.data(),
+ vl.data(), &nn, vr.data(), &nn,
+ work.data(), &lwork, &info);
// Negative return value implies a wrong argument. This should be internal.
Assert (info >=0, ExcInternalError());
uplo, &nn, values_A, &nn,
&lower_bound, &upper_bound,
dummy, dummy, &abs_accuracy,
- &n_eigenpairs, &wr[0], values_eigenvectors,
- &nn, &work[0], &lwork, &iwork[0],
- &ifail[0], &info);
+ &n_eigenpairs, wr.data(), values_eigenvectors,
+ &nn, work.data(), &lwork, iwork.data(),
+ ifail.data(), &info);
// syevx returns info=0 on success. Since we only queried the optimal size
// for work, everything else would not be acceptable.
Assert (info == 0, ExcInternalError());
uplo, &nn, values_A, &nn,
&lower_bound, &upper_bound,
dummy, dummy, &abs_accuracy,
- &n_eigenpairs, &wr[0], values_eigenvectors,
- &nn, &work[0], &lwork, &iwork[0],
- &ifail[0], &info);
+ &n_eigenpairs, wr.data(), values_eigenvectors,
+ &nn, work.data(), &lwork, iwork.data(),
+ ifail.data(), &info);
// Negative return value implies a wrong argument. This should be internal.
Assert (info >=0, ExcInternalError());
sygvx (&itype, jobz, range, uplo, &nn, values_A, &nn,
values_B, &nn, &lower_bound, &upper_bound,
dummy, dummy, &abs_accuracy, &n_eigenpairs,
- &wr[0], values_eigenvectors, &nn, &work[0],
- &lwork, &iwork[0], &ifail[0], &info);
+ wr.data(), values_eigenvectors, &nn, work.data(),
+ &lwork, iwork.data(), ifail.data(), &info);
// sygvx returns info=0 on success. Since we only queried the optimal size
// for work, everything else would not be acceptable.
Assert (info == 0, ExcInternalError());
sygvx (&itype, jobz, range, uplo, &nn, values_A, &nn,
values_B, &nn, &lower_bound, &upper_bound,
dummy, dummy, &abs_accuracy, &n_eigenpairs,
- &wr[0], values_eigenvectors, &nn, &work[0],
- &lwork, &iwork[0], &ifail[0], &info);
+ wr.data(), values_eigenvectors, &nn, work.data(),
+ &lwork, iwork.data(), ifail.data(), &info);
// Negative return value implies a wrong argument. This should be internal.
Assert (info >=0, ExcInternalError());
sygv (&itype, jobz, uplo, &nn, values_A, &nn,
values_B, &nn,
- &wr[0], &work[0], &lwork, &info);
+ wr.data(), work.data(), &lwork, &info);
// sygv returns info=0 on success. Since we only queried the optimal size
// for work, everything else would not be acceptable.
Assert (info == 0, ExcInternalError());
// Finally compute the generalized eigenvalues.
sygv (&itype, jobz, uplo, &nn, values_A, &nn,
values_B, &nn,
- &wr[0], &work[0], &lwork, &info);
+ wr.data(), work.data(), &lwork, &info);
// Negative return value implies a wrong argument. This should be internal.
Assert (info >=0, ExcInternalError());
IS index_set;
ISCreateGeneral (get_mpi_communicator(), rows.size(),
- &petsc_rows[0], PETSC_COPY_VALUES, &index_set);
+ petsc_rows.data(), PETSC_COPY_VALUES, &index_set);
const PetscErrorCode ierr = MatZeroRowsIS(matrix, index_set, new_diag_value,
nullptr, nullptr);
(communicator,
local_rows, local_columns,
m, n,
- 0, &int_row_lengths[0],
+ 0, int_row_lengths.data(),
0,
- offdiag_row_lengths.size() ? &int_offdiag_row_lengths[0] : nullptr,
+ offdiag_row_lengths.size() ? int_offdiag_row_lengths.data() : nullptr,
&matrix);
//TODO: Sometimes the actual number of nonzero entries allocated is greater than the number of nonzero entries, which PETSc will complain about unless explicitly disabled with MatSetOption. There is probably a way to prevent a different number of nonzero elements being allocated in the first place. (See also previous TODO).
// that summarily allocates these
// entries:
ierr = MatMPIAIJSetPreallocationCSR (matrix,
- &rowstart_in_window[0],
- &colnums_in_window[0],
+ rowstart_in_window.data(),
+ colnums_in_window.data(),
nullptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
}
// that summarily allocates these
// entries:
ierr = MatMPIAIJSetPreallocationCSR (matrix,
- &rowstart_in_window[0],
- &colnums_in_window[0],
+ rowstart_in_window.data(),
+ colnums_in_window.data(),
nullptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
int_row_lengths (row_lengths.begin(), row_lengths.end());
const PetscErrorCode ierr = MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, 0,
- &int_row_lengths[0], &matrix);
+ int_row_lengths.data(), &matrix);
AssertThrow (ierr == 0, ExcPETScError(ierr));
// set symmetric flag, if so requested
const PetscInt int_row = i;
const PetscErrorCode ierr = MatSetValues (matrix, 1, &int_row,
- row_lengths[i], &row_entries[0],
- &row_values[0], INSERT_VALUES);
+ row_lengths[i], row_entries.data(),
+ row_values.data(), INSERT_VALUES);
AssertThrow (ierr == 0, ExcPETScError(ierr));
}
compress (VectorOperation::insert);
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
- do_set_add_operation(indices.size(), &indices[0], &values[0], false);
+ do_set_add_operation(indices.size(), indices.data(), values.data(), false);
}
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
- do_set_add_operation(indices.size(), &indices[0], &values[0], true);
+ do_set_add_operation(indices.size(), indices.data(), values.data(), true);
}
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
- do_set_add_operation(indices.size(), &indices[0], values.begin(), true);
+ do_set_add_operation(indices.size(), indices.data(), values.begin(), true);
}
numeric_decomposition (nullptr),
control (UMFPACK_CONTROL)
{
- umfpack_dl_defaults (&control[0]);
+ umfpack_dl_defaults (control.data());
}
tmp.swap (Ax);
}
- umfpack_dl_defaults (&control[0]);
+ umfpack_dl_defaults (control.data());
}
int status;
status = umfpack_dl_symbolic (N, N,
- &Ap[0], &Ai[0], &Ax[0],
+ Ap.data(), Ai.data(), Ax.data(),
&symbolic_decomposition,
- &control[0], nullptr);
+ control.data(), nullptr);
AssertThrow (status == UMFPACK_OK,
ExcUMFPACKError("umfpack_dl_symbolic", status));
- status = umfpack_dl_numeric (&Ap[0], &Ai[0], &Ax[0],
+ status = umfpack_dl_numeric (Ap.data(), Ai.data(), Ax.data(),
symbolic_decomposition,
&numeric_decomposition,
- &control[0], nullptr);
+ control.data(), nullptr);
AssertThrow (status == UMFPACK_OK,
ExcUMFPACKError("umfpack_dl_numeric", status));
// instead.
const int status
= umfpack_dl_solve (transpose ? UMFPACK_A : UMFPACK_At,
- &Ap[0], &Ai[0], &Ax[0],
+ Ap.data(), Ai.data(), Ax.data(),
rhs_and_solution.begin(), rhs.begin(),
numeric_decomposition,
- &control[0], nullptr);
+ control.data(), nullptr);
AssertThrow (status == UMFPACK_OK, ExcUMFPACKError("umfpack_dl_solve", status));
}
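// ---------------------------------------------------------------------
// Aside (not part of the patch): the UMFPACK sequence above is the
// library's documented workflow -- umfpack_dl_defaults() fills the
// control array, umfpack_dl_symbolic() and umfpack_dl_numeric() build
// the factorization from the compressed-column arrays Ap/Ai/Ax, and
// umfpack_dl_solve() reuses it for each right-hand side. Since these
// vectors are never empty here, control.data() and &control[0] are
// interchangeable; the conversion is purely for consistency.
// ---------------------------------------------------------------------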
// Use recursive if the number of partitions is less than or equal to 8
if (nparts <= 8)
- ierr = METIS_PartGraphRecursive(&n, &ncon, &int_rowstart[0], &int_colnums[0],
+ ierr = METIS_PartGraphRecursive(&n, &ncon, int_rowstart.data(), int_colnums.data(),
nullptr, nullptr, nullptr,
- &nparts,nullptr,nullptr,&options[0],
- &dummy,&int_partition_indices[0]);
+ &nparts,nullptr,nullptr,options,
+ &dummy,int_partition_indices.data());
// Otherwise use kway
else
- ierr = METIS_PartGraphKway(&n, &ncon, &int_rowstart[0], &int_colnums[0],
+ ierr = METIS_PartGraphKway(&n, &ncon, int_rowstart.data(), int_colnums.data(),
nullptr, nullptr, nullptr,
- &nparts,nullptr,nullptr,&options[0],
- &dummy,&int_partition_indices[0]);
+ &nparts,nullptr,nullptr,options,
+ &dummy,int_partition_indices.data());
// If metis returns normally, an error code METIS_OK=1 is returned from
// the above functions (see metish.h)
ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
AssertThrowMPI(ierr);
recv_buf.resize(len);
- ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+ ierr = MPI_Recv(recv_buf.data(), len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
AssertThrowMPI(ierr);
// complete all sends, so that we can safely destroy the buffers.
if (requests.size())
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
AssertThrowMPI(ierr);
recv_buf.resize(len);
- ierr = MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
+ ierr = MPI_Recv(recv_buf.data(), len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
AssertThrowMPI(ierr);
// complete all sends, so that we can safely destroy the buffers.
if (requests.size())
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
}
}
// the current processor. Therefore, pass a dummy in that case
else
parameter_list.set("null space: vectors",
- &dummy[0]);
+ dummy.data());
}
initialize (matrix, parameter_list);
// the current processor. Therefore, pass a dummy in that case
else
parameter_list.set("null space: vectors",
- &dummy[0]);
+ dummy.data());
}
initialize (matrix, parameter_list);
std::shared_ptr<Epetra_CrsGraph> graph;
if (input_row_map.Comm().NumProc() > 1)
graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
- &n_entries_per_row[0], true));
+ n_entries_per_row.data(), true));
else
graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
- &n_entries_per_row[0], true));
+ n_entries_per_row.data(), true));
// This function assumes that the sparsity pattern sits on all
// processors (completely). The parallel version uses an Epetra graph
row_indices[col] = p->column();
}
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
- &row_indices[0]);
+ row_indices.data());
}
// Eventually, optimize the graph structure (sort indices, make memory
Epetra_Map relevant_map (TrilinosWrappers::types::int_type(-1),
TrilinosWrappers::types::int_type(relevant_rows.n_elements()),
(indices.empty() ? nullptr :
- reinterpret_cast<TrilinosWrappers::types::int_type *>(&indices[0])),
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(indices.data())),
0, input_row_map.Comm());
if (relevant_map.SameAs(input_row_map))
have_ghost_rows = false;
}
Epetra_Map off_processor_map(-1, ghost_rows.size(),
- (ghost_rows.size()>0)?(&ghost_rows[0]):nullptr,
+ (ghost_rows.size()>0)?(ghost_rows.data()):nullptr,
0, input_row_map.Comm());
std::shared_ptr<Epetra_CrsGraph> graph;
if (input_row_map.Comm().NumProc() > 1)
{
graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
- (n_entries_per_row.size()>0)?(&n_entries_per_row[0]):nullptr,
+ (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
exchange_data ? false : true));
if (have_ghost_rows == true)
nonlocal_graph.reset (new Epetra_CrsGraphMod (off_processor_map,
- &n_entries_per_ghost_row[0]));
+ n_entries_per_ghost_row.data()));
}
else
graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
- (n_entries_per_row.size()>0)?(&n_entries_per_row[0]):nullptr,
+ (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
true));
// now insert the indices, select between the right matrix
row_indices[col] = sparsity_pattern.column_number(global_row, col);
if (input_row_map.MyGID(global_row))
- graph->InsertGlobalIndices (global_row, row_length, &row_indices[0]);
+ graph->InsertGlobalIndices (global_row, row_length, row_indices.data());
else
{
Assert(nonlocal_graph.get() != nullptr, ExcInternalError());
nonlocal_graph->InsertGlobalIndices (global_row, row_length,
- &row_indices[0]);
+ row_indices.data());
}
}
++select_index;
++it;
}
- set (row, col, reinterpret_cast<size_type *>(&row_indices[0]),
- &values[0], false);
+ set (row, col, reinterpret_cast<size_type *>(row_indices.data()),
+ values.data(), false);
}
compress(VectorOperation::insert);
}
const TrilinosScalar *in_values = input_matrix[0];
TrilinosScalar *values = (*matrix)[0];
const size_type my_nonzeros = input_matrix.NumMyNonzeros();
- std::memcpy (&values[0], &in_values[0],
+ std::memcpy (values, in_values,
my_nonzeros*sizeof (TrilinosScalar));
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- set (row_indices[i], col_indices.size(), &col_indices[0], &values(i,0),
+ set (row_indices[i], col_indices.size(), col_indices.data(), &values(i,0),
elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- set (row, col_indices.size(), &col_indices[0], &values[0],
+ set (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
Assert (values.m() == values.n(), ExcNotQuadratic());
for (size_type i=0; i<indices.size(); ++i)
- add (indices[i], indices.size(), &indices[0], &values(i,0),
+ add (indices[i], indices.size(), indices.data(), &values(i,0),
elide_zero_values);
}
ExcDimensionMismatch(col_indices.size(), values.n()));
for (size_type i=0; i<row_indices.size(); ++i)
- add (row_indices[i], col_indices.size(), &col_indices[0],
+ add (row_indices[i], col_indices.size(), col_indices.data(),
&values(i,0), elide_zero_values);
}
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
- add (row, col_indices.size(), &col_indices[0], &values[0],
+ add (row, col_indices.size(), col_indices.data(), values.data(),
elide_zero_values);
}
indices.resize(graph->NumGlobalIndices(static_cast<TrilinosWrappers::types::int_type>(row)));
int n_indices = 0;
graph->ExtractGlobalRowCopy(static_cast<TrilinosWrappers::types::int_type>(row),
- indices.size(), n_indices, &indices[0]);
+ indices.size(), n_indices, indices.data());
AssertDimension(static_cast<unsigned int>(n_indices), indices.size());
for (TrilinosWrappers::types::int_type i=0; i<n_indices; ++i)
if (row_map.Comm().NumProc() > 1)
graph.reset(new Epetra_FECrsGraph(Copy, row_map,
- &local_entries_per_row[0],
+ local_entries_per_row.data(),
false
// TODO: Check which new Trilinos
// version supports this... Remember
));
else
graph.reset(new Epetra_FECrsGraph(Copy, row_map, col_map,
- &local_entries_per_row[0],
+ local_entries_per_row.data(),
false));
}
if (row_map.Comm().NumProc() > 1)
graph.reset(new Epetra_FECrsGraph(Copy, row_map,
- &n_entries_per_row[0],
+ n_entries_per_row.data(),
false));
else
graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
- &n_entries_per_row[0],
+ n_entries_per_row.data(),
false));
AssertDimension (sp.n_rows(),
}
}
graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
- &row_indices[0]);
+ row_indices.data());
}
else
for (size_type row=0; row<sp.n_rows(); ++row)
}
graph->InsertGlobalIndices (1,
reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),
- row_length, &row_indices[0]);
+ row_length, row_indices.data());
}
int ierr =
}
Assert (n_elements == added_elements, ExcInternalError());
- Epetra_Map new_map (v.size(), n_elements, &global_ids[0], 0,
+ Epetra_Map new_map (v.size(), n_elements, global_ids.data(), 0,
v.block(0).vector_partitioner().Comm());
std::shared_ptr<Epetra_FEVector> actual_vec;
// just send an empty message.
if (data.size())
{
- const int ierr = MPI_Isend(&data[0], data.size()*sizeof(data[0]),
+ const int ierr = MPI_Isend(data.data(), data.size()*sizeof(data[0]),
MPI_BYTE, dest, 71, tria->get_communicator(),
&*requests.rbegin());
AssertThrowMPI(ierr);
Assert(static_cast<int>(count * sizeof(DoFPair)) == len, ExcInternalError());
receive_buffer.resize(count);
- void *ptr = &receive_buffer[0];
+ void *ptr = receive_buffer.data();
ierr = MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
tria->get_communicator(), &status);
AssertThrowMPI(ierr);
// * wait for all MPI_Isend to complete
if (requests.size() > 0)
{
- const int ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+ const int ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);
requests.clear();
}
for (unsigned int i=0; i<dofs_per_cell; ++i)
prolongation_matrices[level]->set (dof_indices_child[i],
dofs_per_cell,
- &dof_indices_parent[0],
+ dof_indices_parent.data(),
&prolongation(i,0),
true);
}