#include <lac/petsc_vector_base.h>
-/*! @addtogroup PETSc
- *@{
- */
-
namespace PETScWrappers
{
/**
* Namespace for PETSc classes that work in parallel over MPI, such as
* distributed vectors and matrices.
*
+ * @ingroup PETSc
* @author Wolfgang Bangerth, 2004
*/
namespace MPI
 * functions). Only the functions that create a vector of a specific type
 * differ, and they are implemented in this particular class.
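+ *
+ * As a brief illustration of such a creation function, a distributed vector
+ * might be set up as in the following sketch (the constructor signature and
+ * the @p local_size value are assumptions for the purpose of the example and
+ * may differ slightly from the actual interface):
+ * @code
+ *   // vector with 100 elements in total, of which this process
+ *   // stores local_size elements
+ *   PETScWrappers::MPI::Vector v (MPI_COMM_WORLD, 100, local_size);
+ * @endcode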
*
- * @section 1 Parallel communication model
+ * @section PETScParVec1 Parallel communication model
*
* The parallel functionality of PETSc is built on top of the Message Passing
 * Interface (MPI). MPI's communication model is based on collective
* discussed below.
*
*
- * @section 2 Accessing individual elements of a vector
+ * @section PETScParVec2 Accessing individual elements of a vector
*
* PETSc does allow read access to individual elements of a vector, but in the
* distributed case only to elements that are stored locally. We implement
 * compress(), or making sure that all processes do the same type of
 * operations at the same time, for example by adding zeros if
 * necessary.
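+ *
+ * A minimal sketch of this pattern (assuming a parallel vector @p v as
+ * created above, and placeholder index bounds @p local_begin and
+ * @p local_end describing the locally stored range) might look like this:
+ * @code
+ *   // each process writes only to elements it stores locally
+ *   for (unsigned int i=local_begin; i<local_end; ++i)
+ *     v(i) = i;
+ *
+ *   // exchange the buffered write operations so that the vector
+ *   // is in a consistent state again
+ *   v.compress ();
+ * @endcode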
- *
+ *
+ * @ingroup PETSc
+ * @brief Parallel vector using MPI
* @author Wolfgang Bangerth, 2004
*/
class Vector : public VectorBase
return *this;
}
}
-
}
-/*@}*/
#endif // DEAL_II_USE_PETSC
#include <lac/petsc_vector_base.h>
#include <lac/petsc_parallel_vector.h>
-/*! @addtogroup PETSc
- *@{
- */
-
namespace PETScWrappers
{
 * functions). Only the functions that create a vector of a specific type
 * differ, and they are implemented in this particular class.
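+ *
+ * As a small usage sketch (assuming the usual interface of a size
+ * constructor and element access; details may differ), a sequential vector
+ * could be used like this:
+ * @code
+ *   // sequential PETSc vector with 100 elements
+ *   PETScWrappers::Vector v (100);
+ *   for (unsigned int i=0; i<v.size(); ++i)
+ *     v(i) = i;
+ * @endcode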
*
+ * @ingroup PETSc
+ * @brief Sequential vector
* @author Wolfgang Bangerth, 2004
*/
class Vector : public VectorBase
};
-
+/// @if NoDoc
// ------------------ template and inline functions -------------
return *this;
}
+/// @endif
+
}
-/*@}*/
#endif // DEAL_II_USE_PETSC