ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/serialization/src)
- IF( DEAL_II_USE_MT AND NOT DEAL_II_CAN_USE_CXX11)
+ IF( DEAL_II_WITH_THREADS AND NOT DEAL_II_CAN_USE_CXX11)
#
# If the C++ compiler doesn't completely support the C++11 standard
# (and consequently we can't use std::thread, std::mutex, etc.), then
SET(CMAKE_CXX_COMPILER ${MPI_CXX_COMPILER})
ENDIF()
- SET(DEAL_II_COMPILER_SUPPORTS_MPI TRUE)
-
SET(${var} TRUE)
ENDMACRO()
)
SET(${var} TRUE)
- #
- # We support threading. Go on and configure the rest:
- #
- SET(DEAL_II_USE_MT TRUE)
-
#
# Change -lpthread to -pthread for better compatibility on non-Linux
# platforms:
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${ARPACK_LIBRARIES})
ADD_FLAGS(CMAKE_SHARED_LINKER_FLAGS "${ARPACK_LINKER_FLAGS}")
- SET(DEAL_II_USE_ARPACK TRUE)
-
SET(${var} TRUE)
ENDMACRO()
MACRO(FEATURE_FUNCTIONPARSER_CONFIGURE_BUNDLED var)
INCLUDE_DIRECTORIES(${FUNCTIONPARSER_FOLDER})
- SET(HAVE_FUNCTIONPARSER TRUE)
SET(${var} TRUE)
ENDMACRO()
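#
# The configure macros in these files all follow the pattern above: wire up
# include directories and libraries, then report success through ${var}.
# A minimal sketch for a hypothetical feature FOO (FOO_INCLUDE_DIRS and
# FOO_LIBRARIES are assumed to come from the corresponding find module);
# note that the per-feature defines such as DEAL_II_WITH_FOO are no longer
# set inside these macros but centrally, once, by the feature machinery:
#
MACRO(FEATURE_FOO_CONFIGURE_EXTERNAL var)
  INCLUDE_DIRECTORIES(${FOO_INCLUDE_DIRS})
  LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${FOO_LIBRARIES})
  SET(${var} TRUE)
ENDMACRO()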
INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIRS})
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${HDF5_LIBRARIES})
- SET(DEAL_II_HAVE_HDF5 TRUE)
-
SET(${var} TRUE)
ENDMACRO()
CHECK_FOR_LAPACK_FUNCTIONS()
- SET(HAVE_LIBLAPACK TRUE)
SET(${var} TRUE)
ENDMACRO()
INCLUDE_DIRECTORIES(${METIS_INCLUDE_DIRS})
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${METIS_LIBRARIES})
- SET(DEAL_II_USE_METIS TRUE)
-
SET(${var} TRUE)
ENDMACRO()
)
ADD_FLAGS(CMAKE_SHARED_LINKER_FLAGS "${MUMPS_LINKER_FLAGS}")
- SET(DEAL_II_USE_MUMPS TRUE)
-
SET(${var} TRUE)
ENDMACRO()
MACRO(FEATURE_NETCDF_CONFIGURE_EXTERNAL var)
INCLUDE_DIRECTORIES(${NETCDF_INCLUDE_DIRS})
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${NETCDF_LIBRARIES})
- SET(HAVE_LIBNETCDF TRUE)
-
SET(${var} TRUE)
ENDMACRO()
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${P4EST_LIBRARIES})
- SET(DEAL_II_USE_P4EST TRUE)
-
SET(${var} TRUE)
ENDMACRO()
# _NOT_ enabled.
# So we check for this:
#
- IF( (PETSC_WITH_MPIUNI AND DEAL_II_COMPILER_SUPPORTS_MPI)
+ IF( (PETSC_WITH_MPIUNI AND DEAL_II_WITH_MPI)
OR
- (NOT PETSC_WITH_MPIUNI AND NOT DEAL_II_COMPILER_SUPPORTS_MPI))
+ (NOT PETSC_WITH_MPIUNI AND NOT DEAL_II_WITH_MPI))
MESSAGE(WARNING "\n"
"Could not find a sufficient petsc installation: "
"Petsc has to be configured with the same MPI configuration as deal.II.\n\n"
${PETSC_LIBRARIES}
)
- SET(DEAL_II_USE_PETSC TRUE)
-
#
# Disable a bunch of warnings when compiling with PETSc:
#
${SLEPC_LIBRARIES}
)
- SET(DEAL_II_USE_SLEPC TRUE)
-
SET(${var} TRUE)
ENDMACRO()
# Trilinos has to be configured with the same MPI configuration as
# deal.II.
#
- IF( (TRILINOS_WITH_MPI AND NOT DEAL_II_COMPILER_SUPPORTS_MPI)
+ IF( (TRILINOS_WITH_MPI AND NOT DEAL_II_WITH_MPI)
OR
- (NOT TRILINOS_WITH_MPI AND DEAL_II_COMPILER_SUPPORTS_MPI))
+ (NOT TRILINOS_WITH_MPI AND DEAL_II_WITH_MPI))
MESSAGE(WARNING "\n"
"Trilinos has to be configured with the same MPI configuration as deal.II.\n\n"
)
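#
# Note: this is the same consistency check as for PETSc above; both require
# that the package's MPI configuration match deal.II's. The polarity is
# inverted relative to the PETSc test because PETSC_WITH_MPIUNI is true for
# a *serial* PETSc build, whereas TRILINOS_WITH_MPI is true for an MPI build.
#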
${Trilinos_TPL_LIBRARIES}
)
- SET(DEAL_II_USE_TRILINOS TRUE)
-
SET(DEAL_II_EXPAND_TRILINOS_VECTOR "TrilinosWrappers::Vector")
SET(DEAL_II_EXPAND_TRILINOS_BLOCKVECTOR "TrilinosWrappers::BlockVector")
SET(DEAL_II_EXPAND_TRILINOS_SPARSITY_PATTERN "TrilinosWrappers::SparsityPattern")
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${UMFPACK_LIBRARIES})
ADD_FLAGS(CMAKE_SHARED_LINKER_FLAGS "${UMFPACK_LINKER_FLAGS}")
- SET(HAVE_LIBUMFPACK TRUE)
-
SET(${var} TRUE)
ENDMACRO()
${UMFPACK_FOLDER}/AMD/Include
)
- SET(HAVE_LIBUMFPACK TRUE)
-
SET(${var} TRUE)
ENDMACRO()
MACRO(FEATURE_ZLIB_CONFIGURE_EXTERNAL var)
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIRS})
LIST(APPEND DEAL_II_EXTERNAL_LIBRARIES ${ZLIB_LIBRARIES})
- SET(HAVE_LIBZ TRUE)
-
SET(${var} TRUE)
ENDMACRO()
#include <iostream>
-#ifdef HAVE_LIBUMFPACK
+#ifdef DEAL_II_WITH_UMFPACK
extern "C" {
#include <umfpack.h>
}
#endif
-#if defined(DEAL_II_COMPILER_SUPPORTS_MPI)
+#if defined(DEAL_II_WITH_MPI)
#include <mpi.h>
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <Trilinos_version.h>
#endif
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
# include <deal.II/base/utilities.h>
# include <dmumps_c.h>
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <petscversion.h>
#endif
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
# include <slepcversion.h>
#endif
std::cout << "dealii-feature: BLAS=yes" << std::endl;
#endif
-#ifdef HAVE_LIBLAPACK
+#ifdef DEAL_II_WITH_LAPACK
std::cout << "dealii-feature: LAPACK=yes" << std::endl;
#endif
-#ifdef HAVE_LIBUMFPACK
+#ifdef DEAL_II_WITH_UMFPACK
std::cout << "dealii-feature: UMFPACK="
<< UMFPACK_MAIN_VERSION << '.'
<< UMFPACK_SUB_VERSION << '.'
<< UMFPACK_SUBSUB_VERSION << std::endl;
#endif
-#if defined(DEAL_II_COMPILER_SUPPORTS_MPI)
+#if defined(DEAL_II_WITH_MPI)
# ifdef OMPI_MAJOR_VERSION
std::cout << "dealii-feature: MPI=OpenMPI-"
<< OMPI_MAJOR_VERSION << '.'
<< OMPI_MINOR_VERSION << '.'
<< OMPI_RELEASE_VERSION << std::endl;
# endif
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# ifdef TRILINOS_VERSION_STRING
std::cout << "dealii-feature: Trilinos=" << TRILINOS_VERSION_STRING << std::endl;
# else
# endif
#endif
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
std::cout << "dealii-feature: MUMPS=yes" << std::endl;
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
std::cout << "dealii-feature: PETSc="
<< PETSC_VERSION_MAJOR << '.'
<< PETSC_VERSION_MINOR << '.'
<< PETSC_VERSION_PATCH << std::endl;
#endif
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
std::cout << "dealii-feature: SLEPc="
<< SLEPC_VERSION_MAJOR << '.'
<< SLEPC_VERSION_MINOR << '.'
<< SLEPC_VERSION_SUBMINOR;
std::cout << std::endl;
#endif
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
std::cout << "dealii-feature: P4est=yes" << std::endl;
#endif
-#ifdef DEAL_II_HAVE_HDF5
+#ifdef DEAL_II_WITH_HDF5
std::cout << "dealii-feature: HDF5=yes" << std::endl;
#endif
std::cout << "dealii-feature: Tecplot=yes" << std::endl;
#endif
-#ifdef HAVE_LIBNETCDF
+#ifdef DEAL_II_WITH_NETCDF
std::cout << "dealii-feature: NetCDF=yes" << std::endl;
#endif
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
std::cout << "dealii-feature: LibZ=yes" << std::endl;
#endif
-#ifdef HAVE_FUNCTIONPARSER
+#ifdef DEAL_II_WITH_FUNCTIONPARSER
std::cout << "dealii-feature: parser=yes" << std::endl;
#else
std::cout << "dealii-feature: parser=no" << std::endl;
PREDEFINED = DOXYGEN=1 \
DEBUG=1 \
- DEAL_II_USE_PETSC=1 \
- DEAL_II_USE_SLEPC=1 \
- DEAL_II_USE_TRILINOS=1 \
- DEAL_II_USE_MT=1 \
+ DEAL_II_WITH_PETSC=1 \
+ DEAL_II_WITH_SLEPC=1 \
+ DEAL_II_WITH_TRILINOS=1 \
+ DEAL_II_WITH_THREADS=1 \
DEAL_II_USE_MT_POSIX=1 \
DEAL_II_USE_ARPACK=1 \
- DEAL_II_USE_METIS=1 \
- DEAL_II_USE_MUMPS=1 \
- DEAL_II_USE_P4EST=1
+ DEAL_II_WITH_METIS=1 \
+ DEAL_II_WITH_MUMPS=1 \
+ DEAL_II_WITH_P4EST=1
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
<li> <p>
Changed: Previously, we just set the preprocessor variable
- <code>DEAL_II_USE_MT</code>, when <code>--with-multithreading</code> was
+ <code>DEAL_II_WITH_THREADS</code> when <code>--with-multithreading</code> was
given as an argument to <code>./configure</code>. Tests in the code
- therefore looked like <code>#ifdef DEAL_II_USE_MT</code>. This has been
+ therefore looked like <code>#ifdef DEAL_II_WITH_THREADS</code>. This has been
changed so that the variable is always defined, but its value is now
equal to <code>1</code>
when multithreading was requested, and zero otherwise. The reason for
- this is that you can now write <code>if (DEAL_II_USE_MT && ...)</code>
+ this is that you can now write <code>if (DEAL_II_WITH_THREADS && ...)</code>
conditions, and need not interleave if-else clauses from regular code
and the preprocessor, if conditions involve both the state of this
preprocessor variable and the run-time state of your program. However,
this change requires that all appearances of <code>#ifdef
- DEAL_II_USE_MT</code> be changed to <code>#if DEAL_II_USE_MT ==
+ DEAL_II_WITH_THREADS</code> be changed to <code>#if DEAL_II_WITH_THREADS ==
1</code>, since the variable is now defined unconditionally.
<br>
(WB 2002/11/14)
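<p>
A minimal sketch of the change, with hypothetical user code:
<pre>
// before: test whether the symbol is defined at all
#ifdef DEAL_II_WITH_THREADS
  spawn_threads();
#endif

// after: the symbol is always defined, with value 1 or 0
#if DEAL_II_WITH_THREADS == 1
  spawn_threads();
#endif

// ...which also mixes with run-time conditions in a single expression:
if (DEAL_II_WITH_THREADS && (n_jobs > 1))
  spawn_threads();
</pre>
</p>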
<li>
<p>
New: If the compiler allows one to do <code>\#include @<mpi.h@></code>, then
- the preprocessor flag <code>DEAL_II_COMPILER_SUPPORTS_MPI</code> is now set in
+ the preprocessor flag <code>DEAL_II_WITH_MPI</code> is now set in
<code>base/include/base/config.h</code>. This also fixes a problem in
<code>base/include/base/utilities.h</code> if a compiler capable of
including <code>mpi.h</code> was used but PETSc was not.
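<p>
A minimal sketch of guarding MPI-dependent code on this flag (the same
idiom used throughout the library):
<pre>
#ifdef DEAL_II_WITH_MPI
#  include @<mpi.h@>
#endif
</pre>
</p>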
void assemble_system ();
void output_results (const unsigned int timestep_number) const;
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
parallel::distributed::Triangulation<dim> triangulation;
#else
Triangulation<dim> triangulation;
:
pcout (std::cout,
Utilities::System::get_this_mpi_process(MPI_COMM_WORLD)==0),
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
triangulation (MPI_COMM_WORLD),
#endif
fe (QGaussLobatto<1>(fe_degree+1)),
}
pcout << " Number of global active cells: "
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
<< triangulation.n_global_active_cells()
#else
<< triangulation.n_active_cells()
/* Defined if deal.II was configured on a native Windows platform. */
#cmakedefine DEAL_II_MSVC
+/* Disable a bunch of warnings for Microsoft Visual C++. */
+//#ifdef DEAL_II_MSVC
+//# pragma warning( disable : 4244 ) /* implied downcasting from double to float */
+//# pragma warning( disable : 4267 ) /* implied downcasting from size_t to unsigned int */
+//# pragma warning( disable : 4996 ) /* unsafe functions, such as strcat and sprintf */
+//# pragma warning( disable : 4355 ) /* 'this' : used in base member initializer list */
+//# pragma warning( disable : 4661 ) /* no suitable definition provided for explicit template instantiation request */
+//# pragma warning( disable : 4800 ) /* forcing value to bool 'true' or 'false' (performance warning) */
+//# pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
+//# pragma warning( disable : 4667 ) /* no function template defined that matches forced instantiation */
+//# pragma warning( disable : 4520 ) /* multiple default constructors specified */
+//# pragma warning( disable : 4700 ) /* uninitialized local variable */
+//# pragma warning( disable : 4789 ) /* destination of memory copy is too small */
+//# pragma warning( disable : 4808 ) /* case 'value' is not a valid value for switch condition of type 'bool' */
+//#endif // DEAL_II_MSVC
+
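/*
 * All DEAL_II_WITH_* symbols below are generated from #cmakedefine lines.
 * With CMake's standard CONFIGURE_FILE() behavior, a line such as
 * "#cmakedefine DEAL_II_WITH_MPI" becomes "#define DEAL_II_WITH_MPI" if
 * the variable is set at configure time, and a commented-out "#undef"
 * otherwise.
 */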
/****************************************
* Configured in check_2_compiler_bugs: *
#cmakedefine DEAL_II_BOOST_BIND_COMPILER_BUG
-
/*****************************************
* Configured in configure_arpack.cmake: *
*****************************************/
-/* Defined if an ARPACK installation was found and is going to be used */
#cmakedefine DEAL_II_USE_ARPACK
-
/*************************************************
* Configured in configure_functionparser.cmake: *
*************************************************/
-/* Defined if you have the `functionparser' library */
-#cmakedefine HAVE_FUNCTIONPARSER
-
+#cmakedefine DEAL_II_WITH_FUNCTIONPARSER
/***************************************
* Configured in configure_hdf5.cmake: *
***************************************/
-/* Defined if deal.II was configured with hdf5 support */
-#cmakedefine DEAL_II_HAVE_HDF5
-
+#cmakedefine DEAL_II_WITH_HDF5
/*****************************************
* Configured in configure_lapack.cmake: *
*****************************************/
-/* Defined if deal.II was configured with LAPACK support */
-#cmakedefine HAVE_LIBLAPACK
+#cmakedefine DEAL_II_WITH_LAPACK
-/* Defined if you have the `daxpy_' function. */
+/* Defined if the corresponding BLAS or LAPACK function is available */
#cmakedefine HAVE_DAXPY_
-
-/* Defined if you have the `dgeevx_' function. */
#cmakedefine HAVE_DGEEVX_
-
-/* Defined if you have the `dgeev_' function. */
#cmakedefine HAVE_DGEEV_
-
-/* Defined if you have the `dgelsd_' function. */
#cmakedefine HAVE_DGELSD_
-
-/* Defined if you have the `dgemm_' function. */
#cmakedefine HAVE_DGEMM_
-
-/* Defined if you have the `dgemv_' function. */
#cmakedefine HAVE_DGEMV_
-
-/* Defined if you have the `dgeqrf_' function. */
#cmakedefine HAVE_DGEQRF_
-
-/* Defined if you have the `dgesdd_' function. */
#cmakedefine HAVE_DGESDD_
-
-/* Defined if you have the `dgesvd_' function. */
#cmakedefine HAVE_DGESVD_
-
-/* Defined if you have the `dgetrf_' function. */
#cmakedefine HAVE_DGETRF_
-
-/* Defined if you have the `dgetri_' function. */
#cmakedefine HAVE_DGETRI_
-
-/* Defined if you have the `dgetrs_' function. */
#cmakedefine HAVE_DGETRS_
-
-/* Defined if you have the `dorgqr_' function. */
#cmakedefine HAVE_DORGQR_
-
-/* Defined if you have the `dormqr_' function. */
#cmakedefine HAVE_DORMQR_
-
-/* Defined if you have the `dstev_' function. */
#cmakedefine HAVE_DSTEV_
-
-/* Defined if you have the `dsyevx_' function. */
#cmakedefine HAVE_DSYEVX_
-
-/* Defined if you have the `dsygv_' function. */
#cmakedefine HAVE_DSYGV_
-
-/* Defined if you have the `dsygvx_' function. */
#cmakedefine HAVE_DSYGVX_
-
-/* Defined if you have the `dtrtrs_' function. */
#cmakedefine HAVE_DTRTRS_
-
-/* Defined if you have the `saxpy_' function. */
#cmakedefine HAVE_SAXPY_
-
-/* Defined if you have the `sgeevx_' function. */
#cmakedefine HAVE_SGEEVX_
-
-/* Defined if you have the `sgeev_' function. */
#cmakedefine HAVE_SGEEV_
-
-/* Defined if you have the `sgelsd_' function. */
#cmakedefine HAVE_SGELSD_
-
-/* Defined if you have the `sgemm_' function. */
#cmakedefine HAVE_SGEMM_
-
-/* Defined if you have the `sgemv_' function. */
#cmakedefine HAVE_SGEMV_
-
-/* Defined if you have the `sgeqrf_' function. */
#cmakedefine HAVE_SGEQRF_
-
-/* Defined if you have the `sgesdd_' function. */
#cmakedefine HAVE_SGESDD_
-
-/* Defined if you have the `sgesvd_' function. */
#cmakedefine HAVE_SGESVD_
-
-/* Defined if you have the `sgetrf_' function. */
#cmakedefine HAVE_SGETRF_
-
-/* Defined if you have the `sgetri_' function. */
#cmakedefine HAVE_SGETRI_
-
-/* Defined if you have the `sgetrs_' function. */
#cmakedefine HAVE_SGETRS_
-
-/* Defined if you have the `sorgqr_' function. */
#cmakedefine HAVE_SORGQR_
-
-/* Defined if you have the `sormqr_' function. */
#cmakedefine HAVE_SORMQR_
-
-/* Defined if you have the `sstev_' function. */
#cmakedefine HAVE_SSTEV_
-
-/* Defined if you have the `ssyevx_' function. */
#cmakedefine HAVE_SSYEVX_
-
-/* Defined if you have the `ssygv_' function. */
#cmakedefine HAVE_SSYGV_
-
-/* Defined if you have the `ssygvx_' function. */
#cmakedefine HAVE_SSYGVX_
-
-/* Defined if you have the `strtrs_' function. */
#cmakedefine HAVE_STRTRS_
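/*
 * Usage sketch (hypothetical caller): code can test the probe result from
 * CHECK_FOR_LAPACK_FUNCTIONS() for exactly the routine it needs, e.g.
 *
 *   #ifdef HAVE_DGEMM_
 *     dgemm_(&transa, &transb, &m, &n, &k, &alpha, A, &lda,
 *            B, &ldb, &beta, C, &ldc);
 *   #endif
 */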
* Configured in configure_metis.cmake: *
****************************************/
-/* Defined if a Metis installation was found and is going to be used */
-#cmakedefine DEAL_II_USE_METIS
+#cmakedefine DEAL_II_WITH_METIS
/**************************************
* Configured in configure_mpi.cmake: *
**************************************/
-/* Defined if the compiler supports mpi */
-#cmakedefine DEAL_II_COMPILER_SUPPORTS_MPI
+#cmakedefine DEAL_II_WITH_MPI
/*****************************************
* Configured in configure_mumps.cmake: *
*****************************************/
-/* Defined if an MUMPS installation was found and is going to be used */
-#cmakedefine DEAL_II_USE_MUMPS
+#cmakedefine DEAL_II_WITH_MUMPS
/*****************************************
* Configured in configure_netcdf.cmake: *
*****************************************/
-/* Defined if you have the `NetCDF' library */
-#cmakedefine HAVE_LIBNETCDF
+#cmakedefine DEAL_II_WITH_NETCDF
/****************************************
* Configured in configure_p4est.cmake: *
****************************************/
-/* Defined if we are to use the p4est library to distribute meshes on a
- cluster computer. */
-#cmakedefine DEAL_II_USE_P4EST
+#cmakedefine DEAL_II_WITH_P4EST
/****************************************
* Configured in configure_petsc.cmake: *
****************************************/
-/* Defined if a PETSc installation was found and is going to be used */
-#cmakedefine DEAL_II_USE_PETSC
+#cmakedefine DEAL_II_WITH_PETSC
+
/*
* Note: The following definitions will be set in petscconf.h and
>= \
(major)*10000 + (minor)*100 + (subminor))
-
/****************************************
* Configured in configure_slepc.cmake: *
****************************************/
-/* Defined if a SLEPc installation was found and is going to be used */
-#cmakedefine DEAL_II_USE_SLEPC
+#cmakedefine DEAL_II_WITH_SLEPC
/********************************************
* Configured in configure_1_threads.cmake: *
********************************************/
-/* Flag indicating whether the library shall be compiled for multithreaded
- * applications. If so, then it is set to one, otherwise to zero.
- */
-#cmakedefine DEAL_II_USE_MT
+#cmakedefine DEAL_II_WITH_THREADS
/* Defined if multi-threading is to be achieved by using the POSIX functions
*/
* volatile. We do this here in a very old-fashioned C-style, but still
* convenient way.
*/
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# define DEAL_VOLATILE volatile
#else
# define DEAL_VOLATILE
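/*
 * Usage sketch (hypothetical variable): a flag written by one thread and
 * polled by another would be declared as
 *
 *   DEAL_VOLATILE bool job_done;
 *
 * which expands to "volatile bool job_done;" only in threaded builds.
 */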
* Configured in configure_trilinos.cmake: *
*******************************************/
-/* Defined if a Trilinos installation was found and is going to be used */
-#cmakedefine DEAL_II_USE_TRILINOS
+#cmakedefine DEAL_II_WITH_TRILINOS
/******************************************
* Configured in configure_umfpack.cmake: *
******************************************/
-/* Defined if deal.II was configured with UMFPACK support */
-#cmakedefine HAVE_LIBUMFPACK
+#cmakedefine DEAL_II_WITH_UMFPACK
/***************************************
* Configured in configure_zlib.cmake: *
***************************************/
-/* Defined if deal.II was configure with zlib support */
-#cmakedefine HAVE_LIBZ
-
-
-/* Disable a bunch of warnings for Microsoft Visual C++. */
-#ifdef DEAL_II_MSVC
-//# pragma warning( disable : 4244 ) /* implied downcasting from double to float */
-//# pragma warning( disable : 4267 ) /* implied downcasting from size_t to unsigned int */
-//# pragma warning( disable : 4996 ) /* unsafe functions, such as strcat and sprintf */
-//# pragma warning( disable : 4355 ) /* 'this' : used in base member initializer list */
-//# pragma warning( disable : 4661 ) /* no suitable definition provided for explicit template instantiation request */
-//# pragma warning( disable : 4800 ) /* forcing value to bool 'true' or 'false' (performance warning) */
-//# pragma warning( disable : 4146 ) /* unary minus operator applied to unsigned type, result still unsigned */
-//# pragma warning( disable : 4667 ) /* no function template defined that matches forced instantiation */
-//# pragma warning( disable : 4520 ) /* multiple default constructors specified */
-//# pragma warning( disable : 4700 ) /* uninitialized local variable */
-//# pragma warning( disable : 4789 ) /* destination of memory copy is too small */
-//# pragma warning( disable : 4808 ) /* case 'value' is not a valid value for switch condition of type 'bool */
-#endif // DEAL_II_MSVC
+#cmakedefine DEAL_II_WITH_ZLIB
#include <deal.II/base/numbers.h>
/**
* Assert support for the LAPACK library
*/
-#ifdef HAVE_LIBLAPACK
+#ifdef DEAL_II_WITH_LAPACK
# define AssertLAPACK {}
#else
# define AssertLAPACK Assert(false, ExcNeedsLAPACK())
/**
* Assert support for the UMFPACK library
*/
-#ifdef HAVE_LIBUMFPACK
+#ifdef DEAL_II_WITH_UMFPACK
# define AssertUMFPACK {}
#else
# define AssertUMFPACK Assert(false, ExcNeedsUMFPACK())
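/*
 * Usage sketch (hypothetical code path): a function that requires UMFPACK
 * begins with the macro, which is a no-op when deal.II was built with
 * UMFPACK and aborts with a meaningful error message otherwise:
 *
 *   AssertUMFPACK;
 *   // ... factorize and solve with UMFPACK ...
 */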
#include <vector>
#include <algorithm>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <Epetra_Map.h>
#endif
-#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC)
+#if defined(DEAL_II_WITH_MPI) || defined(DEAL_II_WITH_PETSC)
#include <mpi.h>
#else
typedef int MPI_Comm;
*/
void block_read(std::istream &in);
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* Given an MPI communicator,
* create a Trilinos map object
#include <deal.II/base/config.h>
#include <vector>
-#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC)
+#if defined(DEAL_II_WITH_MPI) || defined(DEAL_II_WITH_PETSC)
# include <mpi.h>
// Check whether <mpi.h> is a suitable
// include for us (if MPI_SEEK_SET is not
namespace internal
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* Return the corresponding MPI data
* type id for the argument given.
T sum (const T &t,
const MPI_Comm &mpi_communicator)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
T sum;
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&t)),
&sum, 1, internal::mpi_type_id(&t), MPI_SUM,
const MPI_Comm &mpi_communicator,
T (&sums)[N])
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&values[0])),
&sums[0], N, internal::mpi_type_id(values), MPI_SUM,
mpi_communicator);
const MPI_Comm &mpi_communicator,
std::vector<T> &sums)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
sums.resize (values.size());
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&values[0])),
&sums[0], values.size(), internal::mpi_type_id((T *)0), MPI_SUM,
T max (const T &t,
const MPI_Comm &mpi_communicator)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
T sum;
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&t)),
&sum, 1, internal::mpi_type_id(&t), MPI_MAX,
const MPI_Comm &mpi_communicator,
T (&maxima)[N])
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&values[0])),
&maxima[0], N, internal::mpi_type_id(values), MPI_MAX,
mpi_communicator);
const MPI_Comm &mpi_communicator,
std::vector<T> &maxima)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
maxima.resize (values.size());
MPI_Allreduce (const_cast<void *>(static_cast<const void *>(&values[0])),
&maxima[0], values.size(), internal::mpi_type_id((T *)0), MPI_MAX,
#include <cstddef>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# include <tbb/parallel_for.h>
# include <tbb/parallel_reduce.h>
# include <tbb/partitioner.h>
Predicate &predicate,
const unsigned int grainsize)
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) grainsize;
Predicate &predicate,
const unsigned int grainsize)
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) grainsize;
Predicate &predicate,
const unsigned int grainsize)
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) grainsize;
namespace internal
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/**
* Take a range argument and call the
* given function with its begin and end.
const Function &f,
const unsigned int grainsize)
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) grainsize;
namespace internal
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/**
* A class that conforms to the Body
* requirements of the TBB
const typename identity<RangeType>::type &end,
const unsigned int grainsize)
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) grainsize;
namespace parallel
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
namespace internal
{
const std::size_t end,
const std::size_t minimum_parallel_grain_size) const
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// make sure we don't get compiler
// warnings about unused arguments
(void) minimum_parallel_grain_size;
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# include <tbb/enumerable_thread_specific.h>
#endif
ThreadLocalStorage<T> &operator = (const T &t);
private:
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/**
* The data element we store. If we support threads, then this
* object will be of a type that provides a separate object
T &
ThreadLocalStorage<T>::get ()
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
return data.local();
#else
return data;
#include <deal.II/base/std_cxx1x/shared_ptr.h>
#include <deal.II/base/std_cxx1x/bind.h>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# include <deal.II/base/std_cxx1x/thread.h>
# include <deal.II/base/std_cxx1x/mutex.h>
# include <deal.II/base/std_cxx1x/condition_variable.h>
#include <utility>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# ifdef DEAL_II_USE_MT_POSIX
# include <pthread.h>
# endif
};
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/**
* Class implementing a
namespace internal
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/**
* A class that represents threads. For
namespace internal
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
template <typename> struct TaskDescriptor;
#include <deal.II/base/thread_management.h>
#include <deal.II/base/utilities.h>
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
# include <mpi.h>
#endif
*/
Timer ();
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* Constructor that takes an MPI
* communicator as input. A timer
*/
MPI_Comm mpi_communicator;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* Store whether the wall time is
* synchronized between machines.
const enum OutputFrequency output_frequency,
const enum OutputType output_type);
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* Constructor that takes an MPI
* communicator as input. A timer
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
inline
const Utilities::System::MinMaxAvg &
#include <functional>
#include <string>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <Epetra_Comm.h>
# include <Epetra_Map.h>
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# else
# include <Epetra_SerialComm.h>
}
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* This namespace provides some of the basic structures used in the
* initialization of the Trilinos objects (e.g., matrices, vectors, and
#include <deal.II/base/std_cxx1x/function.h>
#include <deal.II/base/std_cxx1x/bind.h>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
# include <deal.II/base/thread_management.h>
# include <tbb/pipeline.h>
#endif
namespace WorkStream
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
namespace internal
}
-#endif // DEAL_II_USE_MT
+#endif // DEAL_II_WITH_THREADS
if (!(begin != end))
return;
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
// create the three stages of the
// pipeline
internal::IteratorRangeToItemStream<Iterator,ScratchData,CopyData>
#include <list>
#include <utility>
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
# include <mpi.h>
#endif
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
#include <p4est_connectivity.h>
#include <p4est.h>
#include <p4est_ghost.h>
template <int, int> class Triangulation;
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
namespace internal
{
}
-#else // DEAL_II_USE_P4EST
+#else // DEAL_II_WITH_P4EST
namespace parallel
{
* communicator used by this
* triangulation.
*/
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Comm get_communicator () const;
#endif
};
// dummy include in order to have the
// definition of PetscScalar available
// without including other PETSc stuff
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <petsc.h>
#endif
bool
CellAccessor<dim,spacedim>::is_locally_owned () const
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
return true;
#else
const types::subdomain_id subdomain = this->subdomain_id();
bool
CellAccessor<dim,spacedim>::is_ghost () const
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
return false;
#else
const types::subdomain_id subdomain = this->subdomain_id();
bool
CellAccessor<dim,spacedim>::is_artificial () const
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
return false;
#else
return (this->subdomain_id() == numbers::artificial_subdomain_id);
class BlockCompressedSparsityPattern;
class BlockCompressedSimpleSparsityPattern;
class BlockCompressedSetSparsityPattern;
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
{
class BlockSparsityPattern;
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
DEAL_II_NAMESPACE_OPEN
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
{
class Vector;
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* A copy constructor taking a
* (parallel) Trilinos block
BlockVector &
operator= (const Vector<Number> &V);
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* A copy constructor from a
* Trilinos block vector to a
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template <typename Number>
BlockVector<Number>::BlockVector (const TrilinosWrappers::BlockVector &v)
{}
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template <typename Number>
inline
BlockVector<Number> &
*/
VectorView<Number> vector_view;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* A vector that collects all requests from @p
* compress() operations. This class uses
void
Vector<Number>::clear_mpi_requests ()
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
for (unsigned int j=0; j<compress_requests.size(); j++)
MPI_Request_free(&compress_requests[j]);
compress_requests.clear();
void
Vector<Number>::compress_start (const unsigned int counter)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const Utilities::MPI::Partitioner &part = *partitioner;
// nothing to do when we neither have import
void
Vector<Number>::compress_finish (const bool add_ghost_data)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const Utilities::MPI::Partitioner &part = *partitioner;
// nothing to do when we neither have import
void
Vector<Number>::update_ghost_values_start (const unsigned int counter) const
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const Utilities::MPI::Partitioner &part = *partitioner;
// nothing to do when we neither have import
void
Vector<Number>::update_ghost_values_finish () const
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// wait for both sends and receives to
// complete, even though only receives are
// really necessary. this gives (much) better
void
Vector<Number>::swap (Vector<Number> &v)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// introduce a Barrier over all MPI processes
// to make sure that the compress requests are
// no longer used before changing the owner
// information in order, use as many barriers
// as there are processors and start writing
// when it's our turn
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
for (unsigned int i=0; i<partitioner->this_mpi_process(); i++)
MPI_Barrier (partitioner->get_communicator());
#endif
<< std::endl;
out << std::endl << std::flush;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Barrier (partitioner->get_communicator());
for (unsigned int i=partitioner->this_mpi_process()+1;
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/table.h>
# include <deal.II/lac/block_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#endif // __deal2__petsc_block_sparse_matrix_h
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/petsc_parallel_block_vector.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#endif
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_full_matrix.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/subscriptor.h>
# include <deal.II/lac/full_matrix.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_matrix_base.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_matrix_free.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/table.h>
# include <deal.II/lac/block_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#endif // __deal2__petsc_parallel_block_sparse_matrix_h
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_parallel_vector.h>
# include <deal.II/lac/block_indices.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#endif
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_parallel_sparse_matrix.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/subscriptor.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_parallel_vector.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <petscpc.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_precondition.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/solver_control.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_solver.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/petsc_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_sparse_matrix.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/subscriptor.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_vector.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/subscriptor.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
/*---------------------------- petsc_vector_base.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_SLEPC
+#endif // DEAL_II_WITH_SLEPC
/*---------------------------- slepc_solver.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_SLEPC
+#endif // DEAL_II_WITH_SLEPC
/*-------------------- slepc_spectral_transformation.h ------------------*/
#include <deal.II/lac/sparse_matrix_ez.h>
#include <deal.II/lac/block_sparse_matrix.h>
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
# include <deal.II/base/utilities.h>
# include <dmumps_c.h>
#endif
{
private:
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
DMUMPS_STRUC_C id;
-#endif // DEAL_II_USE_MUMPS
+#endif // DEAL_II_WITH_MUMPS
double *a;
double *rhs;
void
SparseVanka<number>::compute_inverses ()
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
compute_inverses (0, matrix->m());
#else
const unsigned int n_inverses = std::count (selected.begin(),
else
// otherwise: blocking requested
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
// spawn threads. since
// some compilers have
// trouble finding out
#include <vector>
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
#include <mpi.h>
#include <deal.II/base/index_set.h>
#endif
const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
/**
* Communicate rows in a compressed
* sparsity pattern over MPI.
template <typename number>
void SwappableVector<number>::alert ()
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
// note: this function does nothing
// in non-MT mode
return;
this->block_read (tmp_in);
tmp_in.close ();
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
// release the lock that was
// acquired by the calling
// functions
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/table.h>
# include <deal.II/lac/block_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#endif // __deal2__trilinos_block_sparse_matrix_h
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/trilinos_parallel_block_vector.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#endif
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/block_indices.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#endif
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/subscriptor.h>
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/lac/trilinos_vector_base.h>
# include <deal.II/lac/parallel_vector.h>
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# else
# include <Epetra_SerialComm.h>
* needs to be copied from
* deal.II format.
*/
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm communicator;
#else
Epetra_SerialComm communicator;
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*---------------------------- trilinos_precondition.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/lac/exceptions.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*---------------------------- trilinos_solver.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/base/subscriptor.h>
# include <Epetra_Map.h>
# include <Epetra_CrsGraph.h>
# include <Epetra_Vector.h>
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# include "mpi.h"
# else
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*----------------------- trilinos_sparse_matrix.h --------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/subscriptor.h>
# include <deal.II/base/index_set.h>
# include <Epetra_FECrsGraph.h>
# include <Epetra_Map.h>
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# include "mpi.h"
# else
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*-------------------- trilinos_sparsity_pattern.h --------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# include <deal.II/base/subscriptor.h>
{
vector.reset (new Epetra_FEVector(Epetra_Map
(static_cast<int>(v.size()), 0,
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(MPI_COMM_SELF)
#else
Epetra_SerialComm()
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*---------------------------- trilinos_vector.h ---------------------------*/
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
#include <deal.II/base/utilities.h>
# include <deal.II/base/std_cxx1x/shared_ptr.h>
# define TrilinosScalar double
# include "Epetra_ConfigDefs.h"
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI // only if MPI is installed
+# ifdef DEAL_II_WITH_MPI // only if MPI is installed
# include "mpi.h"
# include "Epetra_MpiComm.h"
# else
}
#ifdef DEBUG
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# ifdef DEAL_II_WITH_MPI
// check that every process has decided
// to use the same mode. This will
// otherwise result in undefined
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
/*---------------------------- trilinos_vector_base.h ---------------------------*/
DEAL_II_NAMESPACE_OPEN
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
namespace PETScWrappers
{
class Vector;
}
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
{
namespace MPI
Vector (const Vector<OtherNumber> &v);
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
/**
* Another copy constructor: copy the
* values from a sequential PETSc wrapper
Vector (const PETScWrappers::MPI::Vector &v);
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* Another copy constructor: copy
* the values from a Trilinos
*/
Vector<Number> &operator= (const BlockVector<Number> &v);
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
/**
* Another copy operator: copy the
* values from a sequential PETSc
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* Another copy operator: copy
* the values from a (sequential
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/petsc_parallel_vector.h>
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_vector.h>
#endif
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template <typename Number>
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template <typename Number>
Vector<Number>::Vector (const TrilinosWrappers::MPI::Vector &v)
}
result = outer_results[0];
}
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
else if (vec_size > 4 * internal::Vector::minimum_parallel_grain_size)
{
// split the vector into smaller pieces to be
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template <typename Number>
Vector<Number> &
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template <typename Number>
Vector<Number> &
{
// print_memory_statistics involves global communication, so we can
// disable the check here only if no processor has any such data
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
unsigned int general_size_glob = 0, general_size_loc = jacobians.size();
MPI_Allreduce (&general_size_loc, &general_size_glob, 1, MPI_UNSIGNED,
MPI_MAX, size_info.communicator);
MemoryConsumption::memory_consumption (jacobians_grad_upper));
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
unsigned int quad_size_glob = 0, quad_size_loc = quadrature_points.size();
MPI_Allreduce (&quad_size_loc, &quad_size_glob, 1, MPI_UNSIGNED,
MPI_MAX, size_info.communicator);
#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/mapping_info.h>
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
#include <tbb/task.h>
#include <tbb/task_scheduler_init.h>
#include <tbb/parallel_for.h>
}
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
// This defines the TBB data structures that are needed to schedule the
// partition-partition variant
VectorStruct &dst;
};
-#endif // DEAL_II_USE_MT
+#endif // DEAL_II_WITH_THREADS
} // end of namespace internal
OutVector &dst,
const InVector &src) const
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
// Use multithreading if so requested and if there is enough work to do in
// parallel (the code might hang if there are fewer than two chunks!)
void assert_communicator_equality (const dealii::Triangulation<dim> &tria,
const MPI_Comm &comm_mf)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const parallel::distributed::Triangulation<dim> *dist_tria =
dynamic_cast<const parallel::distributed::Triangulation<dim>*>(&tria);
if (dist_tria != 0)
// initialize the basic multithreading information that needs to be
// passed to the DoFInfo structure
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
if (additional_data.tasks_parallel_scheme != AdditionalData::none)
{
task_info.use_multithreading = true;
// initialize the basic multithreading information that needs to be
// passed to the DoFInfo structure
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
if (additional_data.tasks_parallel_scheme != AdditionalData::none)
{
task_info.use_multithreading = true;
/*
// try to balance the number of cells before and after the boundary part
// on each processor. probably not worth it!
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
MPI_Allreduce (&n_boundary_cells, &n_max_boundary_cells, 1, MPI_UNSIGNED,
MPI_MAX, size_info.communicator);
#endif
typedef ::dealii::SparseMatrix<typename VECTOR::value_type> Matrix;
};
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template <>
struct MatrixSelector<dealii::TrilinosWrappers::MPI::Vector>
{
}
}
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* Adjust vectors on all levels to
* correct size. Here, we just
(&mg_dof.get_tria()));
AssertThrow(tria!=NULL, ExcMessage("multigrid with Trilinos vectors only works with distributed Triangulation!"));
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
for (unsigned int level=v.min_level();
level<=v.max_level(); ++level)
{
}
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
namespace PETScWrappers
{
class SparseMatrix;
}
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
{
class SparseMatrix;
BlockVector<number> &right_hand_side,
const bool eliminate_columns = true);
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
/**
* Apply dirichlet boundary conditions to
* the system matrix and vectors as
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
/**
* Apply dirichlet boundary
* conditions to the system matrix
}
}
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
// if this was a distributed
// DoFHandler, we need to do the
// reduction over the entire domain
// we use uint32_t and uint8_t below, which are declared here:
#include <stdint.h>
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
# include <zlib.h>
#endif
-#ifdef DEAL_II_HAVE_HDF5
+#ifdef DEAL_II_WITH_HDF5
#include <hdf5.h>
#endif
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
/**
* Do a zlib compression followed
* by a base64 encoding of the
VtuStream::write_point (const unsigned int,
const Point<dim> &p)
{
-#if !defined(HAVE_LIBZ)
+#if !defined(DEAL_II_WITH_ZLIB)
// write out coordinates
stream << p;
// fill with zeroes
void
VtuStream::flush_points ()
{
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
// compress the data we have in
// memory and write them to the
// stream. then release the data
unsigned int d2,
unsigned int d3)
{
-#if !defined(HAVE_LIBZ)
+#if !defined(DEAL_II_WITH_ZLIB)
stream << start << '\t'
<< start+d1;
if (dim>=2)
void
VtuStream::flush_cells ()
{
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
// compress the data we have in
// memory and write them to the
// stream. then release the data
std::ostream &
VtuStream::operator<< (const std::vector<T> &data)
{
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
// compress the data we have in
// memory and write them to the
// stream. then release the data
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
Assert (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
Assert(dim<=3, ExcNotImplemented());
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
<< "\n-->\n";
out << "<VTKFile type=\"UnstructuredGrid\" version=\"0.1\"";
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
out << " compressor=\"vtkZLibDataCompressor\"";
#endif
#ifdef DEAL_II_WORDS_BIGENDIAN
{
AssertThrow (out, ExcIO());
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
// verify that there are indeed
// patches to be written out. most
// of the time, people just forget
patches[0].data.n_rows()));
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
const char *ascii_or_binary = "binary";
#else
const char *ascii_or_binary = "ascii";
// uint8_t might be a typedef to unsigned
// char which is then not printed as
// ascii integers
-#ifdef HAVE_LIBZ
+#ifdef DEAL_II_WITH_ZLIB
std::vector<uint8_t> cell_types (n_cells,
static_cast<uint8_t>(vtk_cell_type[dim]));
#else
template <int dim, int spacedim>
void DataOutInterface<dim,spacedim>::write_vtu_in_parallel (const char *filename, MPI_Comm comm) const
{
-#ifndef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifndef DEAL_II_WITH_MPI
//without MPI fall back to the normal way to write a vtu file:
(void)comm;
const unsigned int n_data_sets = data_names.size();
int myrank;
-#ifndef DEAL_II_HAVE_HDF5
+#ifndef DEAL_II_WITH_HDF5
// throw an exception, but first make
// sure the compiler does not warn about
// the now unused function arguments
compute_sizes<dim,spacedim>(patches, local_node_cell_count[0], local_node_cell_count[1]);
// And compute the global total
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Comm_rank(comm, &myrank);
MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
#else
{
int myrank;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Comm_rank(comm, &myrank);
#else
(void)comm;
void DataOutInterface<dim,spacedim>::
write_hdf5_parallel (const char *filename, MPI_Comm comm) const
{
-#ifndef DEAL_II_HAVE_HDF5
+#ifndef DEAL_II_WITH_HDF5
AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
#endif
DataOutBase::write_hdf5_parallel(get_patches(), get_dataset_names(),
const char *filename,
MPI_Comm comm)
{
-#ifndef DEAL_II_HAVE_HDF5
+#ifndef DEAL_II_WITH_HDF5
// throw an exception, but first make
// sure the compiler does not warn about
// the now unused function arguments
// If HDF5 is not parallel and we're using multiple processes, abort
#ifndef H5_HAVE_PARALLEL
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
int world_size;
MPI_Comm_size(comm, &world_size);
AssertThrow (world_size <= 1,
plist_id = H5Pcreate(H5P_FILE_ACCESS);
AssertThrow(plist_id != -1, ExcIO());
// If MPI is enabled *and* HDF5 is parallel, we can do parallel output
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
#ifdef H5_HAVE_PARALLEL
// Set the access to use the specified MPI_Comm object
status = H5Pset_fapl_mpio(plist_id, comm, MPI_INFO_NULL);
// Compute the global total number of nodes/cells
// And determine the offset of the data for this process
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
MPI_Allreduce(local_node_cell_count, global_node_cell_count, 2, MPI_UNSIGNED, MPI_SUM, comm);
MPI_Scan(local_node_cell_count, global_node_cell_offsets, 2, MPI_UNSIGNED, MPI_SUM, comm);
global_node_cell_offsets[0] -= local_node_cell_count[0];
// Create the property list for a collective write
plist_id = H5Pcreate(H5P_DATASET_XFER);
AssertThrow(plist_id >= 0, ExcIO());
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
#ifdef H5_HAVE_PARALLEL
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
AssertThrow(status >= 0, ExcIO());
#include <deal.II/base/utilities.h>
#include <deal.II/lac/vector.h>
-#ifdef HAVE_FUNCTIONPARSER
+#ifdef DEAL_II_WITH_FUNCTIONPARSER
#include <fparser.hh>
namespace fparser
{
}
-#ifdef HAVE_FUNCTIONPARSER
+#ifdef DEAL_II_WITH_FUNCTIONPARSER
template <int dim>
void FunctionParser<dim>::initialize (const std::string &variables,
const std::vector<std::string> &expressions,
#include <deal.II/base/index_set.h>
#include <list>
-#ifdef DEAL_II_USE_TRILINOS
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_TRILINOS
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# endif
# include <Epetra_SerialComm.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
Epetra_Map
IndexSet::make_trilinos_map (const MPI_Comm &communicator,
return Epetra_Map (static_cast<int>(size()),
static_cast<int>(n_elements()),
0,
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
:
0),
0,
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
// deliberate memory leak and
// instead destroying an empty
// object
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
if (this == &deallog)
{
stream_map_type *dummy = new stream_map_type();
#include <cstddef>
#include <iostream>
-#ifdef DEAL_II_USE_TRILINOS
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_TRILINOS
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# include <deal.II/lac/vector_memory.h>
# include <deal.II/lac/trilinos_vector.h>
# endif
#endif
-#ifdef DEAL_II_USE_PETSC
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_PETSC
+# ifdef DEAL_II_WITH_MPI
# include <petscsys.h>
# include <deal.II/lac/petsc_block_vector.h>
# include <deal.II/lac/petsc_parallel_block_vector.h>
# endif
#endif
-#ifdef DEAL_II_USE_SLEPC
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_SLEPC
+# ifdef DEAL_II_WITH_MPI
# include <slepcsys.h>
# endif
#endif
namespace MPI
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// Unfortunately, we have to work
// around an oddity in the way PETSc
// and some gcc versions interact. If
ExcMessage ("You can only create a single object of this class "
"in a program since it initializes the MPI system."));
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// if we have PETSc, we will initialize it and let it handle MPI.
// Otherwise, we initialize MPI ourselves.
-#ifdef DEAL_II_USE_PETSC
-# ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_PETSC
+# ifdef DEAL_II_WITH_SLEPC
// Initialise SLEPc (with PETSc):
SlepcInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
# else
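The branch following this #else is elided in the excerpt. Spelled out in full, the initialization cascade reads roughly as the sketch below (assuming the constructor's argc/argv), where each outer library's init call also initializes everything beneath it:
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
  // SLEPc initializes PETSc, which in turn initializes MPI
  SlepcInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
#  else
  // PETSc initializes MPI
  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
#  endif
#else
  // no PETSc: initialize MPI directly
  MPI_Init(&argc, &argv);
#endif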
MPI_InitFinalize::~MPI_InitFinalize()
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// make memory pool release all MPI-based vectors that are no
// longer used at this point. this is relevant because the
// destructors that would otherwise run for these static vector
// objects at the end of the program would execute after
// MPI_Finalize has been called, leading to errors
-# if defined(DEAL_II_USE_TRILINOS)
+# if defined(DEAL_II_WITH_TRILINOS)
GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
::release_unused_memory ();
GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
// calling PetscFinalize. running the calls below after
// PetscFinalize has already been called would therefore
// yield double-deallocation errors
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if ((PetscInitializeCalled == PETSC_TRUE)
&&
(PetscFinalizeCalled == PETSC_FALSE))
GrowingVectorMemory<PETScWrappers::BlockVector>
::release_unused_memory ();
-# ifdef DEAL_II_USE_SLEPC
+# ifdef DEAL_II_WITH_SLEPC
// and now end SLEPc (with PETSc)
SlepcFinalize();
# else
DEAL_II_NAMESPACE_OPEN
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
/* Detecting how many processors a given machine has is something that
varies greatly between operating systems. For a few operating
// find out the end index for each processor
// and communicate it (this implies the start
// index for the next processor)
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
if (n_procs < 2)
{
Assert (ghost_indices_data.n_elements() == 0, ExcInternalError());
Threads::Mutex::ScopedLock lock (subscription_lock);
++counter;
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
const char *const name = (id != 0) ? id : unknown_subscriber;
map_iterator it = counter_map.find(name);
Threads::Mutex::ScopedLock lock (subscription_lock);
--counter;
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
map_iterator it = counter_map.find(name);
Assert (it != counter_map.end(), ExcNoSubscriber(object_info->name(), name));
Assert (it->second > 0, ExcNoSubscriber(object_info->name(), name));
void Subscriptor::list_subscribers () const
{
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
for (map_iterator it = counter_map.begin();
it != counter_map.end(); ++it)
deallog << it->second << '/'
-#ifndef DEAL_II_USE_MT
+#ifndef DEAL_II_WITH_THREADS
DummyBarrier::DummyBarrier (const unsigned int count,
const char *,
void *)
:
cumulative_time (0.),
cumulative_wall_time (0.)
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
, mpi_communicator (MPI_COMM_SELF)
, sync_wall_time (false)
#endif
// in case we use an MPI compiler, use
// the communicator given as input
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Timer::Timer(MPI_Comm mpi_communicator,
bool sync_wall_time_)
:
{
running = true;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
if (sync_wall_time)
MPI_Barrier(mpi_communicator);
#endif
# error Unsupported platform. Porting not finished.
#endif
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
if (sync_wall_time && Utilities::System::job_supports_mpi())
{
this->mpi_data
output_type (output_type),
out_stream (stream, true),
output_is_enabled (true)
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
, mpi_communicator (MPI_COMM_SELF)
#endif
{}
output_type (output_type),
out_stream (stream),
output_is_enabled (true)
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
, mpi_communicator (MPI_COMM_SELF)
#endif
{}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
TimerOutput::TimerOutput (MPI_Comm mpi_communicator,
std::ostream &stream,
#endif
-#ifdef DEAL_II_USE_TRILINOS
-# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_TRILINOS
+# ifdef DEAL_II_WITH_MPI
# include <Epetra_MpiComm.h>
# include <deal.II/lac/vector_memory.h>
# include <deal.II/lac/trilinos_vector.h>
bool job_supports_mpi ()
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
int MPI_has_been_started = 0;
MPI_Initialized(&MPI_has_been_started);
}
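MPI_Initialized is one of the few MPI calls that may legally be made before MPI_Init, which is what makes this runtime query safe. A sketch of the whole function under that reading (the serial fallback is an assumption):
bool job_supports_mpi ()
{
#ifdef DEAL_II_WITH_MPI
  int MPI_has_been_started = 0;
  MPI_Initialized(&MPI_has_been_started);
  return (MPI_has_been_started > 0);
#else
  // without MPI support the job can never be an MPI job
  return false;
#endif
}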
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace Trilinos
{
const Epetra_Comm &
comm_world()
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
static Teuchos::RCP<Epetra_MpiComm>
communicator = Teuchos::rcp (new Epetra_MpiComm (MPI_COMM_WORLD), true);
#else
const Epetra_Comm &
comm_self()
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
static Teuchos::RCP<Epetra_MpiComm>
communicator = Teuchos::rcp (new Epetra_MpiComm (MPI_COMM_SELF), true);
#else
Epetra_Comm *
duplicate_communicator (const Epetra_Comm &communicator)
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// see if the communicator is in fact a
// parallel MPI communicator; if so,
// communicator if this whole
// thing was created as an MPI
// communicator
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm
*mpi_comm = dynamic_cast<Epetra_MpiComm *>(&communicator);
if (mpi_comm != 0)
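The dynamic_cast is what distinguishes an MPI-backed Epetra communicator from a serial one at run time. A sketch of the full dispatch, assuming Epetra_MpiComm::Comm() returns the wrapped MPI_Comm:
const Epetra_MpiComm *mpi_comm
  = dynamic_cast<const Epetra_MpiComm *>(&communicator);
if (mpi_comm != 0)
  {
    // MPI-backed: duplicate the underlying communicator so the
    // copy has an independent lifetime
    MPI_Comm duplicate;
    MPI_Comm_dup(mpi_comm->Comm(), &duplicate);
    return new Epetra_MpiComm(duplicate);
  }
// serial: a fresh serial communicator behaves identically
return new Epetra_SerialComm();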
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_block_vector.h>
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/base/config.h>
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::BlockVector<float>, DoFHandler<deal_II_dimension> >;
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template class SolutionTransfer<deal_II_dimension, PETScWrappers::Vector, DoFHandler<deal_II_dimension> >;
template class SolutionTransfer<deal_II_dimension, PETScWrappers::BlockVector, DoFHandler<deal_II_dimension> >;
template class SolutionTransfer<deal_II_dimension, PETScWrappers::MPI::BlockVector, DoFHandler<deal_II_dimension> >;
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::Vector, DoFHandler<deal_II_dimension> >;
template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::BlockVector, DoFHandler<deal_II_dimension> >;
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_tools.h>
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
# include <p4est_bits.h>
# include <p4est_extended.h>
# include <p4est_vtk.h>
DEAL_II_NAMESPACE_OPEN
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
namespace internal
{
}
-#else // DEAL_II_USE_P4EST
+#else // DEAL_II_WITH_P4EST
namespace parallel
{
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
template <int dim, int spacedim>
MPI_Comm
Triangulation<dim,spacedim>::get_communicator () const
}
}
-#endif // DEAL_II_USE_P4EST
+#endif // DEAL_II_WITH_P4EST
for (deal_II_dimension : DIMENSIONS)
{
-# ifdef DEAL_II_USE_P4EST
+# ifdef DEAL_II_WITH_P4EST
namespace internal
\{
# endif
\}
\}
-# endif // DEAL_II_USE_P4EST
+# endif // DEAL_II_WITH_P4EST
namespace parallel
\{
/* --------------------- class ParallelDistributed ---------------- */
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
namespace
{
const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation)
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
(void)vertices_with_ghost_neighbors;
Assert (false, ExcNotImplemented());
#else
const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
const unsigned int level)
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
(void)vertices_with_ghost_neighbors;
Assert (false, ExcNotImplemented());
#else
}
-#endif // DEAL_II_USE_P4EST
+#endif // DEAL_II_WITH_P4EST
{
NumberCache number_cache;
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
(void)dof_handler;
Assert (false, ExcNotImplemented());
#else
}
}
#endif // DEBUG
-#endif // DEAL_II_USE_P4EST
+#endif // DEAL_II_WITH_P4EST
return number_cache;
}
distribute_mg_dofs (DoFHandler<dim,spacedim> &dof_handler,
std::vector<NumberCache> &number_caches) const
{
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
(void)dof_handler;
(void)number_caches;
Assert (false, ExcNotImplemented());
}
-#endif // DEAL_II_USE_P4EST
+#endif // DEAL_II_WITH_P4EST
}
NumberCache number_cache;
-#ifndef DEAL_II_USE_P4EST
+#ifndef DEAL_II_WITH_P4EST
Assert (false, ExcNotImplemented());
#else
= (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
std::vector<unsigned int> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
= (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
std::vector<unsigned int> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
if (tria)
{
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
// this is a distributed Triangulation. We need to traverse the coarse
// cells in the order p4est does
for (unsigned int c = 0; c < tria->n_cells (0); ++c)
Assert (0<=direction && direction<space_dim,
ExcIndexRange (direction, 0, space_dim));
-#if defined(DEBUG) && defined(DEAL_II_USE_P4EST)
+#if defined(DEBUG) && defined(DEAL_II_WITH_P4EST)
// Check whether we run on a non-parallel mesh or on a
// parallel::distributed::Triangulation in serial
{
Assert(dim == space_dim,
ExcNotImplemented());
-#if defined(DEBUG) && defined(DEAL_II_USE_P4EST)
+#if defined(DEBUG) && defined(DEAL_II_WITH_P4EST)
// Check whether we run on a non-parallel mesh or on a
// parallel::distributed::Triangulation in serial
{
ExcInternalError());
// reduce information from all CPUs
-#if defined(DEAL_II_USE_P4EST) && defined(DEAL_II_COMPILER_SUPPORTS_MPI)
+#if defined(DEAL_II_WITH_P4EST) && defined(DEAL_II_WITH_MPI)
const unsigned int dim = DH::dimension;
const unsigned int spacedim = DH::space_dimension;
+= std::count(dofs_by_block.begin(), dofs_by_block.end(),
block);
-#ifdef DEAL_II_USE_P4EST
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
// if we are working on a parallel mesh, we now need to collect
// this information from all processors
if (const parallel::distributed::Triangulation<DH::dimension,DH::space_dimension> *tria
Assert(u2.size()==dof2.n_dofs(),
ExcDimensionMismatch(u2.size(), dof2.n_dofs()));
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1) != 0)
if (dynamic_cast<const DoFHandler<dim>*>(&dof1) != 0)
{
Assert(u1_interpolated.size()==dof1.n_dofs(),
ExcDimensionMismatch(u1_interpolated.size(), dof1.n_dofs()));
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1) != 0)
if (dynamic_cast<const DoFHandler<dim>*>(&dof1) != 0)
{
Assert(u1_interpolated.size() == dof1.n_dofs(),
ExcDimensionMismatch(u1_interpolated.size(), dof1.n_dofs()));
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1) != 0)
if (dynamic_cast<const DoFHandler<dim>*>(&dof1) != 0)
{
// interpolate back to dof1
// taking into account
// constraints1
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1) != 0)
{
AssertThrow (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1_interpolated) != 0,
Assert(u1_difference.size()==dof1.n_dofs(),
ExcDimensionMismatch(u1_difference.size(), dof1.n_dofs()));
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&u1) != 0)
if (dynamic_cast<const DoFHandler<dim>*>(&dof1) != 0)
{
Vector<float> &);
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template
void interpolate<deal_II_dimension>
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template
void interpolate<deal_II_dimension>
Vector<float> &);
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template
void interpolate<deal_II_dimension>
#if deal_II_dimension <= deal_II_space_dimension
template class MappingQ1Eulerian<deal_II_dimension, Vector<double>, deal_II_space_dimension>;
template class MappingQ1Eulerian<deal_II_dimension, Vector<float>, deal_II_space_dimension>;
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template class MappingQ1Eulerian<deal_II_dimension, PETScWrappers::Vector, deal_II_space_dimension>;
#endif
#endif
{
#if deal_II_dimension <= deal_II_space_dimension
template class MappingQEulerian<deal_II_dimension, Vector<double>, deal_II_space_dimension>;
-# ifdef DEAL_II_USE_PETSC
+# ifdef DEAL_II_WITH_PETSC
template class MappingQEulerian<deal_II_dimension,
PETScWrappers::Vector, deal_II_space_dimension>;
template class MappingQEulerian<deal_II_dimension,
#include <cctype>
-#ifdef HAVE_LIBNETCDF
+#ifdef DEAL_II_WITH_NETCDF
#include <netcdfcpp.h>
#endif
template <>
void GridIn<2>::read_netcdf (const std::string &filename)
{
-#ifndef HAVE_LIBNETCDF
+#ifndef DEAL_II_WITH_NETCDF
// do something with the unused
// filename argument
filename.c_str();
template <>
void GridIn<3>::read_netcdf (const std::string &filename)
{
-#ifndef HAVE_LIBNETCDF
+#ifndef DEAL_II_WITH_NETCDF
// do something with the function argument
// to make sure it at least looks used,
// even if it is not
}
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
PetscScalar
max_element (const PETScWrappers::Vector &criteria)
{
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
TrilinosScalar
max_element (const TrilinosWrappers::Vector &criteria)
{
// processor without being true on
// all. however, we can ask for the global
// number of active cells and use that
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
if (const parallel::distributed::Triangulation<dim,spacedim> *p_tria
= dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>(&tria))
Assert (p_tria->n_global_active_cells() == tria.n_cells(0),
double global_volume = 0;
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
if (const parallel::distributed::Triangulation<dim,spacedim> *p_tria
= dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>(&triangulation))
global_volume = Utilities::MPI::sum (local_volume, p_tria->get_communicator());
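Utilities::MPI::sum is a thin collective reduction; a minimal usage sketch, assuming each process has already accumulated its local contribution and that `mpi_communicator` is in scope:
double local_volume = 0.;
// ... accumulate the measure of locally owned cells into local_volume ...
double global_volume = local_volume;   // serial default: local is global
#ifdef DEAL_II_WITH_MPI
global_volume = Utilities::MPI::sum (local_volume, mpi_communicator);
#endif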
template class BlockSparsityPatternBase<CompressedSparsityPattern>;
template class BlockSparsityPatternBase<CompressedSimpleSparsityPattern>;
template class BlockSparsityPatternBase<CompressedSetSparsityPattern>;
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template class BlockSparsityPatternBase<TrilinosWrappers::SparsityPattern>;
#endif
}
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
{
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
// this is a specialization for a
// parallel (non-block) Trilinos
individual_indices.end());
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const Epetra_MpiComm *mpi_comm
= dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
individual_indices.end());
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
const Epetra_MpiComm *mpi_comm
= dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
// this is a specialization for a
// parallel (non-block) PETSc
// can't do that - we have to rewrite those functions by hand if we want to
// use them. The key is to use local ranges etc., which still needs to be
// implemented.
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::Vector);
PARALLEL_VECTOR_FUNCTIONS(TrilinosWrappers::MPI::BlockVector);
#endif
// BLOCK_MATRIX_FUNCTIONS(BlockSparseMatrixEZ<double>);
// BLOCK_MATRIX_VECTOR_FUNCTIONS(BlockSparseMatrixEZ<float>, Vector<float>);
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
BLOCK_MATRIX_FUNCTIONS(PETScWrappers::BlockSparseMatrix);
MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
BLOCK_MATRIX_VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockSparseMatrix, PETScWrappers::MPI::BlockVector);
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::Vector);
BLOCK_SPARSITY_FUNCTIONS(BlockCompressedSetSparsityPattern);
BLOCK_SPARSITY_FUNCTIONS(BlockCompressedSimpleSparsityPattern);
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
SPARSITY_FUNCTIONS(TrilinosWrappers::SparsityPattern);
BLOCK_SPARSITY_FUNCTIONS(TrilinosWrappers::BlockSparsityPattern);
#endif
ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<float>);
ONLY_MATRIX_FUNCTIONS(BlockSparseMatrix<double>);
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
ONLY_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
#endif
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
ONLY_MATRIX_FUNCTIONS(PETScWrappers::SparseMatrix);
ONLY_MATRIX_FUNCTIONS(PETScWrappers::BlockSparseMatrix);
ONLY_MATRIX_FUNCTIONS(PETScWrappers::MPI::SparseMatrix);
#include <deal.II/lac/petsc_block_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
DEAL_II_NAMESPACE_OPEN
#include <deal.II/lac/petsc_full_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_matrix_base.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_full_matrix.h>
# include <deal.II/lac/petsc_sparse_matrix.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_matrix_free.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
DEAL_II_NAMESPACE_OPEN
#include <deal.II/lac/petsc_parallel_block_vector.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_block_vector.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/sparsity_pattern.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_parallel_vector.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
# include <cmath>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_precondition.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/utilities.h>
# include <deal.II/lac/petsc_matrix_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/base/logstream.h>
#include <deal.II/lac/petsc_solver.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_matrix_base.h>
# include <deal.II/lac/petsc_vector_base.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_vector.h>
# include <deal.II/lac/sparsity_pattern.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_vector.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <cmath>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/petsc_vector_base.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/base/memory_consumption.h>
# include <deal.II/lac/petsc_vector.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_PETSC
#include <deal.II/lac/slepc_solver.h>
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
# include <deal.II/lac/petsc_matrix_base.h>
# include <deal.II/lac/petsc_vector_base.h>
{
void dummy () {}
}
-#endif // DEAL_II_USE_SLEPC
+#endif // DEAL_II_WITH_SLEPC
#include <deal.II/lac/slepc_spectral_transformation.h>
-#ifdef DEAL_II_USE_SLEPC
+#ifdef DEAL_II_WITH_SLEPC
# include <deal.II/lac/slepc_solver.h>
# include <deal.II/lac/petsc_matrix_base.h>
{
void dummy () {}
}
-#endif // DEAL_II_USE_SLEPC
+#endif // DEAL_II_WITH_SLEPC
template class Solver<BlockVector<float> >;
template class Solver<parallel::distributed::Vector<float> >;
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
template class Solver<PETScWrappers::Vector>;
template class Solver<PETScWrappers::BlockVector>;
#endif
#endif
// include UMFPACK file.
-#ifdef HAVE_LIBUMFPACK
+#ifdef DEAL_II_WITH_UMFPACK
# include <umfpack.h>
#endif
{}
-#ifdef HAVE_LIBUMFPACK
+#ifdef DEAL_II_WITH_UMFPACK
SparseDirectUMFPACK::SparseDirectUMFPACK ()
:
Assert(false, ExcNotImplemented());
}
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
SparseDirectMUMPS::SparseDirectMUMPS ()
:
initialize_called (false)
copy_solution (dst);
}
-#endif // DEAL_II_USE_MUMPS
+#endif // DEAL_II_WITH_MUMPS
// explicit instantiations for SparseDirectMA27
template
InstantiateUMFPACK(BlockSparseMatrix<float>)
// explicit instantiations for SparseDirectMUMPS
-#ifdef DEAL_II_USE_MUMPS
+#ifdef DEAL_II_WITH_MUMPS
#define InstantiateMUMPS(MATRIX) \
template \
void SparseDirectMUMPS::initialize (const MATRIX &, const Vector<double> &);
#include <algorithm>
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
#include <deal.II/base/utilities.h>
#include <deal.II/lac/compressed_sparsity_pattern.h>
#include <deal.II/lac/compressed_set_sparsity_pattern.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#endif
-#ifdef DEAL_II_USE_METIS
+#ifdef DEAL_II_WITH_METIS
extern "C"
{
#include <metis.h>
// Make sure that METIS is actually
// installed and detected
-#ifndef DEAL_II_USE_METIS
+#ifndef DEAL_II_WITH_METIS
AssertThrow (false, ExcMETISNotInstalled());
#else
ExcInternalError());
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
template <class CSP_t>
void distribute_sparsity_pattern(CSP_t &csp,
const std::vector<unsigned int> &rows_per_cpu,
const MPI_Comm & mpi_comm,\
const IndexSet & myrange)
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
SPARSITY_FUNCTIONS(CompressedSparsityPattern);
SPARSITY_FUNCTIONS(CompressedSimpleSparsityPattern);
SPARSITY_FUNCTIONS(BlockCompressedSimpleSparsityPattern);
void
TridiagonalMatrix<double>::compute_eigenvalues()
{
-#ifdef HAVE_LIBLAPACK
+#ifdef DEAL_II_WITH_LAPACK
Assert(state == matrix, ExcState(state));
Assert(is_symmetric, ExcNotImplemented());
#include <deal.II/lac/trilinos_block_sparse_matrix.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/block_sparse_matrix.h>
# include <deal.II/lac/block_sparsity_pattern.h>
// produce a dummy local map and pass it
// off to the other function
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm trilinos_communicator (MPI_COMM_SELF);
#else
Epetra_SerialComm trilinos_communicator;
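The closing #endif of this choice falls outside the excerpt. Put together, the idiom builds a purely local Epetra map regardless of configuration; a sketch assuming `n_rows` local elements:
#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm    trilinos_communicator (MPI_COMM_SELF);  // each process alone
#else
Epetra_SerialComm trilinos_communicator;
#endif
// a map in which every element lives on this (single-process) communicator
Epetra_LocalMap map (static_cast<int>(n_rows), 0, trilinos_communicator);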
#include <deal.II/lac/trilinos_block_vector.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_block_sparse_matrix.h>
#include <deal.II/lac/trilinos_precondition.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/vector.h>
# include <deal.II/lac/sparse_matrix.h>
{
PreconditionBase::PreconditionBase()
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
:
communicator (MPI_COMM_SELF)
#endif
:
Subscriptor (),
preconditioner (base.preconditioner),
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
communicator (base.communicator),
#endif
vector_distributor (new Epetra_Map(*base.vector_distributor))
void PreconditionBase::clear ()
{
preconditioner.reset();
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
communicator = MPI_COMM_SELF;
#endif
vector_distributor.reset();
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#include <deal.II/lac/trilinos_solver.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/conditional_ostream.h>
# include <deal.II/lac/trilinos_sparse_matrix.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_PETSC
+#endif // DEAL_II_WITH_TRILINOS
#include <deal.II/lac/trilinos_sparse_matrix.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/utilities.h>
# include <deal.II/lac/sparse_matrix.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#include <deal.II/lac/trilinos_sparsity_pattern.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/utilities.h>
# include <deal.II/lac/sparsity_pattern.h>
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#include <deal.II/lac/trilinos_vector.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_sparse_matrix.h>
# include <deal.II/lac/trilinos_block_vector.h>
last_action = Zero;
Epetra_LocalMap map (static_cast<int>(partitioning.size()),
0,
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
{
Epetra_LocalMap map (static_cast<int>(partitioning.size()),
0,
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
#include <deal.II/base/memory_consumption.h>
#include <deal.II/lac/trilinos_vector_base.h>
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <cmath>
# include <Epetra_Import.h>
last_action (Zero),
compressed (true),
has_ghosts (false),
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
vector(new Epetra_FEVector(
Epetra_Map(0,0,Epetra_MpiComm(MPI_COMM_SELF))))
#else
// When we clear the vector,
// reset the pointer and generate
// an empty vector.
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
Epetra_Map map (0, 0, Epetra_MpiComm(MPI_COMM_SELF));
#else
Epetra_Map map (0, 0, Epetra_SerialComm());
++ptr;
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// in parallel, check that the vector
// is zero on _all_ processors.
const Epetra_MpiComm *mpi_comm
bool
VectorBase::is_non_negative () const
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_WITH_MPI
// if this vector is a parallel one, then
// we need to communicate to determine
// the answer to the current
DEAL_II_NAMESPACE_CLOSE
-#endif // DEAL_II_USE_TRILINOS
+#endif // DEAL_II_WITH_TRILINOS
for (deal_II_dimension : DIMENSIONS)
{
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
template void
MGTransferPrebuilt<TrilinosWrappers::MPI::Vector>::copy_to_mg (
INSTANTIATE(BlockVector<double>, hp::DoFHandler);
INSTANTIATE(BlockVector<float>, hp::DoFHandler);
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
INSTANTIATE(PETScWrappers::Vector, DoFHandler);
INSTANTIATE(PETScWrappers::BlockVector, DoFHandler);
INSTANTIATE(PETScWrappers::BlockVector, hp::DoFHandler);
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
INSTANTIATE(TrilinosWrappers::Vector, DoFHandler);
INSTANTIATE(TrilinosWrappers::BlockVector, DoFHandler);
INSTANTIATE(TrilinosWrappers::MPI::Vector, DoFHandler);
const types::subdomain_id subdomain_id_,
const types::material_id material_id)
{
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
if (dynamic_cast<const parallel::distributed::Triangulation<1,spacedim>*>
(&dof_handler.get_tria())
!= 0)
const types::subdomain_id subdomain_id_,
const types::material_id material_id)
{
-#ifdef DEAL_II_USE_P4EST
+#ifdef DEAL_II_WITH_P4EST
if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&dof_handler.get_tria())
!= 0)
INSTANTIATE(parallel::distributed::BlockVector<double>,hp::DoFHandler)
INSTANTIATE(parallel::distributed::BlockVector<float>,hp::DoFHandler)
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
INSTANTIATE(PETScWrappers::Vector,DoFHandler)
INSTANTIATE(PETScWrappers::BlockVector,DoFHandler)
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
INSTANTIATE(TrilinosWrappers::Vector,DoFHandler)
INSTANTIATE(TrilinosWrappers::BlockVector,DoFHandler)
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/block_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_parallel_sparse_matrix.h>
# include <deal.II/lac/petsc_sparse_matrix.h>
# include <deal.II/lac/petsc_parallel_vector.h>
# include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
#endif
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_sparse_matrix.h>
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/trilinos_block_sparse_matrix.h>
-#ifdef DEAL_II_USE_PETSC
+#ifdef DEAL_II_WITH_PETSC
namespace internal
{
-#ifdef DEAL_II_USE_TRILINOS
+#ifdef DEAL_II_WITH_TRILINOS
namespace internal
{
void TimeDependent::end_sweep (const unsigned int n_threads)
{
-#ifdef DEAL_II_USE_MT
+#ifdef DEAL_II_WITH_THREADS
if (n_threads > 1)
{
const unsigned int stride = timesteps.size() / n_threads;
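The stride splits the timestep range into one contiguous block per thread, with the last thread absorbing the division remainder. A sketch of the hand-out, where end_sweep_in_range stands in for the per-block worker (an assumed helper, not necessarily the library's actual one):
const unsigned int stride = timesteps.size() / n_threads;
Threads::TaskGroup<> tasks;
for (unsigned int t = 0; t < n_threads; ++t)
  {
    const unsigned int begin = t * stride;
    // extend the last block so nothing past n_threads*stride is dropped
    const unsigned int end = (t + 1 == n_threads ?
                              timesteps.size() : (t + 1) * stride);
    tasks += Threads::new_task (&TimeDependent::end_sweep_in_range,
                                *this, begin, end);
  }
tasks.join_all ();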