.PHONY: base baseg lac lacg 1d 2d 3d 1dg 2dg 3dg all \
online-doc doc printable-doc tex-doc contrib \
+ contrib-functionparser \
clean clean-contrib clean-base clean-lac clean-dealII \
clean-doc clean-examples clean-lib clean-tests clean-common-scripts \
distclean \
dnl -------------------------------------------------------------
-dnl See whether the compiler we have has MPI build in (e.g. if it
+dnl See whether the compiler we have has MPI built in (e.g. if it
dnl is actually mpiCC, etc)
dnl
dnl Usage: DEAL_II_DETERMINE_IF_SUPPORTS_MPI
AC_MSG_RESULT(yes)
AC_DEFINE(DEAL_II_COMPILER_SUPPORTS_MPI, 1,
[Defined if the compiler supports including <mpi.h>])
+
+ dnl Export this variable so that we can refer to it
+ dnl from contrib/configure.in when configuring p4est
+ DEAL_II_COMPILER_SUPPORTS_MPI=1
+ export DEAL_II_COMPILER_SUPPORTS_MPI
+
DEAL_II_USE_MPI=yes
],
[
dnl modified according to other options in later steps of
dnl configuration
dnl
-dnl CFLAGS : flags for optimized mode
+dnl CFLAGS.o : flags for optimized mode
+dnl CFLAGS.g : flags for debug mode
dnl
dnl Usage: DEAL_II_SET_CC_FLAGS
dnl
[
dnl First the flags for gcc compilers
if test "$GCC" = yes ; then
- CFLAGS="$CFLAGS -O3 -funroll-loops -funroll-all-loops -fstrict-aliasing"
+ CFLAGSO="$CFLAGS -O3 -funroll-loops -funroll-all-loops -fstrict-aliasing"
+ CFLAGSG="$CFLAGS -g"
dnl Set PIC flags. On some systems, -fpic/PIC is implied, so don't set
dnl anything to avoid a warning. on AIX make sure we always pass -lpthread
dnl because this seems to be somehow required to make things work. Likewise
case "$CC_VERSION" in
ibm_xlc)
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGS -O2"
CFLAGSPIC="-fPIC"
SHLIBLD="$CXX"
;;
MIPSpro*)
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGS -O2"
CFLAGSPIC="-KPIC"
;;
intel_icc*)
- CFLAGS="$CFLAGS -O2 -unroll"
+ CFLAGSO="$CFLAGS -O2 -unroll"
case "$CC_VERSION" in
intel_icc5 | intel_icc6 | intel_icc7 | intel_icc8 | intel_icc9)
CFLAGSPIC="-KPIC"
;;
esac
- CFLAGS="$CFLAGS -ansi_alias -vec_report0"
+ CFLAGSO="$CFLAGSO -ansi_alias -vec_report0"
dnl If we are on an x86 platform, add -tpp6 to optimization
dnl flags
*)
AC_MSG_RESULT(Unknown C compiler - using generic options)
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGSO -O2"
;;
esac
fi
])
+
dnl ------------------------------------------------------------
dnl Check whether PETSc is installed, and if so store the
dnl respective links
fi
])
+
+
+
+dnl ------------------------------------------------------------
+dnl Check whether P4EST is to be used to parallelize meshes
+dnl
+dnl Usage: DEAL_II_CONFIGURE_P4EST
+dnl
+dnl ------------------------------------------------------------
+AC_DEFUN(DEAL_II_CONFIGURE_P4EST, dnl
+[
+ AC_MSG_CHECKING(whether p4est shall be used)
+
+ AC_ARG_WITH(p4est,
+ [ --with-p4est=/path/to/p4est makes deal.II use p4est to distribute meshes
+ on a cluster computer],
+ use_p4est=$withval,
+ use_p4est=no)
+
+ if test "x$use_p4est" != "xno" ; then
+ AC_MSG_RESULT(yes)
+
+ if test ! -d "${use_p4est}/DEBUG" -o ! -d "${use_p4est}/FAST" ; then
+ echo "${use_p4est}/DEBUG"
+ AC_MSG_ERROR([p4est directories $use_p4est/DEBUG or $use_p4est/FAST not found])
+ fi
+
+ AC_DEFINE(DEAL_II_USE_P4EST, 1,
+ [Defined if we are to use the p4est library to distribute
+ meshes on a cluster computer.])
+ USE_CONTRIB_P4EST=yes
+ export USE_CONTRIB_P4EST
+
+ DEAL_II_P4EST_DIR=${use_p4est}
+ export DEAL_II_P4EST_DIR
+
+ CXXFLAGSG="$CXXFLAGSG -I$use_p4est/DEBUG/include"
+ CXXFLAGSO="$CXXFLAGSO -I$use_p4est/FAST/include"
+ else
+ AC_MSG_RESULT(no)
+ fi
+])
+
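Once this macro has run, the result is visible to C++ code through the preprocessor symbol DEAL_II_USE_P4EST (see the corresponding config.h.in entry further down). A minimal sketch of such a guard, assuming <base/config.h> is included:

    #include <base/config.h>

    #ifdef DEAL_II_USE_P4EST
    //  p4est was configured in: parallel::distributed::Triangulation is fully functional
    #else
    //  p4est was not configured in: only the dummy Triangulation declaration exists
    #endif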
# libbase. do similarly for the threading building block things if threading
# is enabled
ifeq ($(enable-parser),yes)
- extra-o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT)
+ extra-o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT)
extra-g.o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT)
endif
+
# production rules
$(LIBDIR)/base/%.g.$(OBJEXT) :
@echo "=====base=============debug======$(MT)== $(<F)"
deplibs.o += $D/lib/libtbb$(shared-lib-suffix)
endif
+ ifeq ($(USE_CONTRIB_P4EST),yes)
+ deplibs.g += $(DEAL_II_P4EST_DIR)/DEBUG/lib/libp4est.so \
+ $(DEAL_II_P4EST_DIR)/DEBUG/lib/libsc.so \
+ -Wl,-rpath,$(DEAL_II_P4EST_DIR)/DEBUG/lib
+ deplibs.o += $(DEAL_II_P4EST_DIR)/FAST/lib/libp4est.so \
+ $(DEAL_II_P4EST_DIR)/FAST/lib/libsc.so \
+ -Wl,-rpath,$(DEAL_II_P4EST_DIR)/FAST/lib
+ endif
+
else
deplibs.g =
deplibs.o =
libg: $(LIBDIR)/libbase.g$(lib-suffix)
libo: $(LIBDIR)/libbase$(lib-suffix)
-$(LIBDIR)/libbase$(static-lib-suffix): $(o-files)
+$(LIBDIR)/libbase$(static-lib-suffix): $(o-files) $(extra-o-files)
@echo "=====base=============optimized==$(MT)== Linking library: $(@F)"
@$(AR) ru $@ $(o-files) $(extra-o-files)
@$(RANLIB) $@
-$(LIBDIR)/libbase.g$(static-lib-suffix): $(go-files)
+$(LIBDIR)/libbase.g$(static-lib-suffix): $(go-files) $(extra-g.o-files)
@echo "=====base=============debug======$(MT)== Linking library: $(@F)"
@$(AR) ru $@ $(go-files) $(extra-g.o-files)
@$(RANLIB) $@
-$(LIBDIR)/libbase$(shared-lib-suffix): $(o-files)
+$(LIBDIR)/libbase$(shared-lib-suffix): $(o-files) $(extra-o-files)
@echo "=====base=============optimized==$(MT)== Linking library: $(@F)"
@$(SHLIBLD) $(LDFLAGS) $(SHLIBFLAGS) -o $(LIBDIR)/$(call DEAL_II_SHLIB_NAME,base) $(call DEAL_II_ADD_SONAME,base) $(o-files) $(extra-o-files) $(deplibs.o)
@ln -f -s $(call DEAL_II_SHLIB_NAME,base) $@
-$(LIBDIR)/libbase.g$(shared-lib-suffix): $(go-files)
+$(LIBDIR)/libbase.g$(shared-lib-suffix): $(go-files) $(extra-g.o-files)
@echo "=====base=============debug======$(MT)== Linking library: $(@F)"
@$(SHLIBLD) $(LDFLAGS) $(SHLIBFLAGS) -o $(LIBDIR)/$(call DEAL_II_SHLIB_NAME,base.g) $(call DEAL_II_ADD_SONAME,base.g) $(go-files) $(extra-g.o-files) $(deplibs.g)
@ln -f -s $(call DEAL_II_SHLIB_NAME,base.g) $@
/* Defined if a MUMPS installation was found and is going to be used */
#undef DEAL_II_USE_MUMPS
+/* Defined if we are to use the p4est library to distribute meshes on a
+ cluster computer. */
+#undef DEAL_II_USE_P4EST
+
/* Defined if a PETSc installation was found and is going to be used */
#undef DEAL_II_USE_PETSC
* words, you need to specify the size of the index space
* $[0,\text{size})$ of which objects of this class are a subset.
*
+ * The data structures used in this class along with a rationale can be found
+ * in the @ref distributed_paper "Distributed Computing paper".
+ *
* @author Wolfgang Bangerth, 2009
*/
class IndexSet
*/
void read(std::istream & in);
+ /**
+ * Writes the IndexSet into a compact
+ * binary representation that can be
+ * read in again using the block_read()
+ * function.
+ */
+ void block_write(std::ostream & out) const;
+
+ /**
+ * Constructs the IndexSet from the binary
+ * representation given by the stream
+ * @p in, as written by the block_write()
+ * function.
+ */
+ void block_read(std::istream & in);
+
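As an illustration, a round trip through these two new functions might look as follows; this is only a sketch, and the file name as well as the <fstream> include are assumptions rather than part of this change:

    #include <base/index_set.h>
    #include <fstream>

    IndexSet index_set (100);                    // index space of size 100
    index_set.add_range (10, 25);                // indices [10,25) become members

    std::ofstream out ("index_set.bin", std::ios::binary);
    index_set.block_write (out);                 // compact binary dump
    out.close ();

    IndexSet read_back;
    std::ifstream in ("index_set.bin", std::ios::binary);
    read_back.block_read (in);                   // restores size and ranges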
#ifdef DEAL_II_USE_TRILINOS
/**
Timer (MPI_Comm mpi_communicator,
bool sync_wall_time = false);
-
- /**
- * Structure to save collective data
- * measured by this timer class. Queried
- * by get_data() or printed with
- * print_data() after calling stop().
- */
- struct TimeMinMaxAvg
- {
- double sum;
- double min;
- double max;
- unsigned int min_index;
- unsigned int max_index;
- double avg;
-
- /**
- * Set the time values to @p
- * val, and the MPI rank to
- * the given @p rank.
- */
- void set(const double val,
- const unsigned int rank)
- {
- sum = min = max = val;
- min_index = max_index = rank;
- }
-
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- /**
- * Given two structures
- * indicating timer
- * information, do the
- * reduction by choosing the
- * one with the longer time
- * interval. This is a max
- * operation and therefore a
- * reduction.
- *
- * Arguments are passed as
- * void pointers to satisfy
- * the MPI requirement of
- * reduction operations.
- */
- static void max_reduce ( const void * in_lhs_,
- void * inout_rhs_,
- int * len,
- MPI_Datatype * );
-#endif
- };
-
/**
* Returns a reference to the data
* structure with global timing
* information. Filled after calling
* stop().
*/
- const TimeMinMaxAvg & get_data() const;
+ const Utilities::System::MinMaxAvg & get_data() const;
/**
* Prints the data to the given stream.
*/
bool sync_wall_time;
- TimeMinMaxAvg mpi_data;
+ Utilities::System::MinMaxAvg mpi_data;
#endif
};
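A sketch of how the synchronized variant of the timer is meant to be used; mpi_communicator is assumed to be a valid MPI communicator and the exact start/stop placement is only illustrative:

    Timer timer (mpi_communicator, /*sync_wall_time=*/ true);
    timer.start ();
    //  ... work to be timed ...
    timer.stop ();                               // performs the MPI reduction

    const Utilities::System::MinMaxAvg &data = timer.get_data ();
    //  data.max is the largest wall time over all processors,
    //  data.max_index the rank on which it was measured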
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
inline
-const Timer::TimeMinMaxAvg &
+const Utilities::System::MinMaxAvg &
Timer::get_data() const
{
return mpi_data;
// $Id$
// Version: $Name$
//
-// Copyright (C) 2009, 2010 by the deal.II authors
+// Copyright (C) 2009 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* distributed on many
* processors. Such cells are
* called "artificial".
+ *
+ * See the glossary entries on @ref
+ * GlossSubdomainId "subdomain ids"
+ * and @ref GlossArtificialCell
+ * "artificial cells" as well as
+ * the @ref distributed module for
+ * more information.
*/
const unsigned int artificial_subdomain_id = static_cast<subdomain_id_t>(-2);
}
*/
double get_cpu_load ();
+ /**
+ * Structure that holds information about
+ * memory usage in kB. Used by
+ * get_memory_stats(). See the description
+ * of /proc/self/status in "man 5 proc" for details.
+ */
+ struct MemoryStats
+ {
+ unsigned long int VmPeak; /**< peak virtual memory size in kB */
+ unsigned long int VmSize; /**< current virtual memory size in kB */
+ unsigned long int VmHWM; /**< peak resident memory size in kB */
+ unsigned long int VmRSS; /**< current resident memory size in kB */
+ };
+
+
+ /**
+ * Fills the @p stats structure with
+ * information about the memory
+ * consumption of this process. This is
+ * only implemented on Linux.
+ */
+ void get_memory_stats (MemoryStats & stats);
+
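A short sketch of querying these statistics; the output stream and the <iostream> include are assumptions, and on platforms other than Linux the implementation simply leaves all fields at zero:

    Utilities::System::MemoryStats stats;
    Utilities::System::get_memory_stats (stats);

    std::cout << "peak virtual memory: " << stats.VmPeak << " kB, "
              << "resident set size: "   << stats.VmRSS  << " kB"
              << std::endl;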
/**
* Return the name of the host this
*/
MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Data structure to store the result of
+ * calculate_collective_mpi_min_max_avg.
+ */
+ struct MinMaxAvg
+ {
+ double sum;
+ double min;
+ double max;
+ unsigned int min_index;
+ unsigned int max_index;
+ double avg;
+ };
+
+ /**
+ * Returns sum, average, minimum,
+ * maximum, and the processor ids of the
+ * minimum and maximum as a collective
+ * operation on the given MPI communicator
+ * @p mpi_communicator. Each processor's
+ * value is given in @p my_value and
+ * the result will be returned in
+ * @p result. The result is available on
+ * all machines.
+ */
+ void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
+ const double my_value,
+ MinMaxAvg & result);
+
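A sketch of a typical call; my_value stands for whatever local quantity is to be reduced and mpi_communicator for the communicator in use:

    Utilities::System::MinMaxAvg result;
    Utilities::System::calculate_collective_mpi_min_max_avg (mpi_communicator,
                                                             my_value,
                                                             result);
    //  result.sum, result.avg, result.min and result.max are now identical
    //  on all processors; result.min_index and result.max_index hold the
    //  ranks on which the extrema were attained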
+
+
/**
* A class that is used to initialize the
* MPI system at the beginning of a
}
+void
+IndexSet::block_write(std::ostream & out) const
+{
+ Assert (out, ExcIO());
+ out.write(reinterpret_cast<const char*>(&index_space_size),
+ sizeof(index_space_size));
+ size_t n_ranges = ranges.size();
+ out.write(reinterpret_cast<const char*>(&n_ranges),
+ sizeof(n_ranges));
+ if (ranges.size())
+ out.write (reinterpret_cast<const char*>(&*ranges.begin()),
+ reinterpret_cast<const char*>(&*ranges.end())
+ - reinterpret_cast<const char*>(&*ranges.begin()));
+ Assert (out, ExcIO());
+}
+
+void
+IndexSet::block_read(std::istream & in)
+{
+ unsigned int size;
+ size_t n_ranges;
+ in.read(reinterpret_cast<char*>(&size), sizeof(size));
+ in.read(reinterpret_cast<char*>(&n_ranges), sizeof(n_ranges));
+ // we have to clear ranges first
+ ranges.clear();
+ set_size(size);
+ ranges.resize(n_ranges, Range(0,0));
+ if (n_ranges)
+ in.read(reinterpret_cast<char*>(&*ranges.begin()),
+ reinterpret_cast<char*>(&*ranges.end())
+ - reinterpret_cast<char*>(&*ranges.begin()));
+}
+
+
void
IndexSet::subtract_set (const IndexSet & other)
}
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-
-void Timer::TimeMinMaxAvg::max_reduce ( const void * in_lhs_,
- void * inout_rhs_,
- int * len,
- MPI_Datatype * )
-{
- const Timer::TimeMinMaxAvg * in_lhs = static_cast<const Timer::TimeMinMaxAvg*>(in_lhs_);
- Timer::TimeMinMaxAvg * inout_rhs = static_cast<Timer::TimeMinMaxAvg*>(inout_rhs_);
-
- Assert(*len==1, ExcInternalError());
-
- inout_rhs->sum += in_lhs->sum;
- if (inout_rhs->min>in_lhs->min)
- {
- inout_rhs->min = in_lhs->min;
- inout_rhs->min_index = in_lhs->min_index;
- }
- if (inout_rhs->max<in_lhs->max)
- {
- inout_rhs->max = in_lhs->max;
- inout_rhs->max_index = in_lhs->max_index;
- }
-}
-
-
-#endif
double Timer::stop ()
{
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
if (sync_wall_time)
{
- unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
-
- MPI_Op op;
- int ierr = MPI_Op_create((MPI_User_function *)&Timer::TimeMinMaxAvg::max_reduce,
- false, &op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- TimeMinMaxAvg in;
- in.set(time, my_id);
-
- MPI_Datatype type;
- int lengths[]={3,2};
- MPI_Aint displacements[]={0,offsetof(TimeMinMaxAvg, min_index)};
- MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
-
- ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Type_commit(&type);
-
- ierr = MPI_Reduce ( &in, &this->mpi_data, 1, type, op, 0, mpi_communicator );
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Type_free (&type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Op_free(&op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- this->mpi_data.avg = this->mpi_data.sum / dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+ Utilities::System
+ ::calculate_collective_mpi_min_max_avg(mpi_communicator, time,
+ this->mpi_data);
cumulative_wall_time += this->mpi_data.max;
}
#endif
+
+ void get_memory_stats (MemoryStats & stats)
+ {
+ stats.VmPeak = stats.VmSize = stats.VmHWM = stats.VmRSS = 0;
+
+ // parsing /proc/self/stat would be a
+ // lot easier, but it does not contain
+ // VmHWM, so we use /status instead.
+#if defined(__linux__)
+ std::ifstream file("/proc/self/status");
+ std::string line;
+ std::string name;
+ while (!file.eof())
+ {
+ file >> name;
+ if (name == "VmPeak:")
+ file >> stats.VmPeak;
+ else if (name == "VmSize:")
+ file >> stats.VmSize;
+ else if (name == "VmHWM:")
+ file >> stats.VmHWM;
+ else if (name == "VmRSS:")
+ {
+ file >> stats.VmRSS;
+ break; //this is always the last entry
+ }
+
+ getline(file, line);
+ }
+#endif
+ }
+
+
+
std::string get_hostname ()
{
const unsigned int N=1024;
}
+ namespace
+ {
+ // custom MPI_Op for
+ // calculate_collective_mpi_min_max_avg
+ void max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * )
+ {
+ const MinMaxAvg * in_lhs = static_cast<const MinMaxAvg*>(in_lhs_);
+ MinMaxAvg * inout_rhs = static_cast<MinMaxAvg*>(inout_rhs_);
+
+ Assert(*len==1, ExcInternalError());
+
+ inout_rhs->sum += in_lhs->sum;
+ if (inout_rhs->min>in_lhs->min)
+ {
+ inout_rhs->min = in_lhs->min;
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+ else if (inout_rhs->min == in_lhs->min)
+ { // choose lower cpu index when tied to make operator commutative
+ if (inout_rhs->min_index > in_lhs->min_index)
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+
+ if (inout_rhs->max < in_lhs->max)
+ {
+ inout_rhs->max = in_lhs->max;
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+ else if (inout_rhs->max == in_lhs->max)
+ { // choose lower cpu index when tied to make operator commutative
+ if (inout_rhs->max_index > in_lhs->max_index)
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+ }
+ }
+
+ void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
+ double my_value,
+ MinMaxAvg & result)
+ {
+ unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+ unsigned int numproc = dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+
+ MPI_Op op;
+ int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ MinMaxAvg in;
+ in.sum = in.min = in.max = my_value;
+ in.min_index = in.max_index = my_id;
+
+ MPI_Datatype type;
+ int lengths[]={3,2};
+ MPI_Aint displacements[]={0,offsetof(MinMaxAvg, min_index)};
+ MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+ ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_commit(&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ ierr = MPI_Allreduce ( &in, &result, 1, type, op, mpi_communicator );
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_free (&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Op_free(&op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ result.avg = result.sum / numproc;
+ }
+
+
std::vector<unsigned int>
compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
const std::vector<unsigned int> & destinations)
return 0;
}
-
+ void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
+ double my_value,
+ MinMaxAvg & result)
+ {
+ result.sum = my_value;
+ result.avg = my_value;
+ result.min = my_value;
+ result.max = my_value;
+ result.min_index = 0;
+ result.max_index = 0;
+ }
+
MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
{
USE_CONTRIB_HSL = @USE_CONTRIB_HSL@
USE_CONTRIB_UMFPACK = @USE_CONTRIB_UMFPACK@
+USE_CONTRIB_P4EST = @USE_CONTRIB_P4EST@
+DEAL_II_P4EST_DIR = @DEAL_II_P4EST_DIR@
+
TARGET = @target@
######################################################
F77FLAGS.o = @DEFS@ @F77FLAGSO@ $(INCLUDE)
# compile flags for C compiler
-CFLAGS = @CFLAGS@
+CFLAGS.g = @CFLAGSG@
+CFLAGS.o = @CFLAGSO@
# if in debug mode, add TBB assertions
ifeq ($(enable-threads),yes)
include $(DEAL_II_PETSC_DIR)/bmake/common/variables
endif
else
+ # PETSc's $(PETSC_ARCH)/conf/petscvariables include file happens
+ # to define a variable $(CXX) of its own. we need to save our own
+ # variable and restore it later. this isn't pretty :-(
+ SAVE_CXX := $(CXX)
include $(DEAL_II_PETSC_DIR)/conf/variables
+ CXX := $(SAVE_CXX)
endif
CXXFLAGS.g += $(GCXX_PETSCFLAGS)
CXXFLAGS.o += $(OCXX_PETSCFLAGS)
######################################################################
# $Id$
#
-# Copyright (C) 2001, 2003, 2005, the deal.II authors
+# Copyright (C) 2001, 2003, 2005, 2010, the deal.II authors
#
# Remove insignificant volatile data from output files of tests
#
#s/value.*//;
#s/with residual.*//;
+
+
+# remove deal.II debug output
+s/^DEAL.*::_.*\n//g;
#! /bin/sh
-# From configure.in Revision: 22370 .
+# From configure.in Revision: 22333 .
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.63 for deal.II 6.4.pre.
#
HSL_INCLUDE_DIR
NEEDS_F77LIBS
DEAL_II_USE_BLAS
+DEAL_II_P4EST_DIR
+USE_CONTRIB_P4EST
+DEAL_II_USE_P4EST
DEAL_II_DEFINE_DEAL_II_USE_MUMPS
DEAL_II_BLACS_ARCH
DEAL_II_BLACS_DIR
CXXCPP
enablethreads
CFLAGSPIC
+CFLAGSG
+CFLAGSO
CC_VERSION
SHLIBFLAGS
SHLIBLD
ac_subst_files=''
ac_user_opts='
enable_option_checking
+enable_mpi
enable_threads
enable_shared
enable_parser
with_mumps
with_scalapack
with_blacs
+with_p4est
with_blas
with_zlib
with_netcdf
--disable-option-checking ignore unrecognized --enable/--with options
--disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no)
--enable-FEATURE[=ARG] include FEATURE [ARG=yes]
+ --enable-mpi Select MPI-enabled compilers and MPI support in
+ deal.II.
--enable-threads Use multiple threads inside deal.II
--enable-shared Set compiler flags to generate shared libraries
--enable-parser While switched on by default, this option allows to
Specify the path to the BLACS installation; use this
if you want to override the BLACS_DIR environment
variable.
+ --with-p4est=/path/to/p4est makes deal.II use p4est to distribute meshes
+ on a cluster computer
--with-blas=blaslib Use the blas library blaslib. Make sure the path
to the library is searched by ld, since it is
included by the argument -lblaslib. If no argument
{ $as_echo "$as_me:$LINENO: result: ---------------- configuring C/C++ compilers ----------------" >&5
$as_echo "---------------- configuring C/C++ compilers ----------------" >&6; }
+
+# Check whether --enable-mpi was given.
+if test "${enable_mpi+set}" = set; then
+ enableval=$enable_mpi;
+ { $as_echo "$as_me:$LINENO: checking whether to explicitly use MPI" >&5
+$as_echo_n "checking whether to explicitly use MPI... " >&6; }
+ if test "x$enableval" = "xyes" ; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+ if test "x$CXX" = "x" ; then CXX=mpiCC ; fi
+ if test "x$CC" = "x" ; then CC=mpicc ; fi
+ else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+fi
+
+
OLDCFLAGS="$CFLAGS"
ac_ext=c
ac_cpp='$CPP $CPPFLAGS'
#define DEAL_II_COMPILER_SUPPORTS_MPI 1
_ACEOF
+
+ DEAL_II_COMPILER_SUPPORTS_MPI=1
+ export DEAL_II_COMPILER_SUPPORTS_MPI
+
DEAL_II_USE_MPI=yes
else
if test "$GCC" = yes ; then
- CFLAGS="$CFLAGS -O3 -funroll-loops -funroll-all-loops -fstrict-aliasing"
+ CFLAGSO="$CFLAGS -O3 -funroll-loops -funroll-all-loops -fstrict-aliasing"
+ CFLAGSG="$CFLAGS -g"
case "$target" in
*aix* )
CFLAGSPIC=
case "$CC_VERSION" in
ibm_xlc)
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGS -O2"
CFLAGSPIC="-fPIC"
SHLIBLD="$CXX"
;;
MIPSpro*)
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGS -O2"
CFLAGSPIC="-KPIC"
;;
intel_icc*)
- CFLAGS="$CFLAGS -O2 -unroll"
+ CFLAGSO="$CFLAGS -O2 -unroll"
case "$CC_VERSION" in
intel_icc5 | intel_icc6 | intel_icc7 | intel_icc8 | intel_icc9)
CFLAGSPIC="-KPIC"
;;
esac
- CFLAGS="$CFLAGS -ansi_alias -vec_report0"
+ CFLAGSO="$CFLAGSO -ansi_alias -vec_report0"
case "$target" in
*86*)
*)
{ $as_echo "$as_me:$LINENO: result: Unknown C compiler - using generic options" >&5
$as_echo "Unknown C compiler - using generic options" >&6; }
- CFLAGS="$CFLAGS -O2"
+ CFLAGSO="$CFLAGSO -O2"
;;
esac
fi
+
# Check whether --enable-threads was given.
if test "${enable_threads+set}" = set; then
enableval=$enable_threads;
esac
if test "x$enableshared" = "xyes" ; then
- CFLAGS="$CFLAGS $CFLAGSPIC"
+ CFLAGSO="$CFLAGSO $CFLAGSPIC"
+ CFLAGSG="$CFLAGSG $CFLAGSPIC"
CXXFLAGSG="$CXXFLAGSG $CXXFLAGSPIC"
CXXFLAGSO="$CXXFLAGSO $CXXFLAGSPIC"
F77FLAGSG="$F77FLAGSG $F77FLAGSPIC"
+
+ { $as_echo "$as_me:$LINENO: checking whether p4est shall be used" >&5
+$as_echo_n "checking whether p4est shall be used... " >&6; }
+
+
+# Check whether --with-p4est was given.
+if test "${with_p4est+set}" = set; then
+ withval=$with_p4est; use_p4est=$withval
+else
+ use_p4est=no
+fi
+
+
+ if test "x$use_p4est" != "xno" ; then
+ { $as_echo "$as_me:$LINENO: result: yes" >&5
+$as_echo "yes" >&6; }
+
+ if test ! -d "${use_p4est}/DEBUG" -o ! -d "${use_p4est}/FAST" ; then
+ echo "${use_p4est}/DEBUG"
+ { { $as_echo "$as_me:$LINENO: error: p4est directories $use_p4est/DEBUG or $use_p4est/FAST not found" >&5
+$as_echo "$as_me: error: p4est directories $use_p4est/DEBUG or $use_p4est/FAST not found" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+
+cat >>confdefs.h <<\_ACEOF
+#define DEAL_II_USE_P4EST 1
+_ACEOF
+
+ USE_CONTRIB_P4EST=yes
+ export USE_CONTRIB_P4EST
+
+ DEAL_II_P4EST_DIR=${use_p4est}
+ export DEAL_II_P4EST_DIR
+
+ CXXFLAGSG="$CXXFLAGSG -I$use_p4est/DEBUG/include"
+ CXXFLAGSO="$CXXFLAGSO -I$use_p4est/FAST/include"
+ else
+ { $as_echo "$as_me:$LINENO: result: no" >&5
+$as_echo "no" >&6; }
+ fi
+
+
+
+
+
+
+
+
if test "x$with_umfpack" != "x" -a "x$with_umfpack" != "xno" ; then
if test "x$with_blas" = "x" -o "x$with_blas" = "xno"; then
with_blas="yes"
AC_MSG_RESULT()
AC_MSG_RESULT(---------------- configuring C/C++ compilers ----------------)
+
+dnl See if the user has specified --enable-mpi. If so, and if $CXX and $CC have
+dnl not been set to a particular value, then override them with 'mpiCC' and 'mpicc'.
+AC_ARG_ENABLE(mpi,
+ AS_HELP_STRING([--enable-mpi],
+ [Select MPI-enabled compilers and MPI support in deal.II.]),
+ [
+ AC_MSG_CHECKING(whether to explicitly use MPI)
+ if test "x$enableval" = "xyes" ; then
+ AC_MSG_RESULT(yes)
+ if test "x$CXX" = "x" ; then CXX=mpiCC ; fi
+ if test "x$CC" = "x" ; then CC=mpicc ; fi
+ else
+ AC_MSG_RESULT(no)
+ fi])
+
dnl Find a C compiler. This modifies the variable CC.
dnl In order to get the absolute path of the compiler, use the
dnl second line
DEAL_II_SET_CC_FLAGS
AC_SUBST(CC_VERSION)
-AC_SUBST(CFLAGS)
+AC_SUBST(CFLAGSO)
+AC_SUBST(CFLAGSG)
AC_SUBST(CFLAGSPIC)
dnl -------------------------------------------------------------
esac
if test "x$enableshared" = "xyes" ; then
- CFLAGS="$CFLAGS $CFLAGSPIC"
+ CFLAGSO="$CFLAGSO $CFLAGSPIC"
+ CFLAGSG="$CFLAGSG $CFLAGSPIC"
CXXFLAGSG="$CXXFLAGSG $CXXFLAGSPIC"
CXXFLAGSO="$CXXFLAGSO $CXXFLAGSPIC"
F77FLAGSG="$F77FLAGSG $F77FLAGSPIC"
AC_SUBST(DEAL_II_BLACS_ARCH)
AC_SUBST(DEAL_II_DEFINE_DEAL_II_USE_MUMPS)
+DEAL_II_CONFIGURE_P4EST
+AC_SUBST(DEAL_II_USE_P4EST)
+AC_SUBST(USE_CONTRIB_P4EST)
+AC_SUBST(DEAL_II_P4EST_DIR)
+
+
+
+
dnl Make sure we configure for libraries used by other libraries. For
dnl example, UMFPACK needs blas, and so does LAPACK.
if test "x$with_umfpack" != "x" -a "x$with_umfpack" != "xno" ; then
endif
+
ifeq ($(USE_CONTRIB_UMFPACK),yes)
umfpack:
@cd umfpack ; $(MAKE)
endif
+
ifeq ($(enable-parser),yes)
functionparser:
@cd functionparser ; $(MAKE)
@-rm -f ../lib/contrib/*/*.o
.PHONY: default hsl clean hsl-clean umfpack tbb tbb-clean functionparser
+
AC_CONFIG_SUBDIRS(utilities)
-
dnl -------------------------------------------------------------
dnl Output results
dnl -------------------------------------------------------------
$(inst-files) \
Makefile $D/common/Make.global_options
@echo "===================================== Remaking deal.II/Makefile"
- @(for dir in dofs fe grid hp multigrid numerics ; do \
+ @(for dir in dofs fe grid hp multigrid numerics distributed ; do \
$D/common/scripts/make_dependencies $(INCLUDE) "-B\$$(LIBDIR)" `echo $D/deal.II/source/$$dir/*cc` \
| $(PERL) -p -e 's!LIBDIR\)/(.*):!LIBDIR)/DIM_PLACEHOLDER/$$1:!g;' \
| $(PERL) -pe 's!((\.g)?.$(OBJEXT)):!_DIM_PLACEHOLDER$$1:!g;' \
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__distributed_grid_refinement_h
+#define __deal2__distributed_grid_refinement_h
+
+
+#include <base/config.h>
+#include <base/exceptions.h>
+#include <distributed/tria.h>
+
+#include <vector>
+#include <limits>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+ namespace distributed
+ {
+ // forward declarations
+ template <int dim, int spacedim> class Triangulation;
+
+
+/**
+ * Collection of functions controlling refinement and coarsening of
+ * parallel::distributed::Triangulation objects. This namespace
+ * provides similar functionality to the dealii::GridRefinement
+ * namespace, except that it works for meshes that are parallel and
+ * distributed.
+ *
+ * @ingroup grid
+ * @author Wolfgang Bangerth, 2009
+ */
+ namespace GridRefinement
+ {
+ /**
+ * Like
+ * dealii::GridRefinement::refine_and_coarsen_fixed_number,
+ * but for parallel distributed
+ * triangulation.
+ *
+ * The vector of criteria needs
+ * to be a vector of refinement
+ * criteria for all cells
+ * active on the current
+ * triangulation, i.e. it needs to have as many
+ * entries as tria.dealii::Triangulation::n_active_cells(). However,
+ * the function will only look
+ * at the indicators that
+ * correspond to those cells
+ * that are actually locally
+ * owned, and ignore the
+ * indicators for all other
+ * cells. The function will
+ * then coordinate among all
+ * processors that store part
+ * of the triangulation so that
+ * at the end @p
+ * top_fraction_of_cells are
+ * refined, where the fraction
+ * is enforced as a fraction of
+ * Triangulation::n_global_active_cells,
+ * not
+ * Triangulation::n_locally_active_cells
+ * on each processor
+ * individually. In other
+ * words, it may be that on
+ * some processors, no cells
+ * are refined at all.
+ *
+ * The same is true for the fraction of
+ * cells that is coarsened.
+ */
+ template <int dim, class Vector, int spacedim>
+ void
+ refine_and_coarsen_fixed_number (
+ parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ const double top_fraction_of_cells,
+ const double bottom_fraction_of_cells);
+
+ /**
+ * Like
+ * dealii::GridRefinement::refine_and_coarsen_fixed_fraction,
+ * but for parallel distributed
+ * triangulation.
+ *
+ * The vector of criteria needs to be a
+ * vector of refinement criteria for
+ * all cells active on the current
+ * triangulation, i.e. it needs to have as many
+ * entries as tria.dealii::Triangulation::n_active_cells(). However,
+ * the function will only look at the
+ * indicators that correspond to those
+ * cells that are actually locally
+ * owned, and ignore the indicators for
+ * all other cells. The function will
+ * then coordinate among all processors
+ * that store part of the triangulation
+ * so that at the end the smallest
+ * fraction of
+ * Triangulation::n_global_active_cells
+ * (not
+ * Triangulation::n_locally_active_cells
+ * on each processor individually) is
+ * refined that together make up a
+ * total of @p top_fraction_of_error of
+ * the total error. In other words, it
+ * may be that on some processors, no
+ * cells are refined at all.
+ *
+ * The same is true for the fraction of
+ * cells that is coarsened.
+ */
+ template <int dim, class Vector, int spacedim>
+ void
+ refine_and_coarsen_fixed_fraction (
+ parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ const double top_fraction_of_error,
+ const double bottom_fraction_of_error);
+ }
+ }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif //__deal2__distributed_grid_refinement_h
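A typical call mirrors the serial dealii::GridRefinement functions; the following is only a sketch in which triangulation and estimated_error_per_cell are placeholder names:

    //  'triangulation' is a parallel::distributed::Triangulation<dim>,
    //  'estimated_error_per_cell' has one entry per active cell
    parallel::distributed::GridRefinement
      ::refine_and_coarsen_fixed_number (triangulation,
                                         estimated_error_per_cell,
                                         0.3,      // refine ~30% of the cells
                                         0.03);    // coarsen ~3% of the cells
    triangulation.execute_coarsening_and_refinement ();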
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__distributed_solution_transfer_h
+#define __deal2__distributed_solution_transfer_h
+
+#include <base/config.h>
+#include <distributed/tria.h>
+#include <dofs/dof_handler.h>
+
+#include <vector>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+
+ namespace distributed
+ {
+/**
+ * Transfers a discrete FE function (like a solution vector) by
+ * interpolation while refining and/or
+ * coarsening a distributed grid and
+ * handles the necessary communication.
+ *
+ * <h3>Usage</h3>
+ * @verbatim
+ * SolutionTransfer<dim, Vector<double> > soltrans(dof_handler);
+ * // flag some cells for refinement
+ * // and coarsening, e.g.
+ * GridRefinement::refine_and_coarsen_fixed_fraction(
+ * *tria, error_indicators, 0.3, 0.05);
+ * // prepare the triangulation,
+ * tria->prepare_coarsening_and_refinement();
+ * // prepare the SolutionTransfer object
+ * // for coarsening and refinement and give
+ * // the solution vector that we intend to
+ * // interpolate later,
+ * soltrans.prepare_for_coarsening_and_refinement(solution);
+ * // actually execute the refinement,
+ * tria->execute_coarsening_and_refinement ();
+ * // redistribute dofs,
+ * dof_handler->distribute_dofs (fe);
+ * // and interpolate the solution
+ * Vector<double> interpolated_solution(dof_handler->n_dofs());
+ * soltrans.interpolate(interpolated_solution);
+ * @endverbatim
+ * @ingroup distributed
+ * @author Timo Heister, 2009
+ */
+ template<int dim, typename VECTOR, class DH=DoFHandler<dim> >
+ class SolutionTransfer
+ {
+ public:
+ /**
+ * Constructor, takes the current DoFHandler
+ * as argument.
+ */
+ SolutionTransfer(const DH &dof);
+ /**
+ * Destructor.
+ */
+ ~SolutionTransfer();
+
+ /**
+ * Prepares the @p SolutionTransfer for
+ * coarsening and refinement. It
+ * stores the dof indices of each cell and
+ * stores the dof values of the vectors in
+ * @p all_in in each cell that'll be coarsened.
+ * @p all_in includes all vectors
+ * that are to be interpolated
+ * onto the new (refined and/or
+ * coarsened) grid.
+ */
+ void prepare_for_coarsening_and_refinement (const std::vector<const VECTOR*> &all_in);
+
+ /**
+ * Same as previous function
+ * but for only one discrete function
+ * to be interpolated.
+ */
+ void prepare_for_coarsening_and_refinement (const VECTOR &in);
+
+ /**
+ * Interpolates the data previously
+ * stored by
+ * prepare_for_coarsening_and_refinement()
+ * onto the refined and/or coarsened
+ * grid and writes the results into
+ * the vectors given by @p all_out.
+ */
+ void interpolate (std::vector<VECTOR*> &all_out);
+
+ /**
+ * Same as the previous function.
+ * It interpolates only one function.
+ * It assumes the vectors have the
+ * right sizes (i.e. <tt>in.size()==n_dofs_old</tt>,
+ * <tt>out.size()==n_dofs_refined</tt>).
+ *
+ * Calling this function multiple times is
+ * NOT allowed. Interpolating
+ * several functions can be performed
+ * in one step by using
+ * <tt>interpolate (all_out)</tt>.
+ */
+ void interpolate (VECTOR &out);
+
+
+ /**
+ * Return the size in bytes that needs
+ * to be stored per cell.
+ */
+ unsigned int get_data_size() const;
+
+
+
+ private:
+ /**
+ * Pointer to the degree of
+ * freedom handler to work
+ * with.
+ */
+ SmartPointer<const DH,SolutionTransfer<dim,VECTOR,DH> > dof_handler;
+
+ /**
+ * A vector that stores
+ * pointers to all the
+ * vectors we are supposed to
+ * copy over from the old to
+ * the new mesh.
+ */
+ std::vector<const VECTOR*> input_vectors;
+
+ /**
+ * The offset that the
+ * Triangulation has assigned
+ * to this object starting at
+ * which we are allowed to
+ * write.
+ */
+ unsigned int offset;
+
+ /**
+ * A callback function used
+ * to pack the data on the
+ * current mesh into objects
+ * that can later be
+ * retrieved after
+ * refinement, coarsening and
+ * repartitioning.
+ */
+ void pack_callback(const typename Triangulation<dim,dim>::cell_iterator &cell,
+ const typename Triangulation<dim,dim>::CellStatus status,
+ void* data);
+
+ /**
+ * A callback function used
+ * to unpack the data on the
+ * current mesh that has been
+ * packed up previously on
+ * the mesh before
+ * refinement, coarsening and
+ * repartitioning.
+ */
+ void unpack_callback(const typename Triangulation<dim,dim>::cell_iterator &cell,
+ const typename Triangulation<dim,dim>::CellStatus status,
+ const void* data,
+ std::vector<VECTOR*> &all_out);
+
+ };
+
+
+ }
+}
+
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__distributed_tria_h
+#define __deal2__distributed_tria_h
+
+
+#include <base/config.h>
+#include <base/subscriptor.h>
+#include <base/smartpointer.h>
+#include <base/template_constraints.h>
+#include <grid/tria.h>
+
+#include <base/std_cxx1x/function.h>
+
+#include <vector>
+#include <list>
+#include <utility>
+
+#ifdef DEAL_II_USE_P4EST
+#include <p4est_connectivity.h>
+#include <p4est.h>
+#include <p4est_ghost.h>
+
+#include <p8est_connectivity.h>
+#include <p8est.h>
+#include <p8est_ghost.h>
+#endif
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class Triangulation;
+
+#ifdef DEAL_II_USE_P4EST
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ namespace Policy
+ {
+ template <int, int> class ParallelDistributed;
+ }
+ }
+}
+
+
+namespace internal
+{
+ namespace p4est
+ {
+ /**
+ * A structure whose explicit
+ * specializations contain
+ * typedefs to the relevant
+ * p4est_* and p8est_*
+ * types. Using this
+ * structure, for example by
+ * saying
+ * <code>types@<dim@>::connectivity</code>
+ * we can write code in a
+ * dimension independent way,
+ * either referring to
+ * p4est_connectivity_t or
+ * p8est_connectivity_t,
+ * depending on template
+ * argument.
+ */
+ template <int> struct types;
+
+ template <>
+ struct types<2>
+ {
+ typedef p4est_connectivity_t connectivity;
+ typedef p4est_t forest;
+ typedef p4est_tree_t tree;
+ typedef p4est_quadrant_t quadrant;
+ typedef p4est_topidx_t topidx;
+ typedef p4est_locidx_t locidx;
+ typedef p4est_balance_type_t balance_type;
+ typedef p4est_ghost_t ghost;
+ };
+
+ template <>
+ struct types<3>
+ {
+ typedef p8est_connectivity_t connectivity;
+ typedef p8est_t forest;
+ typedef p8est_tree_t tree;
+ typedef p8est_quadrant_t quadrant;
+ typedef p4est_topidx_t topidx;
+ typedef p4est_locidx_t locidx;
+ typedef p8est_balance_type_t balance_type;
+ typedef p8est_ghost_t ghost;
+ };
+
+
+ /**
+ * Initialize the
+ * GeometryInfo<dim>::max_children_per_cell
+ * children of the cell
+ * p4est_cell.
+ */
+ template <int dim>
+ void
+ init_quadrant_children
+ (const typename types<dim>::quadrant & p4est_cell,
+ typename types<dim>::quadrant (&p4est_children)[GeometryInfo<dim>::max_children_per_cell]);
+
+
+ /**
+ * Initialize quadrant to represent a coarse cell.
+ */
+ template <int dim>
+ void
+ init_coarse_quadrant(typename types<dim>::quadrant & quad);
+
+
+
+ /**
+ * Returns whether q1 and q2 are equal
+ */
+ template <int dim>
+ bool
+ quadrant_is_equal (const typename types<dim>::quadrant & q1,
+ const typename types<dim>::quadrant & q2);
+
+ //TODO: remove these functions from
+ //public interface somehow? [TH]
+
+ /**
+ * returns whether q1 is an ancestor of q2
+ */
+ template <int dim>
+ bool
+ quadrant_is_ancestor (const typename types<dim>::quadrant & q1,
+ const typename types<dim>::quadrant & q2);
+ }
+}
+
+
+
+namespace parallel
+{
+ namespace distributed
+ {
+
+
+/**
+ * This class acts like the dealii::Triangulation class, but it
+ * distributes the mesh across a number of different processors when
+ * using MPI. The class's interface does not add a lot to the
+ * dealii::Triangulation class but there are a number of difficult
+ * algorithms under the hood that ensure we always have a
+ * load-balanced, fully distributed mesh. Use of this class is
+ * explained in step-40, step-32, the @ref distributed documentation
+ * module, as well as the @ref distributed_paper . See there for more
+ * information.
+ *
+ * @author Wolfgang Bangerth, Timo Heister 2008, 2009, 2010
+ * @ingroup distributed
+ */
+ template <int dim, int spacedim = dim>
+ class Triangulation : public dealii::Triangulation<dim,spacedim>
+ {
+ public:
+ /**
+ * Import the various
+ * iterator typedefs from the
+ * base class.
+ */
+ typedef typename dealii::Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+ typedef typename dealii::Triangulation<dim,spacedim>::cell_iterator cell_iterator;
+ typedef typename dealii::Triangulation<dim,spacedim>::raw_cell_iterator raw_cell_iterator;
+
+ /**
+ * Constructor.
+ *
+ * @param mpi_communicator denotes
+ * the MPI communicator to be used for
+ * the triangulation.
+ *
+ * @param smooth_grid Degree
+ * and kind of mesh smoothing
+ * to be applied to the
+ * mesh. See the
+ * dealii::Triangulation
+ * class for a description of
+ * the kinds of smoothing
+ * operations that can be
+ * applied.
+ *
+ * @note This class does not
+ * currently support the
+ * <code>check_for_distorted_cells</code>
+ * argument provided by the
+ * base class.
+ */
+ Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing
+ smooth_grid = (dealii::Triangulation<dim,spacedim>::none));
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+ /**
+ * Reset this triangulation into a
+ * virgin state by deleting all data.
+ *
+ * Note that this operation is only
+ * allowed if no subscriptions to this
+ * object exist any more, such as
+ * DoFHandler objects using it.
+ */
+ virtual void clear ();
+
+ /**
+ * Implementation of the same
+ * function as in the base
+ * class.
+ */
+ virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria);
+
+ /**
+ * Create a triangulation as
+ * documented in the base
+ * class.
+ *
+ * This function also sets up
+ * the various data
+ * structures necessary to
+ * distribute a mesh across a
+ * number of processors. This
+ * will be necessary once the
+ * mesh is being refined,
+ * though we will always keep
+ * the entire coarse mesh
+ * that is generated by this
+ * function on all
+ * processors.
+ */
+ virtual void create_triangulation (const std::vector<Point<spacedim> > &vertices,
+ const std::vector<CellData<dim> > &cells,
+ const SubCellData &subcelldata);
+
+ /**
+ * Coarsen and refine the
+ * mesh according to
+ * refinement and coarsening
+ * flags set.
+ *
+ * Since the current
+ * processor only has control
+ * over those cells it owns
+ * (i.e. the ones for which
+ * <code>cell-@>subdomain_id()
+ * ==
+ * this-@>locally_owned_subdomain()</code>),
+ * refinement and coarsening
+ * flags are only respected
+ * for those locally owned
+ * cells. Flags may be set on
+ * other cells as well (and
+ * often are, in fact, if you
+ * call
+ * Triangulation::prepare_coarsening_and_refinement())
+ * but will be largely
+ * ignored: the decision to
+ * refine the global mesh
+ * will only be affected by
+ * flags set on locally owned
+ * cells.
+ */
+ virtual void execute_coarsening_and_refinement ();
+
+ /**
+ * Return the subdomain id of
+ * those cells that are owned
+ * by the current
+ * processor. All cells in
+ * the triangulation that do
+ * not have this subdomain id
+ * are either owned by
+ * another processor or have
+ * children that only exist
+ * on other processors.
+ */
+ types::subdomain_id_t locally_owned_subdomain () const;
+
+ /**
+ * Return the number of
+ * active cells in the
+ * triangulation that are
+ * locally owned, i.e. that
+ * have a subdomain_id equal
+ * to
+ * locally_owned_subdomain(). Note
+ * that there may be more
+ * active cells in the
+ * triangulation stored on
+ * the present processor,
+ * such as for example ghost
+ * cells, or cells further
+ * away from the locally
+ * owned block of cells but
+ * that are needed to ensure
+ * that the triangulation
+ * that stores this
+ * processor's set of active
+ * cells still remains
+ * balanced with respect to
+ * the 2:1 size ratio of
+ * adjacent cells.
+ *
+ * As a consequence of the
+ * remark above, the result
+ * of this function is always
+ * smaller than or equal to the
+ * result of
+ * ::Triangulation::n_active_cells()
+ * which includes the active
+ * ghost and artificial cells
+ * (see also @ref
+ * GlossArtificialCell and
+ * @ref GlossGhostCell).
+ */
+ unsigned int n_locally_owned_active_cells () const;
+
+ /**
+ * Return the sum over all
+ * processors of the number
+ * of active cells owned by
+ * each processor. This
+ * equals the overall number
+ * of active cells in the
+ * distributed triangulation.
+ */
+ unsigned int n_global_active_cells () const;
+
+ /**
+ * Return the number of
+ * active cells owned by each
+ * of the MPI processes that
+ * contribute to this
+ * triangulation. The element
+ * of this vector indexed by
+ * locally_owned_subdomain()
+ * equals the result of
+ * n_locally_owned_active_cells().
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_active_cells_per_processor () const;
+
+ /**
+ * Return the MPI
+ * communicator used by this
+ * triangulation.
+ */
+ MPI_Comm get_communicator () const;
+
+ /**
+ * Return the local memory
+ * consumption in bytes.
+ */
+ virtual unsigned int memory_consumption () const;
+
+ /**
+ * Return the local memory
+ * consumption contained in the p4est
+ * data structures alone. This is
+ * already contained in
+ * memory_consumption() but made
+ * available separately for debugging
+ * purposes.
+ */
+ virtual unsigned int memory_consumption_p4est () const;
+
+ /**
+ * A collective operation that produces
+ * a sequence of output files with the
+ * given file base name that contain
+ * the mesh in VTK format.
+ *
+ * More than anything else, this
+ * function is useful for debugging the
+ * interface between deal.II and p4est.
+ */
+ void write_mesh_vtk (const char *file_basename) const;
+
+ /**
+ * Produce a check sum of the
+ * triangulation. This is a
+ * collective operation and
+ * is mostly useful for
+ * debugging purposes.
+ */
+ unsigned int get_checksum () const;
+
+ /**
+ * Used to inform the callbacks of
+ * register_data_attach() and
+ * notify_ready_to_unpack() how the
+ * cell with the given cell_iterator
+ * is going to change. Note that
+ * this may be different from the
+ * refine_flag() and coarsen_flag()
+ * in the cell_iterator because of
+ * refinement constraints that this
+ * machine does not see.
+ */
+ enum CellStatus
+ {
+ CELL_PERSIST, CELL_REFINE, CELL_COARSEN, CELL_INVALID
+ };
+
+ /**
+ * Register a function with
+ * the current Triangulation
+ * object that will be used
+ * to attach data to active
+ * cells before
+ * execute_coarsening_and_refinement(). In
+ * execute_coarsening_and_refinement()
+ * the Triangulation will
+ * call the given function
+ * pointer and provide
+ * @p size bytes to store
+ * data. If necessary, this data will be
+ * transferred to the new
+ * owner of that cell during repartitioning
+ * the tree. See
+ * notify_ready_to_unpack()
+ * on how to retrieve the
+ * data.
+ *
+ * Callers need to store the
+ * return value. It
+ * specifies an offset of the
+ * position at which data can
+ * later be retrieved during
+ * a call to
+ * notify_ready_to_unpack().
+ */
+ unsigned int
+ register_data_attach (const size_t size,
+ const std_cxx1x::function<void (const cell_iterator &,
+ const CellStatus,
+ void*)> & pack_callback);
+
+ /**
+ * The given function is called for
+ * each new active cell and supplies
+ * a pointer to the data saved with
+ * register_data_attach().
+ */
+ void
+ notify_ready_to_unpack (const unsigned int offset,
+ const std_cxx1x::function<void (const cell_iterator &,
+ const CellStatus,
+ const void*)> & unpack_callback);
+
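      /*
       * A sketch of the intended call sequence; pack_fn and unpack_fn are
       * hypothetical callbacks matching the signatures above, they are not
       * part of this interface:
       *
       *   const unsigned int offset
       *     = tria.register_data_attach (size_in_bytes, pack_fn);
       *   tria.execute_coarsening_and_refinement ();
       *   tria.notify_ready_to_unpack (offset, unpack_fn);
       */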
+ private:
+ /**
+ * MPI communicator to be
+ * used for the
+ * triangulation. We create a
+ * unique communicator for
+ * this class, which is a
+ * duplicate of the one
+ * passed to the constructor.
+ */
+ MPI_Comm mpi_communicator;
+
+ /**
+ * The subdomain id to be
+ * used for the current
+ * processor.
+ */
+ types::subdomain_id_t my_subdomain;
+
+ /**
+ * A flag that indicates whether the
+ * triangulation has actual content.
+ */
+ bool triangulation_has_content;
+
+ /**
+ * A structure that contains
+ * some numbers about the
+ * distributed triangulation.
+ */
+ struct NumberCache
+ {
+ std::vector<unsigned int> n_locally_owned_active_cells;
+ unsigned int n_global_active_cells;
+ };
+
+ NumberCache number_cache;
+
+ /**
+ * A data structure that holds the
+ * connectivity between trees. Since
+ * each tree is rooted in a coarse grid
+ * cell, this data structure holds the
+ * connectivity between the cells of
+ * the coarse grid.
+ */
+ typename dealii::internal::p4est::types<dim>::connectivity *connectivity;
+
+ /**
+ * A data structure that holds the
+ * local part of the global
+ * triangulation.
+ */
+ typename dealii::internal::p4est::types<dim>::forest *parallel_forest;
+
+ /**
+ * A flag that indicates
+ * whether refinement of a
+ * triangulation is currently
+ * in progress. We need this
+ * flag because the
+ * refinement_notification()
+ * function is called from
+ * Triangulation::execute_coarsening_and_refinement(),
+ * but
+ * refinement_notification()
+ * itself also calls
+ * Triangulation::execute_coarsening_and_refinement()
+ * and therefore gets called
+ * recursively again. While the
+ * first time we want to take
+ * over work to copy things
+ * from a refined p4est, the
+ * other times we don't want to
+ * get in the way as these
+ * latter calls to
+ * Triangulation::execute_coarsening_and_refinement()
+ * are simply there in order to
+ * re-create a triangulation
+ * that matches the p4est.
+ */
+ bool refinement_in_progress;
+
+
+ /**
+ * Bookkeeping for the data attached
+ * to cells via register_data_attach():
+ * the size of the data attached per
+ * cell, the number of registered
+ * callbacks, and the list of pack
+ * callbacks together with their
+ * offsets.
+ */
+ unsigned int attached_data_size;
+ unsigned int n_attached_datas;
+ typedef std_cxx1x::function<
+ void(typename Triangulation<dim,spacedim>::cell_iterator, CellStatus, void*)
+ > pack_callback_t;
+
+ typedef std::pair<unsigned int, pack_callback_t> callback_pair_t;
+
+ typedef std::list<callback_pair_t> callback_list_t;
+ callback_list_t attached_data_pack_callbacks;
+
+
+
+
+
+ /**
+ * Two arrays that store which p4est
+ * tree corresponds to which coarse
+ * grid cell and vice versa. We need
+ * these arrays because p4est goes with
+ * the original order of coarse cells
+ * when it sets up its forest, and then
+ * applies the Morton ordering within
+ * each tree. But if coarse grid cells
+ * are badly ordered this may mean that
+ * individual parts of the forest
+ * stored on a local machine may be
+ * split across coarse grid cells that
+ * are not geometrically
+ * close. Consequently, we apply a
+ * Cuthill-McKee preordering to ensure
+ * that the part of the forest stored
+ * by p4est is located on geometrically
+ * close coarse grid cells.
+ */
+ std::vector<unsigned int> coarse_cell_to_p4est_tree_permutation;
+ std::vector<unsigned int> p4est_tree_to_coarse_cell_permutation;
+
+ /**
+ * Return a pointer to the p4est
+ * tree that belongs to the given
+ * @p dealii_coarse_cell_index.
+ */
+ typename dealii::internal::p4est::types<dim>::tree *
+ init_tree(const int dealii_coarse_cell_index) const;
+
+ /**
+ * The function that computes the
+ * permutation between the two data
+ * storage schemes.
+ */
+ void setup_coarse_cell_to_p4est_tree_permutation ();
+
+ /**
+ * Take the contents of a newly created
+ * triangulation we are attached to and
+ * copy it to p4est data structures.
+ *
+ * This function exists in 2d
+ * and 3d variants.
+ */
+ void copy_new_triangulation_to_p4est (dealii::internal::int2type<2>);
+ void copy_new_triangulation_to_p4est (dealii::internal::int2type<3>);
+
+ /**
+ * Copy the local part of the refined
+ * forest from p4est into the attached
+ * triangulation.
+ */
+ void copy_local_forest_to_triangulation ();
+
+
+ /**
+ * Update the number_cache
+ * variable after mesh
+ * creation or refinement.
+ */
+ void update_number_cache ();
+
+ /**
+ * Internal function notifying all
+ * registered classes to attach their
+ * data before repartitioning
+ * occurs. Called from
+ * execute_coarsening_and_refinement().
+ */
+ void attach_mesh_data();
+
+
+ template <int, int> friend class dealii::internal::DoFHandler::Policy::ParallelDistributed;
+ };
+
+
+ /**
+ * Specialization of the general template
+ * for the 1d case. There is currently no
+ * support for distributing 1d
+ * triangulations. Consequently, all this
+ * class does is throw an exception.
+ */
+ template <int spacedim>
+ class Triangulation<1,spacedim> : public dealii::Triangulation<1,spacedim>
+ {
+ public:
+ /**
+ * Constructor. The argument denotes
+ * the MPI communicator to be used for
+ * the triangulation.
+ */
+ Triangulation (MPI_Comm mpi_communicator);
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+ /**
+ * Return the MPI
+ * communicator used by this
+ * triangulation.
+ */
+ MPI_Comm get_communicator () const;
+
+ /**
+ * Return the subdomain id of
+ * those cells that are owned
+ * by the current
+ * processor. All cells in
+ * the triangulation that do
+ * not have this subdomain id
+ * are either owned by
+ * another processor or have
+ * children that only exist
+ * on other processors.
+ */
+ types::subdomain_id_t locally_owned_subdomain () const;
+
+ /**
+ * Dummy arrays. This class
+ * isn't usable but the
+ * compiler wants to see
+ * these variables in a
+ * couple of places anyway.
+ */
+ std::vector<unsigned int> coarse_cell_to_p4est_tree_permutation;
+ std::vector<unsigned int> p4est_tree_to_coarse_cell_permutation;
+ };
+ }
+}
+
+
+#else // DEAL_II_USE_P4EST
+
+namespace parallel
+{
+ namespace distributed
+ {
+ /**
+ * Dummy class the compiler chooses for
+ * parallel distributed triangulations if
+ * we didn't actually configure deal.II
+ * with the p4est library. The existence
+ * of this class allows us to refer to
+ * parallel::distributed::Triangulation
+ * objects throughout the library even if
+ * it is disabled.
+ *
+ * Since the constructor of this class is
+ * private, no such objects can actually
+ * be created if we don't have p4est
+ * available.
+ */
+ template <int dim, int spacedim = dim>
+ class Triangulation : public dealii::Triangulation<dim,spacedim>
+ {
+ private:
+ /**
+ * Constructor.
+ */
+ Triangulation ();
+
+ public:
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+ /**
+ * Return the subdomain id of
+ * those cells that are owned
+ * by the current
+ * processor. All cells in
+ * the triangulation that do
+ * not have this subdomain id
+ * are either owned by
+ * another processor or have
+ * children that only exist
+ * on other processors.
+ */
+ types::subdomain_id_t locally_owned_subdomain () const;
+ };
+ }
+}
+
+
+#endif
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
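To give an impression of how the class is used in practice, here is a sketch along the lines of step-40; GridGenerator, a concrete value of dim, and a p4est-enabled configuration are assumed:

    parallel::distributed::Triangulation<dim> triangulation (MPI_COMM_WORLD);
    GridGenerator::hyper_cube (triangulation);
    triangulation.refine_global (3);
    //  each processor now owns roughly an equal share of the active cells;
    //  the remaining cells it stores are ghost or artificial cells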
namespace DoFHandler
{
struct Implementation;
+ namespace Policy
+ {
+ struct Implementation;
+ }
}
namespace hp
}
}
-
-
// note: the file dof_accessor.templates.h is included at the end of
// this file. this includes a lot of templates and thus makes
// compilation slower, but at the same time allows for more aggressive
* coordinates of the TriaAccessor.
*/
void copy_from (const TriaAccessorBase<structdim, DH::dimension, DH::space_dimension> &da);
-
+
/**
* Return an iterator pointing to
* the parent.
template <int dim, int spacedim> friend class DoFHandler;
template <int dim, int spacedim> friend class hp::DoFHandler;
+ friend class internal::DoFHandler::Policy::Implementation;
friend class internal::DoFHandler::Implementation;
friend class internal::hp::DoFHandler::Implementation;
+ friend class internal::DoFCellAccessor::Implementation;
};
*/
typename internal::DoFHandler::Iterators<DH>::cell_iterator
parent () const;
-
+
/**
* @name Accessing sub-objects and neighbors
*/
* @}
*/
- private:
+ /**
+ * Set the DoF indices of this
+ * cell to the given values. This
+ * function bypasses the DoF
+ * cache, if one exists for the
+ * given DoF handler class.
+ */
+ void set_dof_indices (const std::vector<unsigned int> &dof_indices);
+
/**
* Update the cache in which we
* store the dof indices of this
* function
*/
template <int dim, int spacedim> friend class DoFHandler;
-
friend class internal::DoFCellAccessor::Implementation;
};
Assert (false, ExcNotImplemented());
}
+ /**
+ * Implement setting dof
+ * indices on a
+ * cell. Currently not
+ * implemented for
+ * hp::DoFHandler objects.
+ */
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<1,spacedim> > &accessor,
+ const std::vector<unsigned int> &local_dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ Assert (local_dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<2; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
+ local_dof_indices[index]);
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
+ accessor.set_dof_index(d, local_dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<2,spacedim> > &accessor,
+ const std::vector<unsigned int> &local_dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ Assert (local_dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
+ local_dof_indices[index]);
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
+ accessor.line(line)->set_dof_index(d, local_dof_indices[index]);
+
+ for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
+ accessor.set_dof_index(d, local_dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ set_dof_indices (DoFCellAccessor<DoFHandler<3,spacedim> > &accessor,
+ const std::vector<unsigned int> &local_dof_indices)
+ {
+ Assert (accessor.has_children() == false,
+ ExcInternalError());
+
+ const unsigned int dofs_per_vertex = accessor.get_fe().dofs_per_vertex,
+ dofs_per_line = accessor.get_fe().dofs_per_line,
+ dofs_per_quad = accessor.get_fe().dofs_per_quad,
+ dofs_per_hex = accessor.get_fe().dofs_per_hex,
+ dofs_per_cell = accessor.get_fe().dofs_per_cell;
+
+ Assert (local_dof_indices.size() == dofs_per_cell,
+ ExcInternalError());
+
+ unsigned int index = 0;
+
+ for (unsigned int vertex=0; vertex<8; ++vertex)
+ for (unsigned int d=0; d<dofs_per_vertex; ++d, ++index)
+ accessor.set_vertex_dof_index(vertex,d,
+ local_dof_indices[index]);
+ // now copy dof numbers into the line. for
+ // lines with the wrong orientation, we have
+ // already made sure that we're ok by picking
+ // the correct vertices (this happens
+ // automatically in the vertex()
+ // function). however, if the line is in
+ // wrong orientation, we look at it in
+ // flipped orientation and we will have to
+ // adjust the shape function indices that we
+ // see to correspond to the correct
+ // (cell-local) ordering.
+ for (unsigned int line=0; line<12; ++line)
+ for (unsigned int d=0; d<dofs_per_line; ++d, ++index)
+ accessor.line(line)->set_dof_index(accessor.dof_handler->get_fe().
+ adjust_line_dof_index_for_line_orientation(d,
+ accessor.line_orientation(line)),
+ local_dof_indices[index]);
+ // now copy dof numbers into the face. for
+ // faces with the wrong orientation, we
+ // have already made sure that we're ok by
+ // picking the correct lines and vertices
+ // (this happens automatically in the
+ // line() and vertex() functions). however,
+ // if the face is in wrong orientation, we
+ // look at it in flipped orientation and we
+ // will have to adjust the shape function
+ // indices that we see to correspond to the
+ // correct (cell-local) ordering. The same
+ // applies, if the face_rotation or
+ // face_orientation is non-standard
+ for (unsigned int quad=0; quad<6; ++quad)
+ for (unsigned int d=0; d<dofs_per_quad; ++d, ++index)
+ accessor.quad(quad)->set_dof_index(accessor.dof_handler->get_fe().
+ adjust_quad_dof_index_for_face_orientation(d,
+ accessor.face_orientation(quad),
+ accessor.face_flip(quad),
+ accessor.face_rotation(quad)),
+ local_dof_indices[index]);
+ for (unsigned int d=0; d<dofs_per_hex; ++d, ++index)
+ accessor.set_dof_index(d, local_dof_indices[index]);
+
+ Assert (index == dofs_per_cell,
+ ExcInternalError());
+ }
+
+
+					   // implementation for the case of
+					   // hp::DoFHandler objects. this is
+					   // not currently implemented for
+					   // any space dimension
+ template <int dim, int spacedim>
+ static
+ void
+ set_dof_indices (const DoFCellAccessor<dealii::hp::DoFHandler<dim,spacedim> > &,
+ const std::vector<unsigned int> &)
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
/**
* A function that collects the
DoFCellAccessor<DH>::
get_dof_indices (std::vector<unsigned int> &dof_indices) const
{
+ Assert (this->is_artificial() == false,
+ ExcMessage ("Can't ask for DoF indices on artificial cells."));
internal::DoFCellAccessor::Implementation::get_dof_indices (*this, dof_indices);
}
DoFCellAccessor<DH>::get_dof_values (const InputVector &values,
Vector<number> &local_values) const
{
+ Assert (this->is_artificial() == false,
+	  ExcMessage ("Can't ask for DoF values on artificial cells."));
internal::DoFCellAccessor::Implementation
::get_dof_values (*this, values, local_values.begin(), local_values.end());
}
ForwardIterator local_values_begin,
ForwardIterator local_values_end) const
{
+ Assert (this->is_artificial() == false,
+	  ExcMessage ("Can't ask for DoF values on artificial cells."));
internal::DoFCellAccessor::Implementation
::get_dof_values (*this, values, local_values_begin, local_values_end);
}
ForwardIterator local_values_begin,
ForwardIterator local_values_end) const
{
+ Assert (this->is_artificial() == false,
+	  ExcMessage ("Can't ask for DoF values on artificial cells."));
internal::DoFCellAccessor::Implementation
::get_dof_values (*this, constraints, values,
local_values_begin, local_values_end);
DoFCellAccessor<DH>::set_dof_values (const Vector<number> &local_values,
OutputVector &values) const
{
+ Assert (this->is_artificial() == false,
+	  ExcMessage ("Can't set DoF values on artificial cells."));
internal::DoFCellAccessor::Implementation
::set_dof_values (*this, local_values, values);
}
#include <base/config.h>
#include <base/exceptions.h>
#include <base/smartpointer.h>
+#include <base/index_set.h>
#include <dofs/block_info.h>
#include <dofs/dof_iterator_selector.h>
+#include <dofs/number_cache.h>
#include <dofs/function_map.h>
+#include <dofs/dof_handler_policy.h>
#include <vector>
#include <map>
* algorithms.
*
*
+ * <h3>Interaction with distributed meshes</h3>
+ *
+ * Upon construction, this class takes a reference to a triangulation
+ * object. In most cases, this will be a reference to an object of
+ * type Triangulation, i.e. the class that represents triangulations
+ * that entirely reside on a single processor. However, it can also be
+ * of type parallel::distributed::Triangulation (see, for example,
+ * step-32, step-40 and in particular the @ref distributed module) in
+ * which case the DoFHandler object will proceed to only manage
+ * degrees of freedom on locally owned and ghost cells. This process
+ * is entirely transparent to the user.
+ *
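+ * As a minimal sketch (assuming a standard step-40 style setup; all
+ * variable names are illustrative only), using a DoFHandler on such a
+ * distributed triangulation looks exactly like the sequential case:
+ * @code
+ *   parallel::distributed::Triangulation<dim> triangulation (MPI_COMM_WORLD);
+ *   GridGenerator::hyper_cube (triangulation);
+ *   triangulation.refine_global (3);
+ *
+ *   FE_Q<dim>       fe (1);
+ *   DoFHandler<dim> dof_handler (triangulation);
+ *   dof_handler.distribute_dofs (fe);
+ *
+ *   // n_locally_owned_dofs() now counts only the DoFs owned by this
+ *   // MPI process, while n_dofs() returns the global number.
+ * @endcode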
+ *
* <h3>User defined renumbering schemes</h3>
*
* The DoFRenumbering class offers a number of renumbering schemes like the
public:
typedef typename IteratorSelector::CellAccessor cell_accessor;
typedef typename IteratorSelector::FaceAccessor face_accessor;
-
+
typedef typename IteratorSelector::raw_line_iterator raw_line_iterator;
typedef typename IteratorSelector::line_iterator line_iterator;
typedef typename IteratorSelector::active_line_iterator active_line_iterator;
* value.
*/
static const unsigned int default_fe_index = 0;
-
+
/**
* Standard constructor, not
* initializing any data. After
* triangulation to work on.
*/
DoFHandler ( const Triangulation<dim,spacedim> &tria);
-
+
/**
* Destructor.
*/
virtual ~DoFHandler ();
-
+
/**
* Assign a Triangulation and a
* FiniteElement to the
* in the same object.
*/
void initialize_local_block_info();
-
+
/**
* Clear all data of this object and
* especially delete the lock this object
* a list of new dof numbers for all the
* dofs.
*
- * @p new_numbers is an array of integers
- * with size equal to the number of dofs
- * on the present grid. It stores the new
- * indices after renumbering in the
- * order of the old indices.
- *
* This function is called by
				      * the functions in the
				      * DoFRenumbering namespace
* after computing the ordering
* of the degrees of freedom.
- * However, you can call this
- * function yourself, which is
- * necessary if a user wants to
- * implement an ordering scheme
- * herself, for example
- * downwind numbering.
+				      * It can, however, also be
+				      * called from user code.
*
- * The @p new_number array must
- * have a size equal to the
+				      * @arg new_numbers This array
+ * must have a size equal to
+ * the number of degrees of
+ * freedom owned by the current
+ * processor, i.e. the size
+ * must be equal to what
+ * n_locally_owned_dofs()
+ * returns. If only one
+ * processor participates in
+ * storing the current mesh,
+ * then this equals the total
* number of degrees of
- * freedom. Each entry must
- * state the new global DoF
- * number of the degree of
- * freedom referenced.
+ * freedom, i.e. the result of
+ * n_dofs(). The contents of
+ * this array are the new
+				      * global indices for each
+				      * degree of freedom listed in the
+ * IndexSet returned by
+ * locally_owned_dofs(). In the
+ * case of a sequential mesh
+ * this means that the array is
+ * a list of new indices for
+ * each of the degrees of
+ * freedom on the current
+ * mesh. In the case that we
+ * have a
+ * parallel::distributed::Triangulation
+ * underlying this DoFHandler
+ * object, the array is a list
+ * of new indices for all the
+ * locally owned degrees of
+ * freedom, enumerated in the
+ * same order as the currently
+ * locally owned DoFs. In other
+ * words, assume that degree of
+ * freedom <code>i</code> is
+ * currently locally owned,
+ * then
+ * <code>new_numbers[locally_owned_dofs().index_within_set(i)]</code>
+ * returns the new global DoF
+ * index of
+ * <code>i</code>. Since the
+ * IndexSet of
+ * locally_owned_dofs() is
+ * complete in the sequential
+ * case, the latter convention
+ * for the content of the array
+ * reduces to the former in the
+ * case that only one processor
+ * participates in the mesh.
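+				      *
+				      * As a minimal sketch of this
+				      * convention (all names are
+				      * illustrative only), a
+				      * user-defined renumbering could
+				      * be applied as follows:
+				      * @code
+				      *   std::vector<unsigned int> new_numbers (dof_handler.n_locally_owned_dofs());
+				      *   // fill new_numbers: the entry at position
+				      *   // locally_owned_dofs().index_within_set(i) receives
+				      *   // the new global index of the locally owned DoF i
+				      *   dof_handler.renumber_dofs (new_numbers);
+				      * @endcode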
*/
void renumber_dofs (const std::vector<unsigned int> &new_numbers);
/**
- * Return number of degrees of freedom.
- * Included in this number are those
+ * Return the global number of
+ * degrees of freedom. If the
+ * current object handles all
+ * degrees of freedom itself
+ * (even if you may intend to
+ * solve your linear system in
+ * parallel, such as in step-17
+ * or step-18), then this number
+ * equals the number of locally
+ * owned degrees of freedom since
+ * this object doesn't know
+ * anything about what you want
+ * to do with it and believes
+ * that it owns every degree of
+ * freedom it knows about.
+ *
+ * On the other hand, if this
+ * object operates on a
+ * parallel::distributed::Triangulation
+ * object, then this function
+ * returns the global number of
+ * degrees of freedom,
+ * accumulated over all
+ * processors.
+ *
+ * In either case, included in
+ * the returned number are those
* DoFs which are constrained by
- * hanging nodes.
+ * hanging nodes, see @ref constraints.
*/
unsigned int n_dofs () const;
* initialize_local_block_indices().
*/
const BlockInfo& block_info() const;
-
+
+
+ /**
+ * Return the number of
+ * degrees of freedom that
+ * belong to this
+ * process.
+ *
+ * If this is a sequential job,
+ * then the result equals that
+ * produced by n_dofs(). On the
+ * other hand, if we are
+ * operating on a
+ * parallel::distributed::Triangulation,
+ * then it includes only the
+ * degrees of freedom that the
+ * current processor owns. Note
+ * that in this case this does
+ * not include all degrees of
+ * freedom that have been
+ * distributed on the current
+ * processor's image of the mesh:
+ * in particular, some of the
+ * degrees of freedom on the
+ * interface between the cells
+ * owned by this processor and
+ * cells owned by other
+ * processors may be theirs, and
+ * degrees of freedom on ghost
+ * cells are also not necessarily
+ * included.
+ */
+ unsigned int n_locally_owned_dofs() const;
+
+ /**
+ * Return an IndexSet describing
+ * the set of locally owned DoFs
+ * as a subset of
+ * 0..n_dofs(). The number of
+ * elements of this set equals
+ * n_locally_owned_dofs().
+ */
+ const IndexSet & locally_owned_dofs() const;
+
+
+ /**
+ * Returns a vector that
+ * stores the locally owned
+ * DoFs of each processor. If
+ * you are only interested in
+ * the number of elements
+ * each processor owns then
+      * n_locally_owned_dofs_per_processor() is
+ * a better choice.
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element that equals the
+ * IndexSet representing the
+      * entire range [0,n_dofs()).
+ */
+ const std::vector<IndexSet> &
+ locally_owned_dofs_per_processor () const;
+
+ /**
+ * Return a vector that
+ * stores the number of
+ * degrees of freedom each
+ * processor that
+ * participates in this
+ * triangulation owns
+ * locally. The sum of all
+ * these numbers equals the
+ * number of degrees of
+ * freedom that exist
+ * globally, i.e. what
+ * n_dofs() returns.
+ *
+ * Each element of the vector
+ * returned by this function
+ * equals the number of
+ * elements of the
+ * corresponding sets
+ * returned by
+      * locally_owned_dofs_per_processor().
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element equal to n_dofs().
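+      *
+      * As an illustrative consistency
+      * check (assuming a DoFHandler
+      * object dof_handler, and using
+      * std::accumulate from <numeric>),
+      * the numbers returned by this
+      * function, by n_locally_owned_dofs()
+      * and by n_dofs() are related as
+      * follows:
+      * @code
+      *   const std::vector<unsigned int> &n_owned
+      *     = dof_handler.n_locally_owned_dofs_per_processor ();
+      *   Assert (std::accumulate (n_owned.begin(), n_owned.end(), 0U)
+      *           == dof_handler.n_dofs(),
+      *           ExcInternalError());
+      *   Assert (dof_handler.locally_owned_dofs().n_elements()
+      *           == dof_handler.n_locally_owned_dofs(),
+      *           ExcInternalError());
+      * @endcode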
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_dofs_per_processor () const;
+
/**
* Return a constant reference to
* the selected finite element
*/
virtual unsigned int memory_consumption () const;
- /**
- * Exception
- */
- DeclException0 (ExcInvalidTriangulation);
-
/**
* @todo Replace by ExcInternalError.
*/
int,
<< "You tried to do something on level " << arg1
<< ", but this level is empty.");
-
-
+
+
protected:
/**
* The object containing
* information on the block structure.
*/
BlockInfo block_info_object;
-
- private:
+
+ /**
+ * Array to store the indices for
+ * degrees of freedom located at
+ * vertices.
+ */
+ std::vector<unsigned int> vertex_dofs;
+
+
/**
* Address of the triangulation to
* work on.
*/
- SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> > tria;
+ SmartPointer<const Triangulation<dim,spacedim>,DoFHandler<dim,spacedim> >
+ tria;
/**
* Store a pointer to the finite element
* function (this clears all data of
* this object as well, though).
*/
- SmartPointer<const FiniteElement<dim,spacedim>,DoFHandler<dim,spacedim> > selected_fe;
+ SmartPointer<const FiniteElement<dim,spacedim>,DoFHandler<dim,spacedim> >
+ selected_fe;
+
+ /**
+ * An object that describes how degrees
+ * of freedom should be distributed and
+ * renumbered.
+ */
+ std_cxx1x::shared_ptr<internal::DoFHandler::Policy::PolicyBase<dim,spacedim> > policy;
+
+ /**
+ * A structure that contains all
+ * sorts of numbers that
+ * characterize the degrees of
+ * freedom this object works on.
+ *
+ * For most members of this
+ * structure, there is an
+ * accessor function in this
+ * class that returns its value.
+ */
+ internal::DoFHandler::NumberCache number_cache;
+
+ private:
/**
* Copy constructor. I can see no reason
*/
internal::DoFHandler::DoFFaces<dim> *faces;
- /**
- * Store the number of dofs
- * created last time.
- */
- unsigned int used_dofs;
-
- /**
- * Array to store the indices for
- * degrees of freedom located at
- * vertices.
- */
- std::vector<unsigned int> vertex_dofs;
-
/**
* Make accessor objects friends.
*/
friend class internal::DoFCellAccessor::Implementation;
friend class internal::DoFHandler::Implementation;
+ friend class internal::DoFHandler::Policy::Implementation;
};
unsigned int
DoFHandler<dim,spacedim>::n_dofs () const
{
- return used_dofs;
+ return number_cache.n_global_dofs;
+}
+
+
+template <int dim, int spacedim>
+unsigned int
+DoFHandler<dim, spacedim>::n_locally_owned_dofs() const
+{
+ return number_cache.n_locally_owned_dofs;
+}
+
+
+template <int dim, int spacedim>
+const IndexSet &
+DoFHandler<dim, spacedim>::locally_owned_dofs() const
+{
+ return number_cache.locally_owned_dofs;
+}
+
+
+template <int dim, int spacedim>
+const std::vector<unsigned int> &
+DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
+{
+ return number_cache.n_locally_owned_dofs_per_processor;
+}
+
+
+template <int dim, int spacedim>
+const std::vector<IndexSet> &
+DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor () const
+{
+ return number_cache.locally_owned_dofs_per_processor;
}
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__dof_handler_policy_h
+#define __deal2__dof_handler_policy_h
+
+
+
+#include <base/config.h>
+#include <base/exceptions.h>
+#include <base/template_constraints.h>
+
+#include <vector>
+#include <map>
+#include <set>
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class FiniteElement;
+template <int, int> class DoFHandler;
+
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ class NumberCache;
+
+ /**
+ * A namespace in which we define
+ * classes that describe how to
+ * distribute and renumber
+ * degrees of freedom.
+ */
+ namespace Policy
+ {
+ struct Implementation;
+
+ /**
+ * A class that implements policies for
+ * how the DoFHandler::distribute_dofs
+ * and DoFHandler::renumber_dofs
+ * functions should work.
+ */
+ template <int dim, int spacedim>
+ class PolicyBase
+ {
+ public:
+ /**
+ * Destructor.
+ */
+ virtual ~PolicyBase ();
+
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (const unsigned int offset,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+ };
+
+
+ /**
+ * This class implements the
+ * default policy for sequential
+ * operations, i.e. for the case where
+ * all cells get degrees of freedom.
+ */
+ template <int dim, int spacedim>
+ class Sequential : public PolicyBase<dim,spacedim>
+ {
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (const unsigned int offset,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ };
+
+
+ /**
+ * This class implements the
+ * policy for operations when
+ * we use a
+ * parallel::distributed::Triangulation
+ * object.
+ */
+ template <int dim, int spacedim>
+ class ParallelDistributed : public PolicyBase<dim,spacedim>
+ {
+ public:
+ /**
+ * Distribute degrees of freedom on
+ * the object given as last argument.
+ */
+ virtual
+ NumberCache
+ distribute_dofs (const unsigned int offset,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+
+ /**
+ * Renumber degrees of freedom as
+ * specified by the first argument.
+ */
+ virtual
+ NumberCache
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ };
+ }
+ }
+}
+
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+/*---------------------------- dof_handler_policy.h ---------------------------*/
+#endif
+/*---------------------------- dof_handler_policy.h ---------------------------*/
component_wise (hp::DoFHandler<dim> &dof_handler,
const std::vector<unsigned int> &target_component = std::vector<unsigned int> ());
-
/**
* Sort the degrees of freedom by
* component. It does the same
* the renumbering vector.
*/
template <int dim, class ITERATOR, class ENDITERATOR>
- static unsigned int
+ unsigned int
compute_component_wise (std::vector<unsigned int>& new_dof_indices,
- ITERATOR& start,
+ const ITERATOR& start,
const ENDITERATOR& end,
const std::vector<unsigned int> &target_component);
#include <base/index_set.h>
#include <lac/constraint_matrix.h>
#include <dofs/function_map.h>
+#include <dofs/dof_handler.h>
+#include <fe/fe.h>
#include <vector>
#include <set>
SparsityPattern &sparsity_pattern,
const ConstraintMatrix &constraints = ConstraintMatrix(),
const bool keep_constrained_dofs = true,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int);
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id);
/**
* Locate non-zero entries for
SparsityPattern &sparsity_pattern,
const ConstraintMatrix &constraints = ConstraintMatrix(),
const bool keep_constrained_dofs = true,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int);
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id);
/**
* @deprecated This is the old
/**
* This function does the same as
* the other with the same name,
- * but it gets a ConstraintMatrix
- * additionally.
- * This is for the case where you
- * have fluxes but constraints as
+     * but it additionally takes a
+     * ConstraintMatrix argument.
+ * This is for the case where you
+ * have fluxes but constraints as
* well.
* Not implemented for
* hp::DoFHandler.
template <class DH>
static void
extract_subdomain_dofs (const DH &dof_handler,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
std::vector<bool> &selected_dofs);
+
+ /**
+ * Extract the set of global DoF
+ * indices that are owned by the
+ * current processor. For regular
+ * DoFHandler objects, this set
+     * is the complete set of all DoF
+     * indices; for DoFHandler objects built
+     * on a parallel::distributed::Triangulation
+     * it is a subset thereof. In either case,
+     * it equals what
+ * DoFHandler::locally_owned_dofs()
+ * returns.
+ */
+ template <class DH>
+ static void
+ extract_locally_owned_dofs (const DH & dof_handler,
+ IndexSet & dof_set);
+
+
+ /**
+ * Extract the set of global DoF
+ * indices that are active on the
+ * current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is a superset of
+ * DoFHandler::locally_owned_dofs()
+ * and contains all DoF indices
+ * that live on all locally owned
+ * cells (including on the
+ * interface to ghost
+ * cells). However, it does not
+ * contain the DoF indices that
+ * are exclusively defined on
+ * ghost or artificial cells (see
+ * @ref GlossArtificialCell "the
+ * glossary").
+ *
+ * The degrees of freedom identified by
+ * this function equal those obtained
+ * from the
+ * dof_indices_with_subdomain_association()
+ * function when called with the locally
+ * owned subdomain id.
+ */
+ template <class DH>
+ static void
+ extract_locally_active_dofs (const DH & dof_handler,
+ IndexSet & dof_set);
+
+ /**
+ * Extract the set of global DoF
+     * indices that are relevant for
+     * the current DoFHandler. For
+ * regular DoFHandlers, these are
+ * all DoF indices, but for
+ * DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * this set is the union of
+ * DoFHandler::locally_owned_dofs()
+ * and the DoF indices on all
+ * ghost cells. In essence, it is
+ * the DoF indices on all cells
+ * that are not artificial (see
+ * @ref GlossArtificialCell "the glossary").
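+     *
+     * A typical, purely illustrative use
+     * is to compute the set of DoFs for
+     * which the current processor needs
+     * to know vector entries:
+     * @code
+     *   IndexSet locally_relevant_dofs;
+     *   DoFTools::extract_locally_relevant_dofs (dof_handler,
+     *                                            locally_relevant_dofs);
+     * @endcode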
+ */
+ template <class DH>
+ static void
+ extract_locally_relevant_dofs (const DH & dof_handler,
+ IndexSet & dof_set);
+
/**
* Extract a vector that represents the
* constant modes of the DoFHandler for
* consist of as many vectors as there
* are true arguments in
* <tt>component_select</tt>, each of
- * which will be one in one component and
+ * which will be one in one vector component and
* zero in all others. We store this
* object in a vector of vectors, where
* the outer vector is of the size of the
* number of selected components, and
* each inner vector has as many
- * components as there are degrees of
+ * components as there are (locally owned) degrees of
* freedom in the selected
* components. Note that any matrix
* associated with this null space must
* function, or use the
* <tt>GridTools::get_subdomain_association</tt>
* function.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+     * the same as those the
+ * DoFHandler class
+ * identifies as those it owns.
*/
template <class DH>
static void
get_subdomain_association (const DH &dof_handler,
- std::vector<unsigned int> &subdomain);
+ std::vector<types::subdomain_id_t> &subdomain);
/**
* Count how many degrees of freedom are
* @em cells with this subdomain, use the
* <tt>GridTools::count_cells_with_subdomain_association</tt>
* function.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+     * the same as those the
+ * DoFHandler class
+ * identifies as those it owns.
*/
template <class DH>
static unsigned int
count_dofs_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain);
-
- /**
- * Similar to the previous
- * function, but do not just
- * return the number of degrees
- * of freedom that are owned by a
- * given subdomain, but return a
- * set of their indices.
- */
- template <class DH>
- static
- IndexSet
- dof_indices_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain);
+ const types::subdomain_id_t subdomain);
/**
* Count how many degrees of freedom are
* therefore store how many degrees of
* freedom of each vector component are
* associated with the given subdomain.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+     * the same as those the
+ * DoFHandler class
+ * identifies as those it owns.
*/
template <class DH>
static void
count_dofs_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain,
+ const types::subdomain_id_t subdomain,
std::vector<unsigned int> &n_dofs_on_subdomain);
+ /**
+ * Return a set of indices that denotes
+ * the degrees of freedom that live on
+ * the given subdomain, i.e. that are on
+ * cells owned by the current
+ * processor. Note that this includes the
+ * ones that this subdomain "owns"
+ * (i.e. the ones for which
+ * get_subdomain_association() returns a
+ * value equal to the subdomain given
+ * here and that are selected by the
+     * extract_locally_owned_dofs() function) but
+ * also all of those that sit on the
+ * boundary between the given subdomain
+     * and other subdomains. In essence,
+ * degrees of freedom that sit on
+     * boundaries between subdomains will be
+ * in the index sets returned by this
+ * function for more than one subdomain.
+ *
+ * Note that this function is of
+ * questionable use for DoFHandler objects built on
+ * parallel::distributed::Triangulation
+ * since in that case ownership of
+ * individual degrees of freedom by MPI
+ * processes is controlled by the DoF
+ * handler object, not based on some
+ * geometric algorithm in conjunction
+ * with subdomain id. In particular, the
+ * degrees of freedom identified by the
+ * functions in this namespace as
+ * associated with a subdomain are not
+     * the same as those the
+ * DoFHandler class
+ * identifies as those it owns.
+ */
+ template <class DH>
+ static
+ IndexSet
+ dof_indices_with_subdomain_association (const DH &dof_handler,
+ const types::subdomain_id_t subdomain);
+
/**
* Count how many degrees of
* freedom out of the total
* step-31, and
* step-32 tutorial
* programs.
+ *
+ * @pre The dofs_per_block
+ * variable has as many
+ * components as the finite
+ * element used by the
+ * dof_handler argument has
+ * blocks, or alternatively as
+ * many blocks as are enumerated
+ * in the target_blocks argument
+ * if given.
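+				      *
+				      * As an illustrative sketch
+				      * (fe and dof_handler are
+				      * assumed to exist), this
+				      * function is typically called
+				      * as
+				      * @code
+				      *   std::vector<unsigned int> dofs_per_block (fe.n_blocks());
+				      *   DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);
+				      * @endcode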
*/
template <int dim, int spacedim>
static void
count_dofs_per_block (const DoFHandler<dim,spacedim>& dof_handler,
std::vector<unsigned int>& dofs_per_block,
- std::vector<unsigned int> target_block
+ std::vector<unsigned int> target_blocks
= std::vector<unsigned int>());
/**
--- /dev/null
+//----------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2006, 2007, 2008, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//----------------------------------------------------------------------
+#ifndef __deal2__number_cache_h
+#define __deal2__number_cache_h
+
+#include <base/config.h>
+#include <base/index_set.h>
+
+#include <vector>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ /**
+ * A structure used by the
+ * DoFHandler classes to store
+ * information about the degrees
+     * of freedom they deal with.
+ */
+ struct NumberCache
+ {
+ NumberCache ();
+
+ /**
+ * Determine an estimate for the
+ * memory consumption (in bytes) of
+ * this object.
+ */
+ unsigned int memory_consumption () const;
+
+ /**
+ * Total number of dofs,
+ * accumulated over all
+ * processors that may
+ * participate on this mesh.
+ */
+ unsigned int n_global_dofs;
+
+ /**
+ * Number of dofs owned by
+ * this MPI process. If this
+ * is a sequential
+ * computation, then this
+ * equals n_global_dofs.
+ */
+ unsigned int n_locally_owned_dofs;
+
+ /**
+ * An index set denoting the
+ * set of locally owned
+ * dofs. If this is a
+ * sequential computation,
+ * then it contains the
+ * entire range
+ * [0,n_global_dofs).
+ */
+ IndexSet locally_owned_dofs;
+
+ /**
+ * The number of dofs owned
+ * by each of the various MPI
+ * processes. If this is a
+ * sequential job, then the
+ * vector contains a single
+ * element equal to
+ * n_global_dofs.
+ */
+ std::vector<unsigned int> n_locally_owned_dofs_per_processor;
+
+ /**
+ * The dofs owned by each of
+ * the various MPI
+ * processes. If this is a
+ * sequential job, then the
+ * vector has a single
+ * element equal to
+ * locally_owned_dofs.
+ */
+ std::vector<IndexSet> locally_owned_dofs_per_processor;
+ };
+ }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif  // __deal2__number_cache_h
*
* @ingroup Iterators
*/
- class Active
+ class Active
{
public:
/**
template <class Iterator>
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter that evaluates to true if
* either the iterator points to an
template <class Iterator>
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter that evaluates to true if
bool operator () (const Iterator &i) const;
};
-
+
/**
* Filter for iterators that
* evaluates to true if either the
*
* @ingroup Iterators
*/
- class LevelEqualTo
+ class LevelEqualTo
{
public:
/**
*
* @ingroup Iterators
*/
- class SubdomainEqualTo
+ class SubdomainEqualTo
{
public:
/**
* shall have to be evaluated
* to true.
*/
- SubdomainEqualTo (const unsigned int subdomain_id);
+ SubdomainEqualTo (const types::subdomain_id_t subdomain_id);
/**
* Evaluation operator. Returns
* Stored value to compare the
* subdomain with.
*/
- const unsigned int subdomain_id;
+ const types::subdomain_id_t subdomain_id;
};
}
* {
* return (static_cast<unsigned int>(c->level()) == level);
* };
- * @endcode
+ * @endcode
* then
* @code
* std::bind2nd (std::ptr_fun(&level_equal_to<active_cell_iterator>), 3)
*
* Finally, classes can be predicates. The following class is one:
* @code
- * class Active
+ * class Active
* {
* public:
* template <class Iterator>
* class SubdomainEqualTo
* {
* public:
- * SubdomainEqualTo (const unsigned int subdomain_id)
+ * SubdomainEqualTo (const types::subdomain_id_t subdomain_id)
* : subdomain_id (subdomain_id) {};
*
* template <class Iterator>
* }
*
* private:
- * const unsigned int subdomain_id;
+ * const types::subdomain_id_t subdomain_id;
* };
* @endcode
* Objects like <code>SubdomainEqualTo(3)</code> can then be used as predicates.
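 *
 * As a small additional sketch (the triangulation object @p tria and the
 * template parameter @p dim are assumed to exist), such a predicate can be
 * used to construct a FilteredIterator directly:
 * @code
 *   FilteredIterator<typename Triangulation<dim>::active_cell_iterator>
 *     cell (IteratorFilters::SubdomainEqualTo(3), tria.begin_active());
 * @endcode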
* will automatically be advanced
* to the first cell that has.
*/
- template <typename Predicate>
+ template <typename Predicate>
FilteredIterator (Predicate p,
const BaseIterator &bi);
* Destructor.
*/
~FilteredIterator ();
-
+
/**
* Assignment operator. Copy the
* iterator value of the
*/
FilteredIterator &
set_to_next_positive (const BaseIterator &bi);
-
+
/**
* As above, but search for the
* previous iterator from @p bi
*/
FilteredIterator &
set_to_previous_positive (const BaseIterator &bi);
-
+
/**
* Compare for equality of the
* underlying iterator values of
*
* We do not compare the
* predicates.
- */
+ */
bool operator < (const FilteredIterator &fi) const;
/**
<< "The element " << arg1
<< " with which you want to compare or which you want to"
<< " assign from is invalid since it does not satisfy the predicate.");
-
+
private:
/**
* type of this pointer.
*/
virtual PredicateBase * clone () const;
-
+
private:
/**
* Copy of the predicate.
operator = (const BaseIterator &bi)
{
Assert ((bi.state() != IteratorState::valid) || (*predicate)(bi),
- ExcInvalidElement(bi));
+ ExcInvalidElement(bi));
BaseIterator::operator = (bi);
return *this;
}
while ((this->state() == IteratorState::valid) &&
( ! (*predicate)(*this)))
BaseIterator::operator++ ();
-
+
return *this;
}
while ((this->state() == IteratorState::valid) &&
( ! (*predicate)(*this)))
BaseIterator::operator-- ();
-
+
return *this;
}
operator ++ (int)
{
const FilteredIterator old_state = *this;
-
+
if (this->state() == IteratorState::valid)
do
BaseIterator::operator++ ();
operator -- (int)
{
const FilteredIterator old_state = *this;
-
+
if (this->state() == IteratorState::valid)
do
BaseIterator::operator-- ();
-namespace IteratorFilters
+namespace IteratorFilters
{
// ---------------- IteratorFilters::Active ---------
}
-// ---------------- IteratorFilters::LevelEqualTo ---------
+// ---------------- IteratorFilters::LevelEqualTo ---------
inline
LevelEqualTo::LevelEqualTo (const unsigned int level)
:
// ---------------- IteratorFilters::SubdomainEqualTo ---------
inline
- SubdomainEqualTo::SubdomainEqualTo (const unsigned int subdomain_id)
+ SubdomainEqualTo::SubdomainEqualTo (const types::subdomain_id_t subdomain_id)
:
subdomain_id (subdomain_id)
{}
bool
SubdomainEqualTo::operator () (const Iterator &i) const
{
- return (static_cast<unsigned int>(i->subdomain_id()) == subdomain_id);
+ return (i->subdomain_id() == subdomain_id);
}
}
* Same function, but for 1d.
*/
static
- double diameter (const Triangulation<1> &tria);
+ double diameter (const Triangulation<1> &tria);
/**
     * Same function, but for 1d in 2 space dimensions.
*/
static
double cell_measure (const std::vector<Point<dim> > &all_vertices,
const unsigned int (&vertex_indices)[GeometryInfo<dim>::vertices_per_cell]);
-
+
/**
* Remove vertices that are not
* referenced by any of the
* by that class, so we have to
* eliminate unused vertices
* beforehand.
- *
+ *
* Not implemented for the
* codimension one case.
*/
void delete_unused_vertices (std::vector<Point<spacedim> > &vertices,
std::vector<CellData<dim> > &cells,
SubCellData &subcelldata);
-
+
/**
* Remove vertices that are duplicated,
* due to the input of a structured grid,
SubCellData &subcelldata,
std::vector<unsigned int> &considered_vertices,
const double tol=1e-12);
-
+
/**
* Transform the vertices of the
* given triangulation by
* the unit cell. In this case,
* even points at a very small
* distance outside the unit cell
- * are allowed.
+ * are allowed.
*
* If a point lies on the
* boundary of two or more cells,
* outside an actual unit cell,
* due to numerical roundoff.
* Therefore, the point returned
- * by this function should
+ * by this function should
* be projected onto the unit cell,
* using GeometryInfo::project_to_unit_cell.
* This is not automatically performed
template <class Container>
static void
get_active_neighbors (const typename Container::active_cell_iterator &cell,
- std::vector<typename Container::active_cell_iterator> &active_neighbors);
+ std::vector<typename Container::active_cell_iterator> &active_neighbors);
/**
* Produce a sparsity pattern in which
static void
get_face_connectivity_of_cells (const Triangulation<dim, spacedim> &triangulation,
SparsityPattern &connectivity);
-
+
/**
* Use the METIS partitioner to generate
* a partitioning of the active cells
partition_triangulation (const unsigned int n_partitions,
const SparsityPattern &cell_connection_graph,
Triangulation<dim,spacedim> &triangulation);
-
+
/**
* For each active cell, return in the
* output array to which subdomain (as
template <int dim, int spacedim>
static void
get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
- std::vector<unsigned int> &subdomain);
+ std::vector<types::subdomain_id_t> &subdomain);
/**
* Count how many cells are uniquely
* associated with the given @p subdomain
* index.
*
- * This function will generate an
- * exception if there are no cells with
- * the given @p subdomain index.
+ * This function may return zero
+ * if there are no cells with the
+ * given @p subdomain index. This
+ * can happen, for example, if
+ * you try to partition a coarse
+ * mesh into more partitions (one
+ * for each processor) than there
+ * are cells in the mesh.
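+     *
+     * A small illustrative call (with
+     * my_subdomain denoting some
+     * subdomain id) would be
+     * @code
+     *   const unsigned int n_my_cells
+     *     = GridTools::count_cells_with_subdomain_association (triangulation,
+     *                                                          my_subdomain);
+     * @endcode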
*
* This function returns the number of
* cells associated with one
template <int dim, int spacedim>
static unsigned int
count_cells_with_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
- const unsigned int subdomain);
+ const types::subdomain_id_t subdomain);
/**
* Given two mesh containers
typename Triangulation<dim,spacedim>::DistortedCellList
fix_up_distorted_child_cells (const typename Triangulation<dim,spacedim>::DistortedCellList &distorted_cells,
Triangulation<dim,spacedim> &triangulation);
-
+
/**
* Exception
*/
{
Assert (triangulation.n_levels() == 1,
ExcTriangulationHasBeenRefined());
-
+
std::vector<bool> treated_vertices (triangulation.n_vertices(),
false);
GridTools::get_active_child_cells (const typename DH::cell_iterator& cell)
{
std::vector<typename DH::active_cell_iterator> child_cells;
-
+
if (cell->has_children())
{
for (unsigned int child=0;
}
-
+
#if deal_II_dimension == 1
template <class Container>
{
while (neighbor_child->has_children())
neighbor_child = neighbor_child->child (n==0 ? 1 : 0);
-
+
Assert (neighbor_child->neighbor(n==0 ? 1 : 0)==cell,
ExcInternalError());
}
}
template <int dim, int spacedim> class MGDoFHandler;
+
/*------------------------------------------------------------------------*/
/**
* Reset this triangulation into a
* virgin state by deleting all data.
*
- * Note that this operation is only allowed
- * if no subscriptions to this object exist
- * any more, such as DoFHandler objects
- * using it.
+ * Note that this operation is only
+ * allowed if no subscriptions to this
+ * object exist any more, such as
+ * DoFHandler objects using it.
*/
- void clear ();
+ virtual void clear ();
/**
* Sets the mesh smoothing to @p
* mesh_smoothing. This overrides
* the MeshSmoothing given to the
- * constructor. It is allow to
+ * constructor. It is allowed to
* call this function only if
- * triangulation is empty.
+ * the triangulation is empty.
*/
- void set_mesh_smoothing(const MeshSmoothing mesh_smoothing);
+ virtual void set_mesh_smoothing (const MeshSmoothing mesh_smoothing);
/**
* If @p dim==spacedim, assign a boundary
* RefinementListener. Adding
* listeners to the
* Triangulation allows other
- * classes to be informed, when
+ * classes to be informed when
* the Triangulation is refined.
*/
void add_refinement_listener (RefinementListener &listener) const;
/**
- * Removes a
+ * Remove a
* RefinementListener. When some
     * class no longer needs to be
* informed about refinements,
* Iterator to the first active
* cell on level @p level.
*
- * This function calls @p begin_active_line
- * in 1D and @p begin_active_quad in 2D.
+ * This function calls @p
+ * begin_active_line in 1D and @p
+ * begin_active_quad in 2D.
*/
active_cell_iterator begin_active(const unsigned int level = 0) const;
raw_cell_iterator last_raw () const;
/**
- * Return an iterator pointing to the last
- * cell of the level @p level, used or not.
+ * Return an iterator pointing to the
+ * last cell of the level @p level, used
+ * or not.
*
* This function calls @p last_raw_line
* in 1D and @p last_raw_quad in 2D.
raw_cell_iterator last_raw (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * used cell.
+ * Return an iterator pointing to the
+ * last used cell.
*
* This function calls @p last_line
* in 1D and @p last_quad in 2D.
cell_iterator last () const;
/**
- * Return an iterator pointing to the last
- * used cell on level @p level.
+ * Return an iterator pointing to the
+ * last used cell on level @p level.
*
* This function calls @p last_line
* in 1D and @p last_quad in 2D.
cell_iterator last (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * active cell.
+ * Return an iterator pointing to the
+ * last active cell.
*
- * This function calls @p last_active_line
- * in 1D and @p last_active_quad in 2D.
+ * This function calls @p
+ * last_active_line in 1D and @p
+ * last_active_quad in 2D.
*/
active_cell_iterator last_active () const;
/**
- * Return an iterator pointing to the last
- * active cell on level @p level.
+ * Return an iterator pointing to the
+ * last active cell on level @p level.
*
* This function calls @p last_active_line
* in 1D and @p last_active_quad in 2D.
* Iterator to the first active
* face.
*
- * This function calls @p begin_active_line
- * in 2D and @p begin_active_quad in 3D.
+ * This function calls @p
+ * begin_active_line in 2D and @p
+ * begin_active_quad in 3D.
*/
active_face_iterator begin_active_face() const;
raw_face_iterator end_face () const;
/**
- * Return a raw iterator which is past the end.
- * This is the same as <tt>end()</tt> and is
- * only for combatibility with older versions.
+ * Return a raw iterator which is past
+ * the end. This is the same as
+ * <tt>end()</tt> and is only for
+     * compatibility with older versions.
*/
raw_face_iterator end_raw_face () const;
/**
- * Return an active iterator which is past the end.
- * This is the same as <tt>end()</tt> and is
- * only for combatibility with older versions.
+ * Return an active iterator which is
+ * past the end. This is the same as
+ * <tt>end()</tt> and is only for
+     * compatibility with older versions.
*/
active_face_iterator end_active_face () const;
raw_face_iterator last_raw_face () const;
/**
- * Return an iterator pointing to the last
- * used face.
+ * Return an iterator pointing to the
+ * last used face.
*
- * This function calls @p last_line
- * in 2D and @p last_quad in 3D.
+ * This function calls @p last_line in
+ * 2D and @p last_quad in 3D.
*/
face_iterator last_face () const;
/**
- * Return an iterator pointing to the last
- * active face.
+ * Return an iterator pointing to the
+ * last active face.
*
- * This function calls @p last_active_line
- * in 2D and @p last_active_quad in 3D.
+ * This function calls @p
+ * last_active_line in 2D and @p
+ * last_active_quad in 3D.
*/
active_face_iterator last_active_face () const;
*/
/*@{*/
/**
- * Iterator to the first line, used
- * or not, on level @p level. If a level
+ * Iterator to the first line, used or
+ * not, on level @p level. If a level
* has no lines, a past-the-end iterator
* is returned.
     * If lines are not cells, i.e. for @p dim>1
line_iterator end_line (const unsigned int level) const;
/**
- * Return a raw iterator which is the first
- * iterator not on level. If @p level is
- * the last level, then this returns
- * <tt>end()</tt>.
+ * Return a raw iterator which is the
+ * first iterator not on level. If @p
+ * level is the last level, then this
+ * returns <tt>end()</tt>.
*/
raw_line_iterator end_raw_line (const unsigned int level) const;
/**
* Return an active iterator which is the
- * first iterator not on level. If @p level
- * is the last level, then this returns
- * <tt>end()</tt>.
+ * first iterator not on level. If @p
+ * level is the last level, then this
+ * returns <tt>end()</tt>.
*/
active_line_iterator end_active_line (const unsigned int level) const;
last_raw_line () const;
/**
- * Return an iterator pointing to the last
- * line of the level @p level, used or not.
+ * Return an iterator pointing to the
+ * last line of the level @p level, used
+ * or not.
*/
raw_line_iterator
last_raw_line (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * used line.
+ * Return an iterator pointing to the
+ * last used line.
*/
line_iterator
last_line () const;
/**
- * Return an iterator pointing to the last
- * used line on level @p level.
+ * Return an iterator pointing to the
+ * last used line on level @p level.
*/
line_iterator
last_line (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * active line.
+ * Return an iterator pointing to the
+ * last active line.
*/
active_line_iterator
last_active_line () const;
/**
- * Return an iterator pointing to the last
- * active line on level @p level.
+ * Return an iterator pointing to the
+ * last active line on level @p level.
*/
active_line_iterator
last_active_line (const unsigned int level) const;
/*@{
*/
/**
- * Iterator to the first quad, used
- * or not, on the given level. If a level
+ * Iterator to the first quad, used or
+ * not, on the given level. If a level
* has no quads, a past-the-end iterator
- * is returned.
- * If quads are no cells, i.e. for $dim>2$
- * no level argument must be given.
+     * is returned. If quads are not cells,
+     * i.e. for $dim>2$, no level argument
+     * may be given.
*/
raw_quad_iterator
quad_iterator end_quad (const unsigned int level) const;
/**
- * Return a raw iterator which is the first
- * iterator not on level. If @p level is
- * the last level, then this returns
- * <tt>end()</tt>.
+ * Return a raw iterator which is the
+ * first iterator not on level. If @p
+ * level is the last level, then this
+ * returns <tt>end()</tt>.
*/
raw_quad_iterator end_raw_quad (const unsigned int level) const;
/**
* Return an active iterator which is the
- * first iterator not on level. If @p level
- * is the last level, then this returns
- * <tt>end()</tt>.
+ * first iterator not on level. If @p
+ * level is the last level, then this
+ * returns <tt>end()</tt>.
*/
active_quad_iterator end_active_quad (const unsigned int level) const;
last_raw_quad () const;
/**
- * Return an iterator pointing to the last
- * quad of the level @p level, used or not.
+ * Return an iterator pointing to the
+ * last quad of the level @p level, used
+ * or not.
*/
raw_quad_iterator
last_raw_quad (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * used quad.
+ * Return an iterator pointing to the
+ * last used quad.
*/
quad_iterator
last_quad () const;
/**
- * Return an iterator pointing to the last
- * used quad on level @p level.
+ * Return an iterator pointing to the
+ * last used quad on level @p level.
*/
quad_iterator
last_quad (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * active quad.
+ * Return an iterator pointing to the
+ * last active quad.
*/
active_quad_iterator
last_active_quad () const;
/**
- * Return an iterator pointing to the last
- * active quad on level @p level.
+ * Return an iterator pointing to the
+ * last active quad on level @p level.
*/
active_quad_iterator
last_active_quad (const unsigned int level) const;
/**
* Return an active iterator which is the
- * first iterator not on level. If @p level
- * is the last level, then this returns
- * <tt>end()</tt>.
+ * first iterator not on level. If @p
+ * level is the last level, then this
+ * returns <tt>end()</tt>.
*/
active_hex_iterator end_active_hex (const unsigned int level) const;
last_raw_hex () const;
/**
- * Return an iterator pointing to the last
- * hex of the level @p level, used or not.
+ * Return an iterator pointing to the
+ * last hex of the level @p level, used
+ * or not.
*/
raw_hex_iterator
last_raw_hex (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * used hex.
+ * Return an iterator pointing to the
+ * last used hex.
*/
hex_iterator
last_hex () const;
/**
- * Return an iterator pointing to the last
- * used hex on level @p level.
+ * Return an iterator pointing to the
+ * last used hex on level @p level.
*/
hex_iterator
last_hex (const unsigned int level) const;
/**
- * Return an iterator pointing to the last
- * active hex.
+ * Return an iterator pointing to the
+ * last active hex.
*/
active_hex_iterator
last_active_hex () const;
/**
- * Return an iterator pointing to the last
- * active hex on level @p level.
+ * Return an iterator pointing to the
+ * last active hex on level @p level.
*/
active_hex_iterator
last_active_hex (const unsigned int level) const;
unsigned int n_raw_hexs (const unsigned int level) const;
/**
- * Return total number of used hexahedra,
- * active or not.
+ * Return total number of used
+ * hexahedra, active or not.
*/
unsigned int n_hexs() const;
/**
- * Return total number of used hexahedra,
- * active or not on level @p level.
+ * Return total number of used
+ * hexahedra, active or not on level @p
+ * level.
*/
unsigned int n_hexs(const unsigned int level) const;
/**
- * Return total number of active hexahedra,
- * active or not.
+     * Return total number of active
+     * hexahedra.
*/
unsigned int n_active_hexs() const;
/**
- * Return total number of active hexahedra,
- * active or not on level @p level.
+     * Return total number of active
+     * hexahedra on level @p level.
*/
unsigned int n_active_hexs(const unsigned int level) const;
/**
* Return total number of used cells,
- * active or not.
- * Maps to <tt>n_lines()</tt> in one space
+ * active or not. Maps to
+ * <tt>n_lines()</tt> in one space
* dimension and so on.
*/
unsigned int n_cells () const;
/**
* Return total number of used cells,
* active or not, on level @p level.
- * Maps to <tt>n_lines(level)</tt> in one space
- * dimension and so on.
+ * Maps to <tt>n_lines(level)</tt> in
+ * one space dimension and so on.
*/
unsigned int n_cells (const unsigned int level) const;
/**
* Return total number of active cells.
- * Maps to <tt>n_active_lines()</tt> in one space
- * dimension and so on.
+ * Maps to <tt>n_active_lines()</tt> in
+ * one space dimension and so on.
*/
unsigned int n_active_cells () const;
/**
- * Return total number of active cells
- * on level @p level.
- * Maps to <tt>n_active_lines(level)</tt> in one
+ * Return total number of active cells on
+ * level @p level. Maps to
+ * <tt>n_active_lines(level)</tt> in one
* space dimension and so on.
*/
unsigned int n_active_cells (const unsigned int level) const;
<< ", but this level is empty.");
/*@}*/
protected:
+ /**
+ * Do some smoothing in the process
+ * of refining the triangulation. See
+ * the general doc of this class for
+ * more information about this.
+ */
+ MeshSmoothing smooth_grid;
/**
* Write a bool vector to the given stream,
std::istream &in);
private:
-
/**
- * The (public) clear() will only
- * work when the triangulation is
- * not subscriped to by other
- * users. The
- * clear_despite_subscriptions()
- * function now allows the
- * triangulation being cleared
- * even when there are
+ * The (public) function clear() will
+ * only work when the triangulation is
+ * not subscribed to by other users. The
+ * clear_despite_subscriptions() function
+     * now allows the triangulation to be
+     * cleared even when there are
* subscriptions.
*
     * Make sure you know what you
*/
bool anisotropic_refinement;
- /**
- * Do some smoothing in the process
- * of refining the triangulation. See
- * the general doc of this class for
- * more information about this.
- */
- MeshSmoothing smooth_grid;
-
+
/**
* A flag that determines whether
* we are to check for distorted
*/
type ()
{}
-
+
/**
* Dummy
* constructor. Only
*/
static const unsigned int structure_dimension = structdim;
+ protected:
/**
* Declare the data type that
* this accessor class expects to
* returned.
*/
int parent_index () const;
-
+
/**
* @name Accessing sub-objects
*/
double diameter () const;
- /**
+ /**
* Length of an object in the direction
* of the given axis, specified in the
* local coordinate system. See the
* in the library.
*/
void clear_refinement_case () const;
-
+
/**
* Set the parent of a cell.
*/
/**
* Return the subdomain id of
* this cell.
+ *
+ * See the @ref GlossSubdomainId
+ * "glossary" for more
+ * information. This function
+ * should not be called if you
+ * use a
+ * parallel::distributed::Triangulation
+ * object.
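+	 *
+	 * As an illustrative sketch
+	 * (assuming a sequential
+	 * Triangulation<2> object tria that
+	 * has been partitioned, for example
+	 * with GridTools::partition_triangulation),
+	 * one could count the cells of one
+	 * partition like this:
+	 * @code
+	 *   unsigned int n_cells_in_partition_3 = 0;
+	 *   for (Triangulation<2>::active_cell_iterator cell = tria.begin_active();
+	 *        cell != tria.end(); ++cell)
+	 *     if (cell->subdomain_id() == 3)
+	 *       ++n_cells_in_partition_3;
+	 * @endcode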
*/
- unsigned int subdomain_id () const;
+ types::subdomain_id_t subdomain_id () const;
/**
* Set the subdomain id of this
* cell.
+ *
+ * See the @ref GlossSubdomainId
+ * "glossary" for more
+ * information. This function
+ * should not be called if you
+ * use a
+ * parallel::distributed::Triangulation
+ * object.
+ */
+ void set_subdomain_id (const types::subdomain_id_t new_subdomain_id) const;
+
+ /**
+ * Set the subdomain id of this
+ * cell and all its children (and
+ * grand-children, and so on) to
+ * the given value.
+ *
+ * See the @ref GlossSubdomainId
+ * "glossary" for more
+ * information. This function
+ * should not be called if you
+ * use a
+ * parallel::distributed::Triangulation
+ * object.
*/
- void set_subdomain_id (const unsigned int new_subdomain_id) const;
-
+ void recursively_set_subdomain_id (const types::subdomain_id_t new_subdomain_id) const;
+
/**
* Return an iterator to the
* parent.
*/
TriaIterator<CellAccessor<dim,spacedim> >
parent () const;
-
+
/**
* @}
*/
* Test whether the cell has children
* (this is the criterion for activity
* of a cell).
+ *
+ * See the @ref GlossActive "glossary"
+ * for more information.
*/
bool active () const;
+ /**
+ * Return whether this cell
+ * exists in the global mesh but
+ * (i) is owned by another
+ * processor, i.e. has a
+ * subdomain_id different from
+ * the one the current processor
+ * owns and (ii) is adjacent to a
+ * cell owned by the current
+ * processor.
+ *
+ * This function only makes sense
+ * if the triangulation used is
+ * of kind
+ * parallel::distributed::Triangulation. In
+ * all other cases, the returned
+ * value is always false.
+ *
+ * See the @ref GlossGhostCell
+ * "glossary" and the @ref
+ * distributed module for more
+ * information.
+ */
+ bool is_ghost () const;
+
+ /**
+ * Return whether this cell is
+ * artificial, i.e. it isn't one
+ * of the cells owned by the
+ * current processor, and it also
+ * doesn't border on one. As a
+ * consequence, it exists in the
+ * mesh to ensure that each
+ * processor has all coarse mesh
+ * cells and that the 2:1 ratio
+ * of neighboring cells is
+ * maintained, but it is not one
+ * of the cells we should work on
+ * on the current processor. In
+ * particular, there is no
+ * guarantee that this cell
+ * isn't, in fact, further
+ * refined on one of the other
+ * processors.
+ *
+ * This function only makes sense
+ * if the triangulation used is
+ * of kind
+ * parallel::distributed::Triangulation. In
+ * all other cases, the returned
+ * value is always false.
+ *
+ * See the @ref
+ * GlossArtificialCell "glossary"
+ * and the @ref distributed
+ * module for more information.
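+      *
+      * As a minimal sketch (assuming a
+      * DoFHandler object dof_handler
+      * built on a distributed
+      * triangulation), a typical loop
+      * skips all cells the current
+      * processor does not own:
+      * @code
+      *   typename DoFHandler<dim>::active_cell_iterator
+      *     cell = dof_handler.begin_active(),
+      *     endc = dof_handler.end();
+      *   for (; cell!=endc; ++cell)
+      *     if (!cell->is_ghost() && !cell->is_artificial())
+      *       {
+      *         // do work only on cells owned by this processor
+      *       }
+      * @endcode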
+ */
+ bool is_artificial () const;
+
/**
* Test whether the point @p p
* is inside this cell. Points on
#include <base/config.h>
+#include <base/geometry_info.h>
#include <base/template_constraints.h>
#include <grid/tria.h>
#include <grid/tria_levels.h>
#include <grid/tria_iterator.h>
#include <grid/tria_accessor.h>
#include <grid/tria_iterator.templates.h>
-#include <base/geometry_info.h>
+#include <distributed/tria.h>
#include <cmath>
DEAL_II_NAMESPACE_OPEN
+
/*------------------------ Functions: TriaAccessorBase ---------------------------*/
template <int structdim, int dim, int spacedim>
present_index (index),
tria (tria)
{
-
+
// non-cells have no level, so a 0
// should have been passed, or a -1
// for an end-iterator, or -2 for
present_level = a.present_level;
present_index = a.present_index;
tria = a.tria;
-
+
if (structdim != dim)
{
Assert ((present_level == 0) || (present_level == -1) || (present_level == -2),
present_level = a.present_level;
present_index = a.present_index;
tria = a.tria;
-
+
if (structdim != dim)
{
Assert ((present_level == 0) || (present_level == -1) || (present_level == -2),
if (structdim != dim)
{
- if (this->present_index < 0)
+ if (this->present_index < 0)
this->present_index = -1;
}
else
{
- while (this->present_index < 0)
+ while (this->present_index < 0)
{
// no -> go one level down
--this->present_level;
// lowest level reached?
- if (this->present_level == -1)
+ if (this->present_level == -1)
{
// return with past the end pointer
this->present_level = this->present_index = -1;
{
return &faces->quads;
}
-
+
inline
internal::Triangulation::TriaObjects<internal::Triangulation::TriaObject<1> >*
get_objects (internal::Triangulation::TriaFaces<1>*,
Assert (false, ExcInternalError());
return 0;
}
-
+
inline
internal::Triangulation::TriaObjects<internal::Triangulation::TriaObject<2> >*
get_objects (internal::Triangulation::TriaFaces<2>*,
Assert (false, ExcInternalError());
return 0;
}
-
+
inline
internal::Triangulation::TriaObjects<internal::Triangulation::TriaObject<3> >*
get_objects (internal::Triangulation::TriaFaces<3>*,
Assert (false, ExcInternalError());
return 0;
}
-
+
/**
* This function should never be
* used, but we need it for the
// enclosing namespace
// internal::TriaAccessor
using dealii::TriaAccessor;
-
+
/**
* A class with the same purpose as the similarly named class of the
* Triangulation class. See there for more information.
return accessor.objects().cells[accessor.present_index].face(i);
}
-
+
template <int dim, int spacedim>
static
unsigned int
const unsigned int quad_index=lookup_table[i][0];
const unsigned int std_line_index=lookup_table[i][1];
-
+
const unsigned int line_index=GeometryInfo<dim>::standard_to_real_face_line(
std_line_index,
accessor.face_orientation(quad_index),
return numbers::invalid_unsigned_int;
}
-
+
template <int dim, int spacedim>
static
unsigned int
{
return true;
}
-
+
template <int dim, int spacedim>
static
{
return true;
}
-
+
template <int dim, int spacedim>
static
< accessor.tria->levels[accessor.present_level]
->cells.face_flips.size(),
ExcInternalError());
-
+
return (accessor.tria->levels[accessor.present_level]
->cells.face_flips[accessor.present_index *
GeometryInfo<3>::faces_per_cell
{
return true;
}
-
+
template <int dim, int spacedim>
static
< accessor.tria->levels[accessor.present_level]
->cells.face_rotations.size(),
ExcInternalError());
-
+
return (accessor.tria->levels[accessor.present_level]
->cells.face_rotations[accessor.present_index *
GeometryInfo<3>::faces_per_cell
{
return true;
}
-
+
template <int spacedim>
static
//TODO: why is this face_orientation, not line_orientation as in the setter function?
return accessor.tria->faces->quads.face_orientation(accessor.present_index, line);
}
-
+
template <int dim, int spacedim>
static
const unsigned int quad_index=lookup_table[line][0];
const unsigned int std_line_index=lookup_table[line][1];
-
+
const unsigned int line_index=GeometryInfo<dim>::standard_to_real_face_line(
std_line_index,
accessor.face_orientation(quad_index),
accessor.face_flip(quad_index),
accessor.face_rotation(quad_index));
-
+
// now we got to the correct line and ask
// the quad for its line_orientation. however, if
// the face is rotated, it might be possible,
{ false, false }},
{ { true, false },
{ false, true }}}};
-
-
+
+
return (accessor.quad(quad_index)
->line_orientation(line_index)
== bool_table[std_line_index/2]
{
Assert (false, ExcInternalError());
}
-
+
template <int dim, int spacedim>
static
{
Assert (false, ExcInternalError());
}
-
+
template <int dim, int spacedim>
static
< accessor.tria->levels[accessor.present_level]
->cells.face_flips.size(),
ExcInternalError());
-
+
accessor.tria->levels[accessor.present_level]
->cells.face_flips[accessor.present_index *
GeometryInfo<3>::faces_per_cell
{
Assert (false, ExcInternalError());
}
-
+
template <int dim, int spacedim>
static
< accessor.tria->levels[accessor.present_level]
->cells.face_rotations.size(),
ExcInternalError());
-
+
accessor.tria->levels[accessor.present_level]
->cells.face_rotations[accessor.present_index *
GeometryInfo<3>::faces_per_cell
{
Assert (false, ExcInternalError());
}
-
+
template <int spacedim>
static
+ line]
= value;
}
-
+
template <int dim, int spacedim>
static
//
// second index: vertex index to be switched
// (or not)
-
+
static const unsigned int switch_table[2][2]={{1,0},{0,1}};
-
+
return accessor.line(corner%2)
->vertex_index(switch_table[accessor.line_orientation(corner%2)][corner/2]);
}
template <int structdim, int dim, int spacedim>
inline
bool
-TriaAccessor<structdim,dim,spacedim>::used () const
+TriaAccessor<structdim,dim,spacedim>::used () const
{
Assert (this->state() == IteratorState::valid,
TriaAccessorExceptions::ExcDereferenceInvalidObject());
{
Assert (corner<GeometryInfo<structdim>::vertices_per_cell,
ExcIndexRange(corner,0,GeometryInfo<structdim>::vertices_per_cell));
-
+
return internal::TriaAccessor::Implementation::vertex_index (*this, corner);
}
TriaAccessor<structdim,dim,spacedim>::face_orientation (const unsigned int face) const
{
Assert (used(), TriaAccessorExceptions::ExcCellNotUsed());
-
+
return internal::TriaAccessor::Implementation::face_orientation (*this, face);
}
TriaAccessor<structdim,dim,spacedim>::face_flip (const unsigned int face) const
{
Assert (used(), TriaAccessorExceptions::ExcCellNotUsed());
-
+
return internal::TriaAccessor::Implementation::face_flip (*this, face);
}
const bool value) const
{
Assert (used(), TriaAccessorExceptions::ExcCellNotUsed());
-
+
internal::TriaAccessor::Implementation::set_face_orientation (*this, face, value);
}
const bool value) const
{
Assert (used(), TriaAccessorExceptions::ExcCellNotUsed());
-
+
internal::TriaAccessor::Implementation::set_face_flip (*this, face, value);
}
Assert (has_children(), TriaAccessorExceptions::ExcCellHasNoChildren());
Assert (i<n_children(),
ExcIndexRange (i, 0, n_children()));
-
+
// each set of two children are stored
// consecutively, so we only have to find
// the location of the set of children
Assert (this_refinement_case != RefinementCase<2>::no_refinement,
TriaAccessorExceptions::ExcCellHasNoChildren());
-
+
if (this_refinement_case == RefinementCase<2>::cut_xy)
return child_index(i);
else if ((this_refinement_case == RefinementCase<2>::cut_x)
Assert (this_refinement_case != RefinementCase<2>::no_refinement,
TriaAccessorExceptions::ExcCellHasNoChildren());
-
+
if (this_refinement_case == RefinementCase<2>::cut_xy)
return child(i);
else if ((this_refinement_case == RefinementCase<2>::cut_x)
return child(i/2)->child(i%2);
else
Assert(false,
- ExcMessage("This cell has no grandchildren equivalent to isotropic refinement"));
+ ExcMessage("This cell has no grandchildren equivalent to isotropic refinement"));
}
default:
// each set of two children are stored
// consecutively, so we only have to find
// the location of the set of children
- const unsigned int n_sets_of_two = GeometryInfo<structdim>::max_children_per_cell/2;
+ const unsigned int n_sets_of_two = GeometryInfo<structdim>::max_children_per_cell/2;
Assert ((index==-1) ||
(i==0 && !this->has_children() && (index>=0)) ||
(i>0 && this->has_children() && (index>=0) &&
this->objects().children[n_sets_of_two*this->present_index+i/2] == -1),
TriaAccessorExceptions::ExcCantSetChildren(index));
-
+
this->objects().children[n_sets_of_two*this->present_index+i/2] = index;
}
template <int structdim, int dim, int spacedim>
inline
void
-TriaAccessor<structdim,dim,spacedim>::set_user_flag () const
+TriaAccessor<structdim,dim,spacedim>::set_user_flag () const
{
Assert (this->used(), TriaAccessorExceptions::ExcCellNotUsed());
this->objects().user_flags[this->present_index] = true;
Assert (this->used(), TriaAccessorExceptions::ExcCellNotUsed());
this->objects().user_flags[this->present_index] = false;
}
-
+
template <int structdim, int dim, int spacedim>
template <int dim, int spacedim>
inline
TriaIterator<TriaAccessor<dim-1, dim, spacedim> >
-CellAccessor<dim,spacedim>::face (const unsigned int i) const
+CellAccessor<dim,spacedim>::face (const unsigned int i) const
{
switch (dim)
{
template <int dim, int spacedim>
inline
unsigned int
-CellAccessor<dim,spacedim>::face_index (const unsigned int i) const
+CellAccessor<dim,spacedim>::face_index (const unsigned int i) const
{
switch (dim)
{
case 2:
return this->line_index(i);
-
+
case 3:
return this->quad_index(i);
-
+
default:
return numbers::invalid_unsigned_int;
}
template <int dim, int spacedim>
inline
int
-CellAccessor<dim,spacedim>::neighbor_index (const unsigned int i) const
+CellAccessor<dim,spacedim>::neighbor_index (const unsigned int i) const
{
AssertIndexRange (i,GeometryInfo<dim>::faces_per_cell);
return this->tria->levels[this->present_level]->
Assert (this->used() && this->active(), ExcRefineCellNotActive());
Assert (!coarsen_flag_set(),
ExcCellFlaggedForCoarsening());
-
+
this->tria->levels[this->present_level]->refine_flags[this->present_index] = refinement_case;
}
ExcIndexRange(face_no,0,GeometryInfo<dim>::faces_per_cell));
Assert (face_refinement_case < RefinementCase<dim>::isotropic_refinement+1,
ExcIndexRange(face_refinement_case,0,RefinementCase<dim>::isotropic_refinement+1));
-
+
// the new refinement case is a combination
// of the minimum required one for the given
// face refinement and the already existing
// flagged refinement case
RefinementCase<dim> old_ref_case = refine_flag_set();
- RefinementCase<dim>
+ RefinementCase<dim>
new_ref_case = (old_ref_case
| GeometryInfo<dim>::min_cell_refinement_case_for_face_refinement(face_refinement_case,
face_no,
{
Assert(face(face_no)->child(1)->refinement_case()==RefinementCase<2>::cut_y,
ExcInternalError());
- return internal::SubfaceCase<3>::case_x1y2y;
+ return internal::SubfaceCase<3>::case_x1y2y;
}
else
return internal::SubfaceCase<3>::case_x1y;
{
Assert(face(face_no)->child(1)->refinement_case()==RefinementCase<2>::cut_x,
ExcInternalError());
- return internal::SubfaceCase<3>::case_y1x2x;
+ return internal::SubfaceCase<3>::case_y1x2x;
}
else
return internal::SubfaceCase<3>::case_y1x;
{
Assert (this->used() && this->active(), ExcRefineCellNotActive());
Assert (!refine_flag_set(), ExcCellFlaggedForRefinement());
-
+
this->tria->levels[this->present_level]->coarsen_flags[this->present_index] = true;
}
+template <int dim, int spacedim>
+inline
+bool
+CellAccessor<dim,spacedim>::is_ghost () const
+{
+#ifndef DEAL_II_USE_P4EST
+ return false;
+#else
+ const types::subdomain_id_t subdomain = this->subdomain_id();
+ if (subdomain == types::artificial_subdomain_id)
+ return false;
+
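+  // the notion of ghost cells only exists for distributed triangulations;
+  // find out whether we are working on one by attempting a dynamic_cast
+  // (if we are not, no cell can ever be a ghost cell)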
+ const parallel::distributed::Triangulation<dim,spacedim> *pdt
+ = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
+
+ if (pdt == 0)
+ return false;
+ else
+ return (subdomain != pdt->locally_owned_subdomain());
+#endif
+}
+
+
+
+template <int dim, int spacedim>
+inline
+bool
+CellAccessor<dim,spacedim>::is_artificial () const
+{
+#ifndef DEAL_II_USE_P4EST
+ return false;
+#else
+ return (this->subdomain_id() == types::artificial_subdomain_id);
+#endif
+}
+
+
+
template <int dim, int spacedim>
inline
unsigned int
// return this value as the
// neighbor is not coarser
return n2;
- else
+ else
// the neighbor is coarser
return neighbor_of_coarser_neighbor(neighbor).first;
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2010 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* coarsened.
*/
std::vector<bool> coarsen_flags;
-
+
/**
* Levels and indices of the neighbors
* of the cells. Convention is, that the
* cells with a given subdomain
* number.
*/
- std::vector<unsigned int> subdomain_ids;
-
+ std::vector<types::subdomain_id_t> subdomain_ids;
+
/**
* One integer for every consecutive
* pair of cells to store which
* index their parent has.
*/
std::vector<int> parents;
-
+
/**
				      * Reserve enough space to accommodate
* @p total_cells cells on this level.
* of this object.
*/
unsigned int memory_consumption () const;
-
+
/**
* The object containing the data on lines and
* related functions
<< "The containers have sizes " << arg1 << " and "
<< arg2 << ", which is not as expected.");
};
-
+
//TODO: Replace TriaObjectsHex to avoid this specialization
-
+
/**
* Specialization of TriaLevels for 3D. Since we need TriaObjectsHex
* instead of TriaObjects. Refer to the documentation of the template
std::vector<bool> coarsen_flags;
std::vector<std::pair<int,int> > neighbors;
std::vector<int> parents;
- std::vector<unsigned int> subdomain_ids;
+ std::vector<types::subdomain_id_t> subdomain_ids;
void reserve_space (const unsigned int total_cells,
const unsigned int dimension);
void monitor_memory (const unsigned int true_dimension) const;
<< "The containers have sizes " << arg1 << " and "
<< arg2 << ", which is not as expected.");
};
-
-
+
+
}
}
#include <base/smartpointer.h>
#include <dofs/function_map.h>
#include <dofs/dof_iterator_selector.h>
+#include <dofs/number_cache.h>
#include <hp/fe_collection.h>
#include <vector>
/*---------------------------------------*/
- /**
- * Return number of degrees of freedom.
- * Included in this number are those
- * DoFs which are constrained by
- * hanging nodes.
- */
+ /**
+ * Return the global number of
+ * degrees of freedom. If the
+ * current object handles all
+ * degrees of freedom itself
+ * (even if you may intend to
+ * solve your linear system in
+ * parallel, such as in step-17
+ * or step-18), then this number
+ * equals the number of locally
+ * owned degrees of freedom since
+ * this object doesn't know
+ * anything about what you want
+ * to do with it and believes
+ * that it owns every degree of
+ * freedom it knows about.
+ *
+ * On the other hand, if this
+ * object operates on a
+ * parallel::distributed::Triangulation
+ * object, then this function
+ * returns the global number of
+ * degrees of freedom,
+ * accumulated over all
+ * processors.
+ *
+ * In either case, included in
+ * the returned number are those
+ * DoFs which are constrained by
+ * hanging nodes, see @ref constraints.
+ */
unsigned int n_dofs () const;
/**
unsigned int
n_boundary_dofs (const std::set<unsigned char> &boundary_indicators) const;
+ /**
+ * Return the number of
+ * degrees of freedom that
+ * belong to this
+ * process.
+ *
+ * If this is a sequential job,
+ * then the result equals that
+ * produced by n_dofs(). On the
+ * other hand, if we are
+ * operating on a
+ * parallel::distributed::Triangulation,
+ * then it includes only the
+ * degrees of freedom that the
+ * current processor owns. Note
+ * that in this case this does
+ * not include all degrees of
+ * freedom that have been
+ * distributed on the current
+ * processor's image of the mesh:
+ * in particular, some of the
+ * degrees of freedom on the
+ * interface between the cells
+ * owned by this processor and
+ * cells owned by other
+ * processors may be theirs, and
+ * degrees of freedom on ghost
+ * cells are also not necessarily
+ * included.
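+				      *
+				      * As a small illustration (a
+				      * sketch only; it assumes an
+				      * object of this class named
+				      * dof_handler):
+				      * @code
+				      * const unsigned int n_local  = dof_handler.n_locally_owned_dofs();
+				      * const unsigned int n_global = dof_handler.n_dofs();
+				      * // n_local never exceeds n_global; summed over all
+				      * // processors, the locally owned counts add up to n_global
+				      * @endcode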
+ */
+ unsigned int n_locally_owned_dofs() const;
+
+ /**
+ * Return an IndexSet describing
+ * the set of locally owned DoFs
+ * as a subset of
+ * 0..n_dofs(). The number of
+ * elements of this set equals
+ * n_locally_owned_dofs().
+ */
+ const IndexSet & locally_owned_dofs() const;
+
+
+ /**
+ * Returns a vector that
+ * stores the locally owned
+ * DoFs of each processor. If
+ * you are only interested in
+ * the number of elements
+ * each processor owns then
+				      * n_locally_owned_dofs_per_processor() is
+ * a better choice.
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element that equals the
+ * IndexSet representing the
+				      * entire range [0,n_dofs()).
+ */
+ const std::vector<IndexSet> &
+ locally_owned_dofs_per_processor () const;
+
+ /**
+ * Return a vector that
+ * stores the number of
+ * degrees of freedom each
+ * processor that
+ * participates in this
+ * triangulation owns
+ * locally. The sum of all
+ * these numbers equals the
+ * number of degrees of
+ * freedom that exist
+ * globally, i.e. what
+ * n_dofs() returns.
+ *
+ * Each element of the vector
+ * returned by this function
+ * equals the number of
+ * elements of the
+ * corresponding sets
+ * returned by
+				      * locally_owned_dofs_per_processor().
+ *
+ * If this is a sequential job,
+ * then the vector has a single
+ * element equal to n_dofs().
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_dofs_per_processor () const;
+
/**
* Return a constant reference to
* the set of finite element
*/
internal::hp::DoFFaces<dim> * faces;
-
- /**
- * Store the number of dofs
- * created last time.
- */
- unsigned int used_dofs;
+ /**
+ * A structure that contains all
+ * sorts of numbers that
+ * characterize the degrees of
+ * freedom this object works on.
+ *
+ * For most members of this
+ * structure, there is an
+ * accessor function in this
+ * class that returns its value.
+ */
+ internal::DoFHandler::NumberCache number_cache;
/**
* Array to store the indices
/* ----------------------- Inline functions ---------------------------------- */
- template<int dim, int spacedim>
+ template <int dim, int spacedim>
inline
unsigned int
DoFHandler<dim,spacedim>::n_dofs () const
{
- Assert (finite_elements != 0,
- ExcMessage ("No finite element collection is associated with "
- "this DoFHandler"));
- return used_dofs;
+ return number_cache.n_global_dofs;
+ }
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ DoFHandler<dim, spacedim>::n_locally_owned_dofs() const
+ {
+ return number_cache.n_locally_owned_dofs;
+ }
+
+
+ template <int dim, int spacedim>
+ const IndexSet &
+ DoFHandler<dim, spacedim>::locally_owned_dofs() const
+ {
+ return number_cache.locally_owned_dofs;
+ }
+
+
+ template <int dim, int spacedim>
+ const std::vector<unsigned int> &
+ DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
+ {
+ return number_cache.n_locally_owned_dofs_per_processor;
+ }
+
+
+ template <int dim, int spacedim>
+ const std::vector<IndexSet> &
+ DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor () const
+ {
+ return number_cache.locally_owned_dofs_per_processor;
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
const unsigned int n_q_points,
const std::vector<unsigned int> &n_postprocessor_outputs,
const FE &finite_elements);
-
+
const unsigned int n_components;
const unsigned int n_datasets;
const unsigned int n_subdivisions;
std::vector<std::vector<Tensor<2,spacedim> > > patch_hessians_system;
std::vector<std::vector<dealii::Vector<double> > > postprocessed_values;
- const dealii::hp::FECollection<dim,spacedim> fe_collection;
+ const dealii::hp::FECollection<dim,spacedim> fe_collection;
};
* <tt>if () ... else ...</tt> clauses yourself), and also functions
* and classes offering ways to control the appearance of the output
* by setting flags for each output format.
- *
+ *
*
* <h3>Information for derived classes</h3>
*
* associated to degrees of freedom
*/
type_dof_data,
-
+
/**
* Data vector entries are one per
* grid cell
*/
type_cell_data,
-
+
/**
* Find out automatically
*/
type_automatic
};
-
+
/**
* Constructor
*/
template <class DH2>
void merge_patches (const DataOut_DoFData<DH2,patch_dim,patch_space_dim> &source,
const Point<patch_space_dim> &shift = Point<patch_space_dim>());
-
+
/**
* Release the pointers to the
* data vectors and the DoF
<< "components. However, the vector component at\n"
<< "position " << arg1 << " with name <" << arg2
<< "> does not satisfy these conditions.");
-
+
protected:
/**
* For each vector that has been added
					  * be acquired from the postprocessor.
*/
DataEntryBase (const DataPostprocessor<DH::space_dimension> *data_postprocessor);
-
+
/**
* Destructor made virtual.
*/
virtual ~DataEntryBase ();
-
+
/**
* Assuming that the stored vector is
* a cell vector, extract the given
void
get_function_values (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<double> &patch_values) const = 0;
-
+
/**
* Given a FEValuesBase object,
* extract the values on the present
void
get_function_gradients (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<Tensor<1,DH::space_dimension> > &patch_gradients) const = 0;
-
+
/**
* Given a FEValuesBase object,
* extract the gradients on the present
void
get_function_hessians (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<Tensor<2,DH::space_dimension> > &patch_hessians) const = 0;
-
+
/**
* Given a FEValuesBase object, extract
* the second derivatives on the
* vectors.
*/
virtual void clear () = 0;
-
+
/**
* Determine an estimate for
* the memory consumption (in
*/
const std::vector<DataComponentInterpretation::DataComponentInterpretation>
data_component_interpretation;
-
+
/**
* Pointer to a DataPostprocessing
* object which shall be applied to
void
get_function_values (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<double> &patch_values) const;
-
+
/**
* Given a FEValuesBase object,
* extract the values on the present
void
get_function_gradients (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<Tensor<1,DH::space_dimension> > &patch_gradients) const;
-
+
/**
* Given a FEValuesBase object,
* extract the gradients on the present
void
get_function_hessians (const FEValuesBase<DH::dimension,DH::space_dimension> &fe_patch_values,
std::vector<Tensor<2,DH::space_dimension> > &patch_hessians) const;
-
+
/**
* Given a FEValuesBase object, extract
* the second derivatives on the
* vectors.
*/
virtual void clear ();
-
+
/**
* Determine an estimate for
* the memory consumption (in
* classes.
*/
std::vector<Patch> patches;
-
+
/**
* Function by which the base
* class's functions get to know
virtual
std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
get_vector_data_ranges () const;
-
+
/**
* Make all template siblings
* friends. Needed for the
*
* The only thing this class offers is the function build_patches() which
* loops over all cells of the triangulation stored by the
- * attach_dof_handler() function of the base class and convert the data on
- * these to actual patches which are the objects that are later output by the
- * functions of the base classes. You can give a parameter to the function
- * which determines how many subdivisions in each coordinate direction are to
- * be performed, i.e. of how many subcells each patch shall consist. Default
- * is one, but you may want to choose a higher number for higher order
- * elements, so as two for quadratic elements, three for cubic elements three,
- * and so on. The purpose of this parameter is because most graphics programs
- * do not allow to specify higher order shape functions in the file formats:
- * only data at vertices can be plotted and is then shown as a bilinear
- * interpolation within the interior of cells. This may be insufficient if you
- * have higher order finite elements, and the only way to achieve better
- * output is to subdivide each cell of the mesh into several cells for
- * graphical output.
+ * attach_dof_handler() function of the base class (with the exception of
+ * cells of parallel::distributed::Triangulation objects that are not owned by
+ * the current processor) and converts the data on these to actual patches
+ * which are the objects that are later output by the functions of the base
+ * classes. You can give a parameter to the function which determines how many
+ * subdivisions in each coordinate direction are to be performed, i.e. of how
+ * many subcells each patch shall consist. Default is one, but you may want to
+ * choose a higher number for higher order elements, such as two for quadratic
+ * elements, three for cubic elements, and so on. The reason for this
+ * parameter is that most graphics programs do not allow one to specify higher
+ * order shape functions in the file formats: only data at vertices can be
+ * plotted and is then shown as a bilinear interpolation within the interior
+ * of cells. This may be insufficient if you have higher order finite
+ * elements, and the only way to achieve better output is to subdivide each
+ * cell of the mesh into several cells for graphical output.
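+ *
+ * As a rough usage sketch (the names dof_handler, solution, and the output
+ * file are illustrative assumptions, not part of this class):
+ * @code
+ * DataOut<dim> data_out;
+ * data_out.attach_dof_handler (dof_handler);
+ * data_out.add_data_vector (solution, "solution");
+ * data_out.build_patches ();
+ *
+ * std::ofstream output ("solution.vtk");
+ * data_out.write_vtk (output);
+ * @endcode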
*
* Note that after having called build_patches() once, you can call one or
* more of the write() functions of DataOutInterface. You can therefore
* cannot be interpolated to a coarser cell. If you do have cell data and use
* this pair of functions and they return a non-active cell, then an exception
* will be thrown.
- *
+ *
* @ingroup output
* @author Wolfgang Bangerth, 1999
*/
template <int dim, class DH=DoFHandler<dim> >
-class DataOut : public DataOut_DoFData<DH, DH::dimension, DH::space_dimension>
+class DataOut : public DataOut_DoFData<DH, DH::dimension, DH::space_dimension>
{
public:
/**
curved_boundary,
curved_inner_cells
};
-
+
/**
* This is the central function
* of this class since it builds
virtual void build_patches (const Mapping<DH::dimension,DH::space_dimension> &mapping,
const unsigned int n_subdivisions = 0,
const CurvedCellRegion curved_region = curved_boundary);
-
+
/**
* Return the first cell which we
* want output for. The default
* cells in a derived class.
*/
virtual cell_iterator first_cell ();
-
+
/**
* Return the next cell after
* @p cell which we want output
int,
<< "The number of subdivisions per patch, " << arg1
<< ", is not valid.");
-
+
private:
+
+ /**
+ * Return the first cell produced
+ * by the
+ * first_cell()/next_cell()
+ * function pair that is locally
+ * owned. If this object operates
+ * on a non-distributed
+ * triangulation, the result
+ * equals what first_cell()
+ * returns.
+ */
+ cell_iterator first_locally_owned_cell ();
+
+ /**
+ * Return the next cell produced
+ * by the next_cell() function
+ * that is locally owned. If this
+ * object operates on a
+ * non-distributed triangulation,
+ * the result equals what
+				      * next_cell() returns.
+ */
+ cell_iterator next_locally_owned_cell (const cell_iterator &cell);
+
/**
* Build one patch. This function
* is called in a WorkStream
* context.
+ *
+ * The result is written into the patch
+ * variable.
*/
void build_one_patch (const std::pair<cell_iterator, unsigned int> *cell_and_index,
internal::DataOut::ParallelData<DH::dimension, DH::space_dimension> &data,
ExcMessage ("Both sources need to declare the same components "
"as vectors."));
}
-
+
// merge patches. store old number
// of elements, since we need to
// adjust patch numbers, etc
for (unsigned int i=old_n_patches; i<patches.size(); ++i)
for (unsigned int v=0; v<GeometryInfo<patch_dim>::vertices_per_cell; ++v)
patches[i].vertices[v] += shift;
-
+
// adjust patch numbers
for (unsigned int i=old_n_patches; i<patches.size(); ++i)
patches[i].patch_index += old_n_patches;
-
+
// adjust patch neighbors
for (unsigned int i=old_n_patches; i<patches.size(); ++i)
for (unsigned int n=0; n<GeometryInfo<patch_dim>::faces_per_cell; ++n)
// $Id$
// Version: $Name$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* since accuracy is not so important here, and since this can save rather
* a lot of memory, when using many cells.
*
- *
+ *
* <h3>Implementation</h3>
*
* In principle, the implementation of the error estimation is simple: let
* $h$ is taken to be the greatest length of the diagonals of the cell. For
* more or less uniform cells without deformed angles, this coincides with
* the diameter of the cell.
- *
+ *
*
* <h3>Vector-valued functions</h3>
*
*
*
* <h3>%Boundary values</h3>
- *
+ *
* If the face is at the boundary, i.e. there is no neighboring cell to which
 * the jump in the gradient could be computed, there are two possibilities:
* <ul>
* Thanks go to Franz-Theo Suttmeier for clarifications about boundary
* conditions.
*
- *
+ *
* <h3>Handling of hanging nodes</h3>
- *
+ *
* The integration along faces with hanging nodes is quite tricky, since one
* of the elements has to be shifted one level up or down. See the
* documentation for the FESubFaceValues class for more information about
* afterwards. This is in particular true
* for the case where meshes are very
* large and computing indicators for @em
- * every cells is too expensive, while
+ * every cell is too expensive, while
* computing indicators for only local
* cells is acceptable. Note that if you
* only ask for the indicators of a
* parameter is retained for
* compatibility with old versions of the
* library.
+ *
+ * @note If the DoFHandler object
+ * given as an argument to this
+ * function builds on a
+ * parallel::distributed::Triangulation,
+ * this function skips
+ * computations on all cells that
+ * are not locally owned. In that
+ * case, the only valid value for
+ * the subdomain_id argument
+ * (besides the invalid value) is
+ * the subdomain id that is
+ * associated with the currently
+				      * associated with the current
+ * parallel::distributed::Triangulation::locally_owned_subdomain(). Even
+ * though nothing is computed on
+ * cells that we don't locally
+ * own, the error indicator
+ * vector must still have a
+ * length equal to the number of
+				      * active cells in the mesh as
+ * reported by
+ * parallel::distributed::Triangulation::n_locally_owned_active_cells().
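+				      *
+				      * A minimal call in this
+				      * situation might look as
+				      * follows (a sketch only; the
+				      * names dof_handler, solution,
+				      * and estimated_error_per_cell
+				      * are assumed to already exist,
+				      * with the error vector sized
+				      * as described above):
+				      * @code
+				      * KellyErrorEstimator<dim>::estimate (dof_handler,
+				      *                                     QGauss<dim-1>(3),
+				      *                                     typename FunctionMap<dim>::type(),
+				      *                                     solution,
+				      *                                     estimated_error_per_cell);
+				      * @endcode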
*/
template <typename InputVector, class DH>
static void estimate (const Mapping<dim, spacedim> &mapping,
Vector<float> &error,
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
- const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const unsigned int n_threads = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
-
+
/**
* Calls the @p estimate
* function, see above, with
* <tt>mapping=MappingQ1@<dim@>()</tt>.
- */
+ */
template <typename InputVector, class DH>
static void estimate (const DH &dof,
const Quadrature<dim-1> &quadrature,
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
-
+
/**
* Same function as above, but
* accepts more than one solution
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
/**
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<dim> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
-
-
+
+
/**
* Exception
*/
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
/**
* Calls the @p estimate
* function, see above, with
* <tt>mapping=MappingQ1<1>()</tt>.
- */
+ */
template <typename InputVector, class DH>
static void estimate (const DH &dof,
const Quadrature<0> &quadrature,
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
-
+
/**
* Same function as above, but
* accepts more than one solution
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
/**
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
const std::vector<bool> &component_mask = std::vector<bool>(),
const Function<1> *coefficients = 0,
const unsigned int n_threads = multithread_info.n_default_threads,
- const unsigned int subdomain_id = numbers::invalid_unsigned_int,
+ const types::subdomain_id_t subdomain_id = types::invalid_subdomain_id,
const unsigned int material_id = numbers::invalid_unsigned_int);
-
+
/**
* Exception
*/
fe, support_quadrature, update_quadrature_points);
for (; cell!=endc; ++cell)
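+    // on a distributed triangulation, only visit cells that the current
+    // processor owns; ghost and artificial cells belong to, and are dealt
+    // with by, other processors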
+ if (!cell->is_artificial() && !cell->is_ghost())
{
const unsigned int fe_index = cell->active_fe_index();
const std::vector<bool> &component_mask_)
{
//ConstraintMatrix boundary_constraints();
- // interpolate_boundary_values (dof, boundary_component, boundary_function,
+ // interpolate_boundary_values (dof, boundary_component, boundary_function,
// boundary_constraints, component_mask);
-
+
const unsigned int dim=DH::dimension;
const unsigned int spacedim=DH::space_dimension;
typename DH::active_cell_iterator cell = dof.begin_active(),
endc = dof.end();
for (; cell!=endc; ++cell)
- for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
- ++face_no)
- {
- const FiniteElement<dim,DH::space_dimension> &fe = cell->get_fe();
-
- // we can presently deal only with
- // primitive elements for boundary
- // values. this does not preclude
- // us using non-primitive elements
- // in components that we aren't
- // interested in, however. make
- // sure that all shape functions
- // that are non-zero for the
- // components we are interested in,
- // are in fact primitive
- for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
- {
- const std::vector<bool> &nonzero_component_array
- = cell->get_fe().get_nonzero_components (i);
- for (unsigned int c=0; c<n_components; ++c)
- if ((nonzero_component_array[c] == true)
- &&
- (component_mask[c] == true))
- Assert (cell->get_fe().is_primitive (i),
- ExcMessage ("This function can only deal with requested boundary "
- "values that correspond to primitive (scalar) base "
- "elements"));
- }
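+	// skip artificial cells: they are mere placeholders on this
+	// processor and carry no degree of freedom information we could set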
+ if (!cell->is_artificial())
+ for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
+ ++face_no)
+ {
+ const FiniteElement<dim,DH::space_dimension> &fe = cell->get_fe();
+
+ // we can presently deal only with
+ // primitive elements for boundary
+ // values. this does not preclude
+ // us using non-primitive elements
+ // in components that we aren't
+ // interested in, however. make
+ // sure that all shape functions
+ // that are non-zero for the
+ // components we are interested in,
+ // are in fact primitive
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
+ {
+ const std::vector<bool> &nonzero_component_array
+ = cell->get_fe().get_nonzero_components (i);
+ for (unsigned int c=0; c<n_components; ++c)
+ if ((nonzero_component_array[c] == true)
+ &&
+ (component_mask[c] == true))
+ Assert (cell->get_fe().is_primitive (i),
+ ExcMessage ("This function can only deal with requested boundary "
+ "values that correspond to primitive (scalar) base "
+ "elements"));
+ }
- typename DH::face_iterator face = cell->face(face_no);
- const unsigned char boundary_component = face->boundary_indicator();
- if (function_map.find(boundary_component) != function_map.end())
- {
- // face is of the right component
- x_fe_values.reinit(cell, face_no);
- const FEFaceValues<dim> &fe_values = x_fe_values.get_present_fe_values();
-
- // get indices, physical location and
- // boundary values of dofs on this
- // face
- face_dofs.resize (fe.dofs_per_face);
- face->get_dof_indices (face_dofs, cell->active_fe_index());
- const std::vector<Point<DH::space_dimension> > &dof_locations
- = fe_values.get_quadrature_points ();
+ typename DH::face_iterator face = cell->face(face_no);
+ const unsigned char boundary_component = face->boundary_indicator();
+ if (function_map.find(boundary_component) != function_map.end())
+ {
+ // face is of the right component
+ x_fe_values.reinit(cell, face_no);
+ const FEFaceValues<dim> &fe_values = x_fe_values.get_present_fe_values();
- if (fe_is_system)
- {
- // resize
- // array. avoid
- // construction of a
- // memory allocating
- // temporary if
- // possible
- if (dof_values_system.size() < fe.dofs_per_face)
- dof_values_system.resize (fe.dofs_per_face,
- Vector<double>(fe.n_components()));
- else
- dof_values_system.resize (fe.dofs_per_face);
-
- function_map.find(boundary_component)->second
- ->vector_value_list (dof_locations, dof_values_system);
-
- // enter those dofs
- // into the list that
- // match the
- // component
- // signature. avoid
- // the usual
- // complication that
- // we can't just use
- // *_system_to_component_index
- // for non-primitive
- // FEs
- for (unsigned int i=0; i<face_dofs.size(); ++i)
- {
- unsigned int component;
- if (fe.is_primitive())
- component = fe.face_system_to_component_index(i).first;
- else
- {
- // non-primitive
- // case. make
- // sure that
- // this
- // particular
- // shape
- // function
- // _is_
- // primitive,
- // and get at
- // it's
- // component. use
- // usual
- // trick to
- // transfer
- // face dof
- // index to
- // cell dof
- // index
- const unsigned int cell_i
- = (dim == 1 ?
- i
- :
- (dim == 2 ?
- (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
- :
- (dim == 3 ?
- (i<4*fe.dofs_per_vertex ?
- i
- :
- (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
- i+4*fe.dofs_per_vertex
- :
- i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
- :
- numbers::invalid_unsigned_int)));
- Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
-
- // make sure
- // that if
- // this is
- // not a
- // primitive
- // shape function,
- // then all
- // the
- // corresponding
- // components
- // in the
- // mask are
- // not set
- if (!fe.is_primitive(cell_i))
- for (unsigned int c=0; c<n_components; ++c)
- if (fe.get_nonzero_components(cell_i)[c])
- Assert (component_mask[c] == false,
- FETools::ExcFENotPrimitive());
-
- // let's pick
- // the first
- // of
- // possibly
- // more than
- // one
- // non-zero
- // components. if
- // shape
- // function
- // is
- // non-primitive,
- // then we
- // will
- // ignore the
- // result in
- // the
- // following
- // anyway,
- // otherwise
- // there's
- // only one
- // non-zero
- // component
- // which we
- // will use
- component = (std::find (fe.get_nonzero_components(cell_i).begin(),
- fe.get_nonzero_components(cell_i).end(),
- true)
- -
- fe.get_nonzero_components(cell_i).begin());
- }
+ // get indices, physical location and
+ // boundary values of dofs on this
+ // face
+ face_dofs.resize (fe.dofs_per_face);
+ face->get_dof_indices (face_dofs, cell->active_fe_index());
+ const std::vector<Point<DH::space_dimension> > &dof_locations
+ = fe_values.get_quadrature_points ();
- if (component_mask[component] == true)
- boundary_values[face_dofs[i]] = dof_values_system[i](component);
- }
- }
- else
- // fe has only one component,
- // so save some computations
- {
- // get only the one component that
- // this function has
- dof_values_scalar.resize (fe.dofs_per_face);
- function_map.find(boundary_component)->second
- ->value_list (dof_locations, dof_values_scalar, 0);
+ if (fe_is_system)
+ {
+ // resize
+ // array. avoid
+ // construction of a
+ // memory allocating
+ // temporary if
+ // possible
+ if (dof_values_system.size() < fe.dofs_per_face)
+ dof_values_system.resize (fe.dofs_per_face,
+ Vector<double>(fe.n_components()));
+ else
+ dof_values_system.resize (fe.dofs_per_face);
+
+ function_map.find(boundary_component)->second
+ ->vector_value_list (dof_locations, dof_values_system);
+
+ // enter those dofs
+ // into the list that
+ // match the
+ // component
+ // signature. avoid
+ // the usual
+ // complication that
+ // we can't just use
+ // *_system_to_component_index
+ // for non-primitive
+ // FEs
+ for (unsigned int i=0; i<face_dofs.size(); ++i)
+ {
+ unsigned int component;
+ if (fe.is_primitive())
+ component = fe.face_system_to_component_index(i).first;
+ else
+ {
+ // non-primitive
+ // case. make
+ // sure that
+ // this
+ // particular
+ // shape
+ // function
+ // _is_
+ // primitive,
+ // and get at
+						   // its
+ // component. use
+ // usual
+ // trick to
+ // transfer
+ // face dof
+ // index to
+ // cell dof
+ // index
+ const unsigned int cell_i
+ = (dim == 1 ?
+ i
+ :
+ (dim == 2 ?
+ (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
+ :
+ (dim == 3 ?
+ (i<4*fe.dofs_per_vertex ?
+ i
+ :
+ (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
+ i+4*fe.dofs_per_vertex
+ :
+ i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
+ :
+ numbers::invalid_unsigned_int)));
+ Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
+
+ // make sure
+ // that if
+ // this is
+ // not a
+ // primitive
+ // shape function,
+ // then all
+ // the
+ // corresponding
+ // components
+ // in the
+ // mask are
+ // not set
+ if (!fe.is_primitive(cell_i))
+ for (unsigned int c=0; c<n_components; ++c)
+ if (fe.get_nonzero_components(cell_i)[c])
+ Assert (component_mask[c] == false,
+ FETools::ExcFENotPrimitive());
+
+ // let's pick
+ // the first
+ // of
+ // possibly
+ // more than
+ // one
+ // non-zero
+ // components. if
+ // shape
+ // function
+ // is
+ // non-primitive,
+ // then we
+ // will
+ // ignore the
+ // result in
+ // the
+ // following
+ // anyway,
+ // otherwise
+ // there's
+ // only one
+ // non-zero
+ // component
+ // which we
+ // will use
+ component = (std::find (fe.get_nonzero_components(cell_i).begin(),
+ fe.get_nonzero_components(cell_i).end(),
+ true)
+ -
+ fe.get_nonzero_components(cell_i).begin());
+ }
+
+ if (component_mask[component] == true)
+ boundary_values[face_dofs[i]] = dof_values_system[i](component);
+ }
+ }
+ else
+ // fe has only one component,
+ // so save some computations
+ {
+ // get only the one component that
+ // this function has
+ dof_values_scalar.resize (fe.dofs_per_face);
+ function_map.find(boundary_component)->second
+ ->value_list (dof_locations, dof_values_scalar, 0);
- // enter into list
+ // enter into list
- for (unsigned int i=0; i<face_dofs.size(); ++i)
- boundary_values[face_dofs[i]] = dof_values_scalar[i];
- }
- }
- }
+ for (unsigned int i=0; i<face_dofs.size(); ++i)
+ boundary_values[face_dofs[i]] = dof_values_scalar[i];
+ }
+ }
+ }
}
std::map<unsigned int,double> boundary_values;
interpolate_boundary_values (mapping, dof, function_map,
boundary_values, component_mask_);
- std::map<unsigned int,double>::const_iterator boundary_value =
+ std::map<unsigned int,double>::const_iterator boundary_value =
boundary_values.begin();
for ( ; boundary_value !=boundary_values.end(); ++boundary_value)
{
- if (!constraints.is_constrained(boundary_value->first))
+ if (constraints.can_store_line (boundary_value->first)
+ &&
+ !constraints.is_constrained(boundary_value->first))
{
constraints.add_line (boundary_value->first);
constraints.set_inhomogeneity (boundary_value->first,
std::map<unsigned int,double> boundary_values;
project_boundary_values (mapping, dof, boundary_functions, q,
boundary_values, component_mapping);
- std::map<unsigned int,double>::const_iterator boundary_value =
+ std::map<unsigned int,double>::const_iterator boundary_value =
boundary_values.begin();
for ( ; boundary_value !=boundary_values.end(); ++boundary_value)
{
const Tensor<1,dim> &constraining_vector,
ConstraintMatrix &constraints)
{
-
+
// choose the DoF that has the
// largest component in the
// constraining_vector as the
{
if (std::fabs(constraining_vector[0]) > std::fabs(constraining_vector[1]))
{
- if (!constraints.is_constrained(dof_indices.dof_indices[0]))
+ if (!constraints.is_constrained(dof_indices.dof_indices[0])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[0]))
{
constraints.add_line (dof_indices.dof_indices[0]);
}
else
{
- if (!constraints.is_constrained(dof_indices.dof_indices[1]))
+ if (!constraints.is_constrained(dof_indices.dof_indices[1])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[1]))
{
constraints.add_line (dof_indices.dof_indices[1]);
&&
(std::fabs(constraining_vector[0]) >= std::fabs(constraining_vector[2])))
{
- if (!constraints.is_constrained(dof_indices.dof_indices[0]))
+ if (!constraints.is_constrained(dof_indices.dof_indices[0])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[0]))
{
constraints.add_line (dof_indices.dof_indices[0]);
&&
(std::fabs(constraining_vector[1]) >= std::fabs(constraining_vector[2])))
{
- if (!constraints.is_constrained(dof_indices.dof_indices[1]))
+ if (!constraints.is_constrained(dof_indices.dof_indices[1])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[1]))
{
constraints.add_line (dof_indices.dof_indices[1]);
}
else
{
- if (!constraints.is_constrained(dof_indices.dof_indices[2]))
+ if (!constraints.is_constrained(dof_indices.dof_indices[2])
+ &&
+ constraints.can_store_line(dof_indices.dof_indices[2]))
{
constraints.add_line (dof_indices.dof_indices[2]);
namespace internals {
namespace VectorTools {
-
+
// This function computes the
// projection of the boundary
// function on edges for 3D.
std::vector<double>& dof_values)
{
const unsigned int dim = 3;
-
+
hp_fe_values.reinit
(cell,
(cell->active_fe_index () * GeometryInfo<dim>::faces_per_cell + face)
* GeometryInfo<dim>::lines_per_face + line);
-
+
// Initialize the required
// objects.
const FEValues<dim>&
jacobians = fe_values.get_jacobians ();
const std::vector<Point<dim> >&
quadrature_points = fe_values.get_quadrature_points ();
-
+
std::vector<Point<dim> > tangentials (fe_values.n_quadrature_points);
std::vector<Vector<double> > values (fe_values.n_quadrature_points,
Vector<double> (dim));
-
+
// Get boundary function values
// at quadrature points.
boundary_function.vector_value_list (quadrature_points, values);
-
+
const std::vector<Point<dim> >&
reference_quadrature_points = fe_values.get_quadrature ().get_points ();
const unsigned int superdegree = cell->get_fe ().degree;
const unsigned int degree = superdegree - 1;
-
+
// coordinate directions of
// the edges of the face.
const unsigned int
{ 0, 0, 2, 2 },
{ 1, 1, 0, 0 },
{ 1, 1, 0, 0 } };
-
+
// The interpolation for the
// lowest order edge shape
// functions is just the mean
- // value of the tangential
+ // value of the tangential
// components of the boundary
- // function on the edge.
+ // function on the edge.
for (unsigned int q_point = 0; q_point < fe_values.n_quadrature_points;
++q_point)
{
// the quadrature point.
Point<dim> shifted_reference_point_1 = reference_quadrature_points[q_point];
Point<dim> shifted_reference_point_2 = reference_quadrature_points[q_point];
-
+
shifted_reference_point_1 (edge_coordinate_direction[face][line]) += 1e-13;
shifted_reference_point_2 (edge_coordinate_direction[face][line]) -= 1e-13;
tangentials[q_point]
shifted_reference_point_2)));
tangentials[q_point]
/= std::sqrt (tangentials[q_point].square ());
-
+
// Compute the mean value.
dof_values[line * superdegree]
+= (fe_values.JxW (q_point)
+ jacobians[q_point][2][edge_coordinate_direction[face][line]]
* jacobians[q_point][2][edge_coordinate_direction[face][line]]));
}
-
+
// If there are also higher
// order shape functions we
// have still some work left.
const FEValuesExtractors::Vector vec (first_vector_component);
FullMatrix<double> assembling_matrix (degree, fe_values.n_quadrature_points);
Vector<double> assembling_vector (fe_values.n_quadrature_points);
-
+
// We set up a linear system
// of equations to get the
// values for the remaining
jacobians[q_point][2][edge_coordinate_direction[face][line]]
* jacobians[q_point][2][edge_coordinate_direction[face][line]]))
* tangentials[q_point];
-
+
const Tensor<1, dim> shape_value
= fe_values[vec].value (cell->get_fe ()
.face_to_cell_index (line * superdegree, face),
(values[q_point] (2)
-
dof_values[line * superdegree] * shape_value[2]) * tmp[2]);
-
+
for (unsigned int i = 0; i < degree; ++i)
assembling_matrix (i, q_point)
= fe_values[vec].value (cell->get_fe ()
face),
q_point) * tmp;
}
-
+
FullMatrix<double> cell_matrix (degree, degree);
-
+
// Create the system matrix
// by multiplying the
// assembling matrix with its
// transposed.
assembling_matrix.mTmult (cell_matrix, assembling_matrix);
-
+
FullMatrix<double> cell_matrix_inv (degree, degree);
// Compute its inverse.
cell_matrix_inv.invert (cell_matrix);
-
+
Vector<double> cell_rhs (degree);
-
+
// Create the system right
// hand side vector by
// multiplying the assembling
// matrix with the assembling
// vector.
assembling_matrix.vmult (cell_rhs, assembling_vector);
-
+
Vector<double> solution (degree);
-
+
cell_matrix_inv.vmult (solution, cell_rhs);
// Store the computed values.
for (unsigned int i = 0; i < degree; ++i)
- dof_values[i + line * superdegree + 1] = solution (i);
+ dof_values[i + line * superdegree + 1] = solution (i);
}
}
-
+
// dummy implementation of above
// function for all other
// dimensions
{
Assert (false, ExcInternalError ());
}
-
+
// This function computes the
// projection of the boundary
// function on the interior of
fe_values = hp_fe_values.get_present_fe_values ();
const std::vector<Tensor<2, dim> >&
jacobians = fe_values.get_jacobians ();
-
+
std::vector<Vector<double> >
values (fe_values.n_quadrature_points, Vector<double> (dim));
-
+
switch (dim)
{
case 2:
quadrature_points = fe_values.get_quadrature_points ();
std::vector<Point<dim> >
tangentials (fe_values.n_quadrature_points);
-
+
// Get boundary function
// values at quadrature
// points.
boundary_function.vector_value_list (quadrature_points, values);
-
+
const std::vector<Point<dim> >&
reference_quadrature_points = fe_values.get_quadrature ().get_points ();
const unsigned int degree = cell->get_fe ().degree - 1;
-
+
// coordinate directions
// of the face.
const unsigned int
face_coordinate_direction[GeometryInfo<dim>::faces_per_cell]
= { 1, 1, 0, 0 };
-
+
// The interpolation for
// the lowest order face
// shape functions is just
// the mean value of the
// tangential components
// of the boundary function
- // on the edge.
+ // on the edge.
for (unsigned int q_point = 0;
q_point < fe_values.n_quadrature_points; ++q_point)
{
= reference_quadrature_points[q_point];
Point<dim> shifted_reference_point_2
= reference_quadrature_points[q_point];
-
+
shifted_reference_point_1 (face_coordinate_direction[face])
+= 1e-13;
shifted_reference_point_2 (face_coordinate_direction[face])
+ jacobians[q_point][1][face_coordinate_direction[face]]
* jacobians[q_point][1][face_coordinate_direction[face]]);
}
-
+
// If there are also
// higher order shape
// functions we have
FullMatrix<double> assembling_matrix (degree,
fe_values.n_quadrature_points);
Vector<double> assembling_vector (fe_values.n_quadrature_points);
-
+
// We set up a
// linear system
// of equations to
+ jacobians[q_point][1][face_coordinate_direction[face]]
* jacobians[q_point][1][face_coordinate_direction[face]]))
* tangentials[q_point];
-
+
const Tensor<1, dim> shape_value
= fe_values[vec].value (cell->get_fe ()
.face_to_cell_index (0, face),
q_point);
-
+
assembling_vector (q_point) = (values[q_point] (0)
-
dof_values[0] * shape_value[0]) * tmp[0]
(values[q_point] (1)
-
dof_values[1] * shape_value[1]) * tmp[1];
-
+
// In the weak
// form the
// right hand
.face_to_cell_index (i + 1, face),
q_point) * tmp;
}
-
+
FullMatrix<double> cell_matrix (degree, degree);
-
+
// Create the system
// matrix by multiplying
// the assembling
// matrix with its
// transposed.
assembling_matrix.mTmult (cell_matrix, assembling_matrix);
-
+
FullMatrix<double> cell_matrix_inv (degree, degree);
// Compute its inverse.
cell_matrix_inv.invert (cell_matrix);
-
+
Vector<double> cell_rhs (degree);
-
+
// Create the system
// right hand side
// vector by
// with the assembling
// vector.
assembling_matrix.vmult (cell_rhs, assembling_vector);
-
+
Vector<double> solution (degree);
-
+
cell_matrix_inv.vmult (solution, cell_rhs);
-
+
// Store the computed
// values.
for (unsigned int i = 0; i < degree; ++i)
dof_values[i + 1] = solution (i);
}
-
+
break;
}
-
+
case 3:
{
const std::vector<Point<dim> >&
quadrature_points = fe_values.get_quadrature_points ();
-
+
// Get boundary function
// values at quadrature
// points.
boundary_function.vector_value_list (quadrature_points, values);
-
+
const FEValuesExtractors::Vector vec (first_vector_component);
const unsigned int superdegree = cell->get_fe ().degree;
const unsigned int degree = superdegree - 1;
FullMatrix<double> cell_matrix_inv (assembling_matrix.m (),
assembling_matrix.m ());
Vector<double> solution (cell_matrix.m ());
-
+
// Get coordinate directions
// of the face.
const unsigned int
{ 0, 1 },
{ 1, 0 },
{ 1, 0 } };
-
+
// The projection is
// divided into two steps.
// In the first step we
// interpolated part
// on the edges.
Tensor<1, dim> tmp;
-
+
for (unsigned int d = 0; d < dim; ++d)
tmp[d] = values[q_point] (d);
-
+
for (unsigned int i = 0; i < 2; ++i)
for (unsigned int j = 0; j <= degree; ++j)
tmp -= dof_values[(i + 2 * local_face_coordinate_directions[face][0]) * superdegree + j]
* fe_values[vec].value (cell->get_fe ().face_to_cell_index
((i + 2 * local_face_coordinate_directions[face][0])
* superdegree + j, face), q_point);
-
+
const double JxW
= std::sqrt (fe_values.JxW (q_point)
/ ((jacobians[q_point][0][global_face_coordinate_directions[face][0]]
+
jacobians[q_point][2][global_face_coordinate_directions[face][1]]
* jacobians[q_point][2][global_face_coordinate_directions[face][1]])));
-
+
// In the weak form
// the right hand
// side function
// the face.
for (unsigned int d = 0; d < dim; ++d)
assembling_vector (dim * q_point + d) = JxW * tmp[d];
-
+
for (unsigned int i = 0; i <= degree; ++i)
for (unsigned int j = 0; j < degree; ++j)
{
+ GeometryInfo<dim>::lines_per_face,
face),
q_point));
-
+
for (unsigned int d = 0; d < dim; ++d)
assembling_matrix (i * degree + j,
dim * q_point + d)
= shape_value[d];
}
}
-
+
// Create the system
// matrix by
// multiplying the
cell_matrix_inv.invert (cell_matrix);
assembling_matrix.vmult (cell_rhs, assembling_vector);
cell_matrix_inv.vmult (solution, cell_rhs);
-
+
// Store the computed
// values.
for (unsigned int i = 0; i <= degree; ++i)
dof_values[(i + GeometryInfo<dim>::lines_per_face)
* degree + j + GeometryInfo<dim>::lines_per_face]
= solution (i * degree + j);
-
+
// Now we do the
// same as above
// with the vertical
q_point < fe_values.n_quadrature_points; ++q_point)
{
Tensor<1, dim> tmp;
-
+
for (unsigned int d = 0; d < dim; ++d)
tmp[d] = values[q_point] (d);
-
+
for (unsigned int i = 0; i < 2; ++i)
for (unsigned int j = 0; j <= degree; ++j)
tmp
* fe_values[vec].value (cell->get_fe ().face_to_cell_index
((i + 2 * local_face_coordinate_directions[face][1])
* superdegree + j, face), q_point);
-
+
const double JxW
= std::sqrt (fe_values.JxW (q_point)
/ ((jacobians[q_point][0][global_face_coordinate_directions[face][0]]
+
jacobians[q_point][2][global_face_coordinate_directions[face][1]]
* jacobians[q_point][2][global_face_coordinate_directions[face][1]])));
-
+
for (unsigned int d = 0; d < dim; ++d)
assembling_vector (dim * q_point + d) = JxW * tmp[d];
-
+
for (unsigned int i = 0; i < degree; ++i)
for (unsigned int j = 0; j <= degree; ++j)
{
* fe_values[vec].value (cell->get_fe ().face_to_cell_index
((i + degree + GeometryInfo<dim>::lines_per_face)
* superdegree + j, face), q_point));
-
+
for (unsigned int d = 0; d < dim; ++d)
assembling_matrix (i * superdegree + j, dim * q_point + d)
= shape_value[d];
}
}
-
+
assembling_matrix.mTmult (cell_matrix, assembling_matrix);
cell_matrix_inv.invert (cell_matrix);
assembling_matrix.vmult (cell_rhs, assembling_vector);
cell_matrix_inv.vmult (solution, cell_rhs);
-
+
for (unsigned int i = 0; i < degree; ++i)
for (unsigned int j = 0; j <= degree; ++j)
dof_values[(i + degree + GeometryInfo<dim>::lines_per_face) * superdegree + j]
= solution (i * superdegree + j);
-
+
break;
}
-
+
default:
Assert (false, ExcNotImplemented ());
}
}
}
}
-
-
+
+
template <int dim>
hp::FECollection<dim> fe_collection (dof_handler.get_fe ());
hp::MappingCollection<dim> mapping_collection (mapping);
hp::QCollection<dim> face_quadrature_collection;
-
+
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
face_quadrature_collection.push_back
(QProjector<dim>::project_to_face (reference_face_quadrature, face));
-
+
hp::FEValues<dim> fe_face_values (mapping_collection, fe_collection,
face_quadrature_collection,
update_jacobians |
update_JxW_values |
update_quadrature_points |
update_values);
-
+
std::vector<double> dof_values (dofs_per_face);
std::vector<unsigned int> face_dof_indices (dofs_per_face);
typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
-
+
switch (dim)
{
case 2:
typedef FiniteElement<dim> FEL;
AssertThrow (dynamic_cast<const FE_Nedelec<dim>*> (&cell->get_fe ()) != 0,
typename FEL::ExcInterpolationNotImplemented ());
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
dof_values[dof] = 0.0;
-
+
// Compute the
// projection of the
// boundary function on
internals::VectorTools
::compute_face_projection (cell, face, fe_face_values,
boundary_function,
- first_vector_component, dof_values);
+ first_vector_component, dof_values);
cell->face (face)->get_dof_indices (face_dof_indices,
cell->active_fe_index ());
-
+
// Add the computed
// constraints to the
// constraint matrix,
if (!(constraints.is_constrained (face_dof_indices[dof])))
{
constraints.add_line (face_dof_indices[dof]);
-
+
if (std::abs (dof_values[dof]) > 1e-14)
constraints.set_inhomogeneity (face_dof_indices[dof], dof_values[dof]);
}
}
-
+
break;
}
-
+
case 3:
{
const QGauss<dim - 2> reference_edge_quadrature (2 * superdegree);
const unsigned int degree = superdegree - 1;
const unsigned int n_dofs = dof_handler.n_dofs ();
hp::QCollection<dim> edge_quadrature_collection;
-
+
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
for (unsigned int line = 0; line < GeometryInfo<dim>::lines_per_face; ++line)
edge_quadrature_collection.push_back
(QProjector<dim>::project_to_face
(QProjector<dim - 1>::project_to_face
(reference_edge_quadrature, line), face));
-
+
hp::FEValues<dim> fe_edge_values (mapping_collection, fe_collection,
edge_quadrature_collection,
update_jacobians |
update_values);
std::vector<double> computed_constraints (n_dofs);
std::vector<int> projected_dofs (n_dofs);
-
+
for (unsigned int dof = 0; dof < n_dofs; ++dof)
projected_dofs[dof] = -1;
-
+
for (; cell != dof_handler.end (); ++cell)
if (cell->at_boundary ())
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
typedef FiniteElement<dim> FEL;
AssertThrow (dynamic_cast<const FE_Nedelec<dim>*> (&cell->get_fe ()) != 0,
typename FEL::ExcInterpolationNotImplemented ());
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
dof_values[dof] = 0.0;
-
+
cell->face (face)->get_dof_indices (face_dof_indices,
cell->active_fe_index ());
-
+
// First we compute the
// projection on the
// edges.
dof < (line + 1) * superdegree; ++dof)
projected_dofs[face_dof_indices[dof]] = degree;
}
-
+
// If we have
// computed the
// values in a
dof < (line + 1) * superdegree; ++dof)
dof_values[dof] = computed_constraints[face_dof_indices[dof]];
}
-
+
// If there are higher
// order shape
// functions, there is
boundary_function,
first_vector_component,
dof_values);
-
+
// Mark the projected
// degrees of
// freedom.
dof < dofs_per_face; ++dof)
projected_dofs[face_dof_indices[dof]] = degree;
}
-
+
// Store the computed
// values in the global
// vector.
if (std::abs (dof_values[dof]) > 1e-14)
computed_constraints[face_dof_indices[dof]] = dof_values[dof];
}
-
+
// Add the computed constraints
// to the constraint matrix, if
// the degree of freedom is not
constraints.set_inhomogeneity (dof, computed_constraints[dof]);
}
}
-
+
default:
Assert (false, ExcNotImplemented ());
}
{
hp::FECollection<dim> fe_collection (dof_handler.get_fe ());
hp::QCollection<dim> face_quadrature_collection;
-
+
for (unsigned int i = 0; i < fe_collection.size (); ++i)
{
const QGauss<dim - 1>
reference_face_quadrature (2 * fe_collection[i].degree);
-
+
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
face_quadrature_collection.push_back
(QProjector<dim>::project_to_face (reference_face_quadrature, face));
}
-
+
hp::FEValues<dim> fe_face_values (mapping_collection, fe_collection,
face_quadrature_collection,
update_jacobians |
std::vector<double> dof_values;
std::vector<unsigned int> face_dof_indices;
typename hp::DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active ();
-
+
switch (dim)
{
case 2:
typedef FiniteElement<dim> FEL;
AssertThrow (dynamic_cast<const FE_Nedelec<dim> *> (&cell->get_fe ()) != 0,
typename FEL::ExcInterpolationNotImplemented ());
-
+
const unsigned int dofs_per_face = cell->get_fe ().dofs_per_face;
-
+
dof_values.resize (dofs_per_face);
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
dof_values[dof] = 0.0;
-
+
internals::VectorTools
::compute_face_projection (cell, face, fe_face_values,
boundary_function,
face_dof_indices.resize (dofs_per_face);
cell->face (face)->get_dof_indices (face_dof_indices,
cell->active_fe_index ());
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
if (!(constraints.is_constrained (face_dof_indices[dof])))
{
constraints.add_line (face_dof_indices[dof]);
-
+
if (std::abs (dof_values[dof]) > 1e-14)
constraints.set_inhomogeneity (face_dof_indices[dof], dof_values[dof]);
}
}
-
+
break;
}
-
+
case 3:
{
const unsigned int n_dofs = dof_handler.n_dofs ();
hp::QCollection<dim> edge_quadrature_collection;
-
+
for (unsigned int i = 0; i < fe_collection.size (); ++i)
{
const QGauss<dim - 2>
reference_edge_quadrature (2 * fe_collection[i].degree);
-
+
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
for (unsigned int line = 0; line < GeometryInfo<dim>::lines_per_face; ++line)
edge_quadrature_collection.push_back
(QProjector<dim - 1>::project_to_face (reference_edge_quadrature, line),
face));
}
-
+
hp::FEValues<dim> fe_edge_values (mapping_collection, fe_collection,
edge_quadrature_collection,
update_jacobians |
update_values);
std::vector<double> computed_constraints (n_dofs);
std::vector<int> projected_dofs (n_dofs);
-
+
for (unsigned int dof = 0; dof < n_dofs; ++dof)
projected_dofs[dof] = -1;
-
+
for (; cell != dof_handler.end (); ++cell)
if (cell->at_boundary ())
for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
typedef FiniteElement<dim> FEL;
AssertThrow (dynamic_cast<const FE_Nedelec<dim> *> (&cell->get_fe ()) != 0,
typename FEL::ExcInterpolationNotImplemented ());
-
+
const unsigned int superdegree = cell->get_fe ().degree;
const unsigned int degree = superdegree - 1;
const unsigned int dofs_per_face = cell->get_fe ().dofs_per_face;
-
+
dof_values.resize (dofs_per_face);
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
dof_values[dof] = 0.0;
-
+
face_dof_indices.resize (dofs_per_face);
cell->face (face)->get_dof_indices (face_dof_indices,
cell->active_fe_index ());
-
+
for (unsigned int line = 0;
line < GeometryInfo<dim>::lines_per_face; ++line)
{
boundary_function,
first_vector_component,
dof_values);
-
+
for (unsigned int dof = line * superdegree;
dof < (line + 1) * superdegree; ++dof)
projected_dofs[face_dof_indices[dof]] = degree;
}
-
+
else
for (unsigned int dof = line * superdegree;
dof < (line + 1) * superdegree; ++dof)
dof_values[dof] = computed_constraints[face_dof_indices[dof]];
}
-
+
if (degree > 0)
{
internals::VectorTools
boundary_function,
first_vector_component,
dof_values);
-
+
for (unsigned int dof = GeometryInfo<dim>::lines_per_face * superdegree;
dof < dofs_per_face; ++dof)
projected_dofs[face_dof_indices[dof]] = degree;
}
-
+
for (unsigned int dof = 0; dof < dofs_per_face; ++dof)
if (std::abs (dof_values[dof]) > 1e-14)
computed_constraints[face_dof_indices[dof]] = dof_values[dof];
}
-
+
for (unsigned int dof = 0; dof < n_dofs; ++dof)
if ((projected_dofs[dof] != -1) && !(constraints.is_constrained (dof)))
{
constraints.set_inhomogeneity (dof, computed_constraints[dof]);
}
}
-
+
default:
Assert (false, ExcNotImplemented ());
}
cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
- for (unsigned int face_no=0; face_no < GeometryInfo<dim>::faces_per_cell;
- ++face_no)
- if (boundary_ids.find(cell->face(face_no)->boundary_indicator())
- != boundary_ids.end())
- {
- typename DH<dim,spacedim>::face_iterator face = cell->face(face_no);
-
- // get the indices of the
- // dofs on this cell...
- face->get_dof_indices (face_dofs, cell->active_fe_index());
-
- // ...and the normal
- // vectors at the locations
- // where they are defined:
- const std::vector<Point<dim-1> > &
- unit_support_points = fe.get_unit_face_support_points();
- Quadrature<dim-1> aux_quad (unit_support_points);
- FEFaceValues<dim> fe_values (mapping, fe, aux_quad,
- update_normal_vectors);
- fe_values.reinit(cell, face_no);
-
- // then identify which of
- // them correspond to the
- // selected set of vector
- // components
- for (unsigned int i=0; i<face_dofs.size(); ++i)
- if (fe.face_system_to_component_index(i).first ==
- first_vector_component)
- {
- // find corresponding other
- // components of vector
- internal::VectorTools::VectorDoFTuple<dim> vector_dofs;
- vector_dofs.dof_indices[0] = face_dofs[i];
+ if (!cell->is_artificial())
+ for (unsigned int face_no=0; face_no < GeometryInfo<dim>::faces_per_cell;
+ ++face_no)
+ if (boundary_ids.find(cell->face(face_no)->boundary_indicator())
+ != boundary_ids.end())
+ {
+ typename DH<dim,spacedim>::face_iterator face = cell->face(face_no);
- for (unsigned int k=0; k<fe.dofs_per_face; ++k)
- if ((k != i)
- &&
- (unit_support_points[k] == unit_support_points[i])
- &&
- (fe.face_system_to_component_index(k).first >=
- first_vector_component)
- &&
- (fe.face_system_to_component_index(k).first <
- first_vector_component + dim))
- vector_dofs.dof_indices[fe.face_system_to_component_index(k).first -
- first_vector_component]
- = face_dofs[k];
-
- for (unsigned int d=0; d<dim; ++d)
- Assert (vector_dofs.dof_indices[d] < dof_handler.n_dofs(),
- ExcInternalError());
-
- // and enter the
- // (dofs,(normal_vector,cell))
- // entry into the map
- dof_to_normals_map
- .insert (std::make_pair (vector_dofs,
- std::make_pair (fe_values.normal_vector(i),
- cell)));
- }
- }
+ // get the indices of the
+ // dofs on this cell...
+ face->get_dof_indices (face_dofs, cell->active_fe_index());
+
+ // ...and the normal
+ // vectors at the locations
+ // where they are defined:
+ const std::vector<Point<dim-1> > &
+ unit_support_points = fe.get_unit_face_support_points();
+ Quadrature<dim-1> aux_quad (unit_support_points);
+ FEFaceValues<dim> fe_values (mapping, fe, aux_quad,
+ update_normal_vectors);
+ fe_values.reinit(cell, face_no);
+
+ // then identify which of
+ // them correspond to the
+ // selected set of vector
+ // components
+ for (unsigned int i=0; i<face_dofs.size(); ++i)
+ if (fe.face_system_to_component_index(i).first ==
+ first_vector_component)
+ {
+ // find corresponding other
+ // components of vector
+ internal::VectorTools::VectorDoFTuple<dim> vector_dofs;
+ vector_dofs.dof_indices[0] = face_dofs[i];
+
+ for (unsigned int k=0; k<fe.dofs_per_face; ++k)
+ if ((k != i)
+ &&
+ (unit_support_points[k] == unit_support_points[i])
+ &&
+ (fe.face_system_to_component_index(k).first >=
+ first_vector_component)
+ &&
+ (fe.face_system_to_component_index(k).first <
+ first_vector_component + dim))
+ vector_dofs.dof_indices[fe.face_system_to_component_index(k).first -
+ first_vector_component]
+ = face_dofs[k];
+
+/* for (unsigned int d=0; d<dim; ++d)
+ Assert (vector_dofs.dof_indices[d] < dof_handler.n_dofs(),
+ ExcInternalError());*/
+
+ // and enter the
+ // (dofs,(normal_vector,cell))
+ // entry into the map
+ dof_to_normals_map
+ .insert (std::make_pair (vector_dofs,
+ std::make_pair (fe_values.normal_vector(i),
+ cell)));
+ }
+ }
// Now do something with the
// collected information. To this
// constrained
for (unsigned int i=0; i<dim; ++i)
if (!constraints.is_constrained (same_dof_range[0]
- ->first.dof_indices[i]))
+ ->first.dof_indices[i])
+ &&
+ constraints.can_store_line(
+ same_dof_range[0]->first.dof_indices[i]))
{
constraints.add_line (same_dof_range[0]->first.dof_indices[i]);
// no add_entries here
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+
+#include <base/utilities.h>
+#include <lac/vector.h>
+#include <lac/block_vector.h>
+#include <lac/petsc_vector.h>
+#include <lac/petsc_block_vector.h>
+#include <lac/trilinos_vector.h>
+#include <lac/trilinos_block_vector.h>
+
+#ifdef DEAL_II_USE_P4EST
+
+#include <grid/grid_refinement.h>
+#include <grid/tria_accessor.h>
+#include <grid/tria_iterator.h>
+#include <grid/tria.h>
+
+#include <distributed/grid_refinement.h>
+
+#include <numeric>
+#include <algorithm>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace
+{
+ template <typename number>
+ inline
+ number
+ max_element (const Vector<number> &criteria)
+ {
+ return *std::max_element(criteria.begin(), criteria.end());
+ }
+
+
+
+ template <typename number>
+ inline
+ number
+ min_element (const Vector<number> &criteria)
+ {
+ return *std::min_element(criteria.begin(), criteria.end());
+ }
+
+
+ /**
+ * Compute the global max and min
+ * of the criteria vector. These
+ * are returned only on the
+ * processor with rank zero; all
+ * others get a pair of zeros.
+ */
+ template <typename number>
+ std::pair<double,double>
+ compute_global_min_and_max_at_root (const Vector<number> &criteria,
+ MPI_Comm mpi_communicator)
+ {
+ // we'd like to compute the
+ // global max and min from the
+ // local ones in a single MPI
+ // communication. we can do that
+ // by reducing, with an elementwise
+ // minimum over all processors, the
+ // pair consisting of the local min
+ // and the negative of the local max
+
+ const double local_min = min_element (criteria),
+ local_max = max_element (criteria);
+ double comp[2] = { local_min, -local_max };
+ double result[2] = { 0, 0 };
+
+ // compute the minimum on
+ // processor zero
+ MPI_Reduce (&comp, &result, 2, MPI_DOUBLE,
+ MPI_MIN, 0, mpi_communicator);
+
+ // make sure only processor zero
+ // got something
+ if (Utilities::System::get_this_mpi_process (mpi_communicator) != 0)
+ Assert ((result[0] == 0) && (result[1] == 0),
+ ExcInternalError());
+
+ return std::make_pair (result[0], -result[1]);
+ }
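+ // As an illustration of the reduction trick above (with made-up
+ // numbers): if rank 0 has local (min,max) = (1,5) and rank 1 has
+ // (2,7), the two processors contribute {1,-5} and {2,-7}; the
+ // elementwise MPI_MIN yields {1,-7} on rank 0, from which we
+ // recover the global minimum 1 and the global maximum 7.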
+
+
+
+ /**
+ * Compute the global sum over the elements
+ * of the vectors passed to this function
+ * on all processors. This number is
+ * returned only on the processor with rank
+ * zero; all others get zero.
+ */
+ template <typename number>
+ double
+ compute_global_sum (const Vector<number> &criteria,
+ MPI_Comm mpi_communicator)
+ {
+ double my_sum = std::accumulate (criteria.begin(),
+ criteria.end(),
+ /* do accumulation in the correct data type: */
+ number());
+
+ double result = 0;
+ // compute the sum on
+ // processor zero
+ MPI_Reduce (&my_sum, &result, 1, MPI_DOUBLE,
+ MPI_SUM, 0, mpi_communicator);
+
+ // make sure only processor zero
+ // got something
+ if (Utilities::System::get_this_mpi_process (mpi_communicator) != 0)
+ Assert (result == 0, ExcInternalError());
+
+ return result;
+ }
+
+
+
+ /**
+ * Given a vector of refinement criteria
+ * for all cells of a mesh (locally owned
+ * or not), extract those that pertain to
+ * locally owned cells.
+ */
+ template <int dim, int spacedim, class Vector>
+ void
+ get_locally_owned_indicators (const parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ dealii::Vector<float> &locally_owned_indicators)
+ {
+ Assert (locally_owned_indicators.size() == tria.n_locally_owned_active_cells(),
+ ExcInternalError());
+
+ unsigned int active_index = 0;
+ unsigned int owned_index = 0;
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = tria.begin_active();
+ cell != tria.end(); ++cell, ++active_index)
+ if (cell->subdomain_id() == tria.locally_owned_subdomain())
+ {
+ locally_owned_indicators(owned_index)
+ = criteria(active_index);
+ ++owned_index;
+ }
+ Assert (owned_index == tria.n_locally_owned_active_cells(),
+ ExcInternalError());
+ Assert ((active_index == tria.dealii::Triangulation<dim,spacedim>::n_active_cells()),
+ ExcInternalError());
+ }
+
+
+ // we compute refinement
+ // thresholds by bisection of the
+ // interval spanned by the
+ // smallest and largest error
+ // indicator. this leads to a
+ // small problem: if, for
+ // example, we want to coarsen
+ // zero per cent of the cells,
+ // then we need to pick a
+ // threshold equal to the
+ // smallest indicator, but of
+ // course the bisection algorithm
+ // can never find a threshold
+ // equal to one of the end points
+ // of the interval. So we
+ // slightly widen the interval
+ // before we even start
+ void adjust_interesting_range (double (&interesting_range)[2])
+ {
+ Assert (interesting_range[0] <= interesting_range[1],
+ ExcInternalError());
+
+ if (interesting_range[0] > 0)
+ interesting_range[0] *= 0.99;
+ else
+ interesting_range[0]
+ -= 0.01 * (interesting_range[1] - interesting_range[0]);
+
+ if (interesting_range[1] > 0)
+ interesting_range[1] *= 1.01;
+ else
+ interesting_range[1]
+ += 0.01 * (interesting_range[1] - interesting_range[0]);
+ }
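+ // For example (illustrative numbers only): the range [2,10] is
+ // widened to [1.98,10.1], and a range starting at zero such as
+ // [0,10] becomes [-0.1,10.1], so that the bisection below can
+ // actually reach both of the original end points.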
+
+
+
+ /**
+ * Given a vector of criteria and bottom
+ * and top thresholds for coarsening and
+ * refinement, mark all those cells that we
+ * locally own as appropriate for
+ * coarsening or refinement.
+ */
+ template <int dim, int spacedim, class Vector>
+ void
+ mark_cells (parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ const double top_threshold,
+ const double bottom_threshold)
+ {
+ dealii::GridRefinement::refine (tria, criteria, top_threshold);
+ dealii::GridRefinement::coarsen (tria, criteria, bottom_threshold);
+
+ // as a final good measure,
+ // delete all flags again
+ // from cells that we don't
+ // locally own
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = tria.begin_active();
+ cell != tria.end(); ++cell)
+ if (cell->subdomain_id() != tria.locally_owned_subdomain())
+ {
+ cell->clear_refine_flag ();
+ cell->clear_coarsen_flag ();
+ }
+ }
+
+
+
+
+ namespace RefineAndCoarsenFixedNumber
+ {
+ /**
+ * Compute a threshold value so
+ * that exactly n_target_cells have
+ * a value that is larger.
+ */
+ template <typename number>
+ number
+ master_compute_threshold (const Vector<number> &criteria,
+ const std::pair<double,double> global_min_and_max,
+ const unsigned int n_target_cells,
+ MPI_Comm mpi_communicator)
+ {
+ double interesting_range[2] = { global_min_and_max.first,
+ global_min_and_max.second };
+ adjust_interesting_range (interesting_range);
+
+ unsigned int iteration = 0;
+
+ do
+ {
+ MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ if (interesting_range[0] == interesting_range[1])
+ return interesting_range[0];
+
+ const double test_threshold
+ = (interesting_range[0] > 0
+ ?
+ std::sqrt(interesting_range[0] *
+ interesting_range[1])
+ :
+ (interesting_range[0] + interesting_range[1]) / 2);
+
+ // count how many of our own
+ // elements would be above
+ // this threshold and then
+ // add to it the number for
+ // all the others
+ unsigned int
+ my_count = std::count_if (criteria.begin(),
+ criteria.end(),
+ std::bind2nd (std::greater<double>(),
+ test_threshold));
+
+ unsigned int total_count;
+ MPI_Reduce (&my_count, &total_count, 1, MPI_UNSIGNED,
+ MPI_SUM, 0, mpi_communicator);
+
+ // now adjust the range. if
+ // we have too many cells, we
+ // take the upper half of the
+ // previous range, otherwise
+ // the lower half. if we have
+ // hit the right number, then
+ // set the range to the exact
+ // value
+ if (total_count > n_target_cells)
+ interesting_range[0] = test_threshold;
+ else if (total_count < n_target_cells)
+ interesting_range[1] = test_threshold;
+ else
+ interesting_range[0] = interesting_range[1] = test_threshold;
+
+ // terminate the iteration
+ // after at most 25 go-arounds.
+ // this is necessary because
+ // oftentimes error
+ // indicators on cells have
+ // exactly the same value,
+ // and so there may not be a
+ // particular value that cuts
+ // the indicators in such a
+ // way that we can achieve
+ // the desired number of
+ // cells. using a maximum of 25
+ // iterations means that we
+ // terminate the iteration
+ // after 25 steps if the
+ // indicators were perfectly
+ // badly distributed, and we
+ // make a mistake of at most
+ // 1/2^25 in the number of
+ // cells flagged if the
+ // indicators are perfectly
+ // equidistributed
+ ++iteration;
+ if (iteration == 25)
+ interesting_range[0] = interesting_range[1] = test_threshold;
+ }
+ while (true);
+
+ Assert (false, ExcInternalError());
+ return -1;
+ }
+
+
+ /**
+ * The corresponding function to
+ * the one above, to be run on the
+ * slaves.
+ */
+ template <typename number>
+ number
+ slave_compute_threshold (const Vector<number> &criteria,
+ MPI_Comm mpi_communicator)
+ {
+ do
+ {
+ double interesting_range[2] = { -1, -1 };
+ MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ if (interesting_range[0] == interesting_range[1])
+ return interesting_range[0];
+
+ // count how many elements
+ // there are that are bigger
+ // than the following trial
+ // threshold
+ const double test_threshold
+ = (interesting_range[0] > 0
+ ?
+ std::exp((std::log(interesting_range[0]) +
+ std::log(interesting_range[1])) / 2)
+ :
+ (interesting_range[0] + interesting_range[1]) / 2);
+ unsigned int
+ my_count = std::count_if (criteria.begin(),
+ criteria.end(),
+ std::bind2nd (std::greater<double>(),
+ test_threshold));
+
+ MPI_Reduce (&my_count, 0, 1, MPI_UNSIGNED,
+ MPI_SUM, 0, mpi_communicator);
+ }
+ while (true);
+
+ Assert (false, ExcInternalError());
+ return -1;
+ }
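+ // Taken together, master_compute_threshold() and
+ // slave_compute_threshold() implement a single bisection loop
+ // that runs in lock step on all processors: in every round the
+ // master broadcasts the current interval, each processor counts
+ // its local cells above the trial threshold, an MPI_Reduce
+ // collects the total on the master, and the master then shrinks
+ // the interval. All processors leave their loops once the
+ // broadcast interval has collapsed to a single value.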
+ }
+
+
+
+ namespace RefineAndCoarsenFixedFraction
+ {
+ /**
+ * Compute a threshold value so
+ * that the error accumulated over
+ * all cells with a larger value
+ * adds up to target_error.
+ */
+ template <typename number>
+ number
+ master_compute_threshold (const Vector<number> &criteria,
+ const std::pair<double,double> global_min_and_max,
+ const double target_error,
+ MPI_Comm mpi_communicator)
+ {
+ double interesting_range[2] = { global_min_and_max.first,
+ global_min_and_max.second };
+ adjust_interesting_range (interesting_range);
+
+ unsigned int iteration = 0;
+
+ do
+ {
+ MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ if (interesting_range[0] == interesting_range[1])
+ return interesting_range[0];
+
+ const double test_threshold
+ = (interesting_range[0] > 0
+ ?
+ std::exp((std::log(interesting_range[0]) +
+ std::log(interesting_range[1])) / 2)
+ :
+ (interesting_range[0] + interesting_range[1]) / 2);
+
+ // accumulate the error of our
+ // own elements above this
+ // threshold and then add to it
+ // the error of all the other
+ // processors
+ double my_error = 0;
+ for (unsigned int i=0; i<criteria.size(); ++i)
+ if (criteria(i) > test_threshold)
+ my_error += criteria(i);
+
+ double total_error;
+ MPI_Reduce (&my_error, &total_error, 1, MPI_DOUBLE,
+ MPI_SUM, 0, mpi_communicator);
+
+ // now adjust the range. if
+ // we have accumulated too much
+ // error, we take the upper half
+ // of the previous range, otherwise
+ // the lower half. if we have
+ // hit the target exactly, then
+ // set the range to the exact
+ // value
+ if (total_error > target_error)
+ interesting_range[0] = test_threshold;
+ else if (total_error < target_error)
+ interesting_range[1] = test_threshold;
+ else
+ interesting_range[0] = interesting_range[1] = test_threshold;
+
+ // terminate the iteration
+ // after at most 25 go-arounds.
+ // this is necessary because
+ // oftentimes error
+ // indicators on cells have
+ // exactly the same value,
+ // and so there may not be a
+ // particular value that cuts
+ // the indicators in such a
+ // way that we can achieve
+ // the desired amount of
+ // error. using a maximum of 25
+ // iterations means that we
+ // terminate the iteration
+ // after 25 steps if the
+ // indicators were perfectly
+ // badly distributed, and we
+ // make only a small mistake in
+ // the amount of error flagged
+ // for refinement if the
+ // indicators are perfectly
+ // equidistributed
+ ++iteration;
+ if (iteration == 25)
+ interesting_range[0] = interesting_range[1] = test_threshold;
+ }
+ while (true);
+
+ Assert (false, ExcInternalError());
+ return -1;
+ }
+
+
+ /**
+ * The corresponding function to
+ * the one above, to be run on the
+ * slaves.
+ */
+ template <typename number>
+ number
+ slave_compute_threshold (const Vector<number> &criteria,
+ MPI_Comm mpi_communicator)
+ {
+ do
+ {
+ double interesting_range[2] = { -1, -1 };
+ MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ if (interesting_range[0] == interesting_range[1])
+ return interesting_range[0];
+
+ // accumulate the error over
+ // the elements that are bigger
+ // than the following trial
+ // threshold
+ const double test_threshold
+ = (interesting_range[0] > 0
+ ?
+ std::exp((std::log(interesting_range[0]) +
+ std::log(interesting_range[1])) / 2)
+ :
+ (interesting_range[0] + interesting_range[1]) / 2);
+
+ double my_error = 0;
+ for (unsigned int i=0; i<criteria.size(); ++i)
+ if (criteria(i) > test_threshold)
+ my_error += criteria(i);
+
+ MPI_Reduce (&my_error, 0, 1, MPI_DOUBLE,
+ MPI_SUM, 0, mpi_communicator);
+ }
+ while (true);
+
+ Assert (false, ExcInternalError());
+ return -1;
+ }
+ }
+}
+
+
+
+namespace parallel
+{
+ namespace distributed
+ {
+ namespace GridRefinement
+ {
+ template <int dim, class Vector, int spacedim>
+ void
+ refine_and_coarsen_fixed_number (
+ parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ const double top_fraction_of_cells,
+ const double bottom_fraction_of_cells)
+ {
+ Assert ((top_fraction_of_cells>=0) && (top_fraction_of_cells<=1),
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert ((bottom_fraction_of_cells>=0) && (bottom_fraction_of_cells<=1),
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert (top_fraction_of_cells+bottom_fraction_of_cells <= 1,
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert (criteria.is_non_negative (),
+ dealii::GridRefinement::ExcNegativeCriteria());
+
+ // first extract from the
+ // vector of indicators the
+ // ones that correspond to
+ // cells that we locally own
+ dealii::Vector<float>
+ locally_owned_indicators (tria.n_locally_owned_active_cells());
+ get_locally_owned_indicators (tria,
+ criteria,
+ locally_owned_indicators);
+
+ MPI_Comm mpi_communicator = tria.get_communicator ();
+
+ // figure out the global
+ // max and min of the
+ // indicators. we don't
+ // need it here, but it's a
+ // collective communication
+ // call
+ const std::pair<double,double> global_min_and_max
+ = compute_global_min_and_max_at_root (locally_owned_indicators,
+ mpi_communicator);
+
+ // from here on designate a
+ // master and slaves
+ double top_threshold, bottom_threshold;
+ if (Utilities::System::get_this_mpi_process (mpi_communicator) == 0)
+ {
+ // this is the master
+ // processor
+ top_threshold
+ =
+ RefineAndCoarsenFixedNumber::
+ master_compute_threshold (locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>
+ (top_fraction_of_cells *
+ tria.n_global_active_cells()),
+ mpi_communicator);
+
+ // compute bottom
+ // threshold only if
+ // necessary. otherwise
+ // use a threshold lower
+ // than the smallest
+ // value we have locally
+ if (bottom_fraction_of_cells > 0)
+ bottom_threshold
+ =
+ RefineAndCoarsenFixedNumber::
+ master_compute_threshold (locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>
+ ((1-bottom_fraction_of_cells) *
+ tria.n_global_active_cells()),
+ mpi_communicator);
+ else
+ {
+ bottom_threshold = *std::min_element (criteria.begin(),
+ criteria.end());
+ bottom_threshold -= std::fabs(bottom_threshold);
+ }
+ }
+ else
+ {
+ // this is a slave
+ // processor
+ top_threshold
+ =
+ RefineAndCoarsenFixedNumber::
+ slave_compute_threshold (locally_owned_indicators,
+ mpi_communicator);
+ // compute bottom
+ // threshold only if
+ // necessary
+ if (bottom_fraction_of_cells > 0)
+ bottom_threshold
+ =
+ RefineAndCoarsenFixedNumber::
+ slave_compute_threshold (locally_owned_indicators,
+ mpi_communicator);
+ else
+ {
+ bottom_threshold = *std::min_element (criteria.begin(),
+ criteria.end());
+ bottom_threshold -= std::fabs(bottom_threshold);
+ }
+ }
+
+ // now refine the mesh
+ mark_cells (tria, criteria, top_threshold, bottom_threshold);
+ }
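+ // A rough usage sketch (the object names and the fractions are
+ // only illustrative; the error indicators would typically come
+ // from an error estimator):
+ //
+ //   Vector<float> estimated_error_per_cell (tria.n_active_cells());
+ //   ... fill the indicators ...
+ //   parallel::distributed::GridRefinement::
+ //     refine_and_coarsen_fixed_number (tria,
+ //                                      estimated_error_per_cell,
+ //                                      0.3, 0.03);
+ //   tria.execute_coarsening_and_refinement ();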
+
+
+ template <int dim, class Vector, int spacedim>
+ void
+ refine_and_coarsen_fixed_fraction (
+ parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const Vector &criteria,
+ const double top_fraction_of_error,
+ const double bottom_fraction_of_error)
+ {
+ Assert ((top_fraction_of_error>=0) && (top_fraction_of_error<=1),
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert ((bottom_fraction_of_error>=0) && (bottom_fraction_of_error<=1),
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert (top_fraction_of_error+bottom_fraction_of_error <= 1,
+ dealii::GridRefinement::ExcInvalidParameterValue());
+ Assert (criteria.is_non_negative (),
+ dealii::GridRefinement::ExcNegativeCriteria());
+
+ // first extract from the
+ // vector of indicators the
+ // ones that correspond to
+ // cells that we locally own
+ dealii::Vector<float>
+ locally_owned_indicators (tria.n_locally_owned_active_cells());
+ get_locally_owned_indicators (tria,
+ criteria,
+ locally_owned_indicators);
+
+ MPI_Comm mpi_communicator = tria.get_communicator ();
+
+ // figure out the global
+ // max and min of the
+ // indicators. we don't
+ // need it here, but it's a
+ // collective communication
+ // call
+ const std::pair<double,double> global_min_and_max
+ = compute_global_min_and_max_at_root (locally_owned_indicators,
+ mpi_communicator);
+
+ const double total_error
+ = compute_global_sum (locally_owned_indicators,
+ mpi_communicator);
+
+ // from here on designate a
+ // master and slaves
+ double top_threshold, bottom_threshold;
+ if (Utilities::System::get_this_mpi_process (mpi_communicator) == 0)
+ {
+ // this is the master
+ // processor
+ top_threshold
+ =
+ RefineAndCoarsenFixedFraction::
+ master_compute_threshold (locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>
+ (top_fraction_of_error *
+ total_error),
+ mpi_communicator);
+
+ // compute bottom
+ // threshold only if
+ // necessary. otherwise
+ // use a threshold lower
+ // than the smallest
+ // value we have locally
+ if (bottom_fraction_of_error > 0)
+ bottom_threshold
+ =
+ RefineAndCoarsenFixedFraction::
+ master_compute_threshold (locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>
+ ((1-bottom_fraction_of_error) *
+ total_error),
+ mpi_communicator);
+ else
+ {
+ bottom_threshold = *std::min_element (criteria.begin(),
+ criteria.end());
+ bottom_threshold -= std::fabs(bottom_threshold);
+ }
+ }
+ else
+ {
+ // this is a slave
+ // processor
+ top_threshold
+ =
+ RefineAndCoarsenFixedFraction::
+ slave_compute_threshold (locally_owned_indicators,
+ mpi_communicator);
+
+ // compute bottom
+ // threshold only if
+ // necessary. otherwise
+ // use a threshold lower
+ // than the smallest
+ // value we have locally
+ if (bottom_fraction_of_error > 0)
+ bottom_threshold
+ =
+ RefineAndCoarsenFixedFraction::
+ slave_compute_threshold (locally_owned_indicators,
+ mpi_communicator);
+ else
+ {
+ bottom_threshold = *std::min_element (criteria.begin(),
+ criteria.end());
+ bottom_threshold -= std::fabs(bottom_threshold);
+ }
+ }
+
+ // now refine the mesh
+ mark_cells (tria, criteria, top_threshold, bottom_threshold);
+ }
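+ // In contrast to refine_and_coarsen_fixed_number() above, the
+ // thresholds computed here are chosen such that the cells
+ // flagged for refinement and coarsening account for the given
+ // fractions of the total error rather than of the total number
+ // of cells.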
+ }
+ }
+}
+
+
+// explicit instantiations
+#include "grid_refinement.inst"
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+
+for (S : REAL_SCALARS)
+{
+#if deal_II_dimension != 1
+namespace parallel
+\{
+ namespace distributed
+ \{
+ namespace GridRefinement
+ \{
+ template
+ void
+ refine_and_coarsen_fixed_number<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (parallel::distributed::Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double);
+
+ template
+ void
+ refine_and_coarsen_fixed_fraction<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (parallel::distributed::Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double);
+ \}
+ \}
+\}
+#endif
+}
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+#include <base/config.h>
+
+#ifdef DEAL_II_USE_P4EST
+
+#include <lac/vector.h>
+#include <lac/petsc_vector.h>
+#include <lac/trilinos_vector.h>
+#include <lac/block_vector.h>
+#include <lac/petsc_block_vector.h>
+#include <lac/trilinos_vector.h>
+#include <lac/trilinos_block_vector.h>
+
+#include <distributed/solution_transfer.h>
+#include <distributed/tria.h>
+#include <dofs/dof_tools.h>
+#include <dofs/dof_accessor.h>
+#include <grid/tria_accessor.h>
+#include <grid/tria_iterator.h>
+
+#include <base/std_cxx1x/bind.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+ namespace distributed
+ {
+
+ namespace
+ {
+ template <class VECTOR>
+ void compress_vector_insert(VECTOR & vec)
+ {
+ vec.compress();
+ }
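+ // For the Trilinos vector classes we have to state explicitly
+ // that the preceding writes were element insertions, so the
+ // overloads below call compress(Insert); block vectors are
+ // compressed block by block.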
+
+#ifdef DEAL_II_USE_TRILINOS
+ void compress_vector_insert(TrilinosWrappers::Vector & vec)
+ {
+ vec.compress(Insert);
+ }
+
+ void compress_vector_insert(TrilinosWrappers::BlockVector & vec)
+ {
+ for (unsigned int i=0;i<vec.n_blocks();++i)
+ vec.block(i).compress(Insert);
+ }
+
+ void compress_vector_insert(TrilinosWrappers::MPI::Vector & vec)
+ {
+ vec.compress(Insert);
+ }
+
+ void compress_vector_insert(TrilinosWrappers::MPI::BlockVector & vec)
+ {
+ for (unsigned int i=0;i<vec.n_blocks();++i)
+ vec.block(i).compress(Insert);
+ }
+#endif
+ }
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ SolutionTransfer<dim, VECTOR, DH>::SolutionTransfer(const DH &dof)
+ :
+ dof_handler(&dof, typeid(*this).name())
+ {}
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ SolutionTransfer<dim, VECTOR, DH>::~SolutionTransfer()
+ {}
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ prepare_for_coarsening_and_refinement (const std::vector<const VECTOR*> &all_in)
+ {
+ Assert(all_in.size() > 0, ExcMessage("Please transfer at least one vector!"));
+ input_vectors = all_in;
+ SolutionTransfer<dim, VECTOR, DH> *ptr = this;
+
+//TODO: casting away constness is bad
+ parallel::distributed::Triangulation<dim> * tria
+ = (dynamic_cast<parallel::distributed::Triangulation<dim>*>
+ (const_cast<dealii::Triangulation<dim>*>
+ (&dof_handler->get_tria())));
+ Assert (tria != 0, ExcInternalError());
+
+ offset
+ = tria->register_data_attach(static_cast<size_t>
+ (get_data_size() * input_vectors.size()),
+ std_cxx1x::bind(&SolutionTransfer<dim, VECTOR, DH>::pack_callback,
+ ptr,
+ _1,
+ _2,
+ _3));
+ }
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ prepare_for_coarsening_and_refinement (const VECTOR &in)
+ {
+ std::vector<const VECTOR*> all_in(1, &in);
+ prepare_for_coarsening_and_refinement(all_in);
+ }
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ interpolate (std::vector<VECTOR*> &all_out)
+ {
+ Assert(input_vectors.size()==all_out.size(),
+ ExcDimensionMismatch(input_vectors.size(), all_out.size()) );
+
+//TODO: casting away constness is bad
+ parallel::distributed::Triangulation<dim> * tria
+ = (dynamic_cast<parallel::distributed::Triangulation<dim>*>
+ (const_cast<dealii::Triangulation<dim>*>
+ (&dof_handler->get_tria())));
+ Assert (tria != 0, ExcInternalError());
+
+ tria->notify_ready_to_unpack(offset,
+ std_cxx1x::bind(&SolutionTransfer<dim, VECTOR, DH>::unpack_callback,
+ this,
+ _1,
+ _2,
+ _3,
+ std_cxx1x::ref(all_out)));
+
+ for (typename std::vector<VECTOR*>::iterator it=all_out.begin();
+ it !=all_out.end();
+ ++it)
+ compress_vector_insert(*(*it));
+
+ input_vectors.clear();
+ }
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ interpolate (VECTOR &out)
+ {
+ std::vector<VECTOR*> all_out(1, &out);
+ interpolate(all_out);
+ }
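+ // A rough usage sketch (object names and the vector type are
+ // only illustrative; refinement flags are assumed to have been
+ // set before prepare_for_coarsening_and_refinement() is called):
+ //
+ //   SolutionTransfer<dim, Vector<double>, DoFHandler<dim> >
+ //     soltrans (dof_handler);
+ //   soltrans.prepare_for_coarsening_and_refinement (solution);
+ //   tria.execute_coarsening_and_refinement ();
+ //   dof_handler.distribute_dofs (fe);
+ //   Vector<double> interpolated_solution (dof_handler.n_dofs());
+ //   soltrans.interpolate (interpolated_solution);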
+
+
+
+ template<int dim, typename VECTOR, class DH>
+ unsigned int
+ SolutionTransfer<dim, VECTOR, DH>::
+ get_data_size() const
+ {
+ return sizeof(double) * DoFTools::max_dofs_per_cell(*dof_handler);
+ }
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ pack_callback(const typename Triangulation<dim,dim>::cell_iterator & cell_,
+ const typename Triangulation<dim,dim>::CellStatus /*status*/,
+ void* data)
+ {
+ double *data_store = reinterpret_cast<double *>(data);
+
+ typename DH::cell_iterator
+ cell(&dof_handler->get_tria(), cell_->level(), cell_->index(), dof_handler);
+
+ const unsigned int dofs_per_cell=cell->get_fe().dofs_per_cell;
+ Vector<double> dofvalues(dofs_per_cell);
+ for (typename std::vector<const VECTOR*>::iterator it=input_vectors.begin();
+ it !=input_vectors.end();
+ ++it)
+ {
+ cell->get_interpolated_dof_values(*(*it), dofvalues);
+ std::memcpy(data_store, &dofvalues(0), sizeof(double)*dofs_per_cell);
+ data_store += dofs_per_cell;
+ }
+ }
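+ // The buffer that p4est provides for each cell therefore holds,
+ // for every input vector in turn, dofs_per_cell values of type
+ // double; unpack_callback() below reads them back in the same
+ // order.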
+
+
+ template<int dim, typename VECTOR, class DH>
+ void
+ SolutionTransfer<dim, VECTOR, DH>::
+ unpack_callback(const typename Triangulation<dim,dim>::cell_iterator & cell_,
+ const typename Triangulation<dim,dim>::CellStatus /*status*/,
+ const void* data,
+ std::vector<VECTOR*> &all_out)
+ {
+ typename DH::cell_iterator
+ cell(&dof_handler->get_tria(), cell_->level(), cell_->index(), dof_handler);
+
+ const unsigned int dofs_per_cell=cell->get_fe().dofs_per_cell;
+ Vector<double> dofvalues(dofs_per_cell);
+ const double *data_store = reinterpret_cast<const double *>(data);
+
+ for (typename std::vector<VECTOR*>::iterator it = all_out.begin();
+ it != all_out.end();
+ ++it)
+ {
+ std::memcpy(&dofvalues(0), data_store, sizeof(double)*dofs_per_cell);
+ cell->set_dof_values_by_interpolation(dofvalues, *(*it));
+ data_store += dofs_per_cell;
+ }
+ }
+
+
+ }
+}
+
+
+// explicit instantiations
+
+namespace parallel
+{
+ namespace distributed
+ {
+#if deal_II_dimension > 1
+ template class SolutionTransfer<deal_II_dimension,Vector<double>, DoFHandler<deal_II_dimension> >;
+
+
+#ifdef DEAL_II_USE_PETSC
+ template class SolutionTransfer<deal_II_dimension, PETScWrappers::Vector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, PETScWrappers::BlockVector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, PETScWrappers::MPI::Vector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, PETScWrappers::MPI::BlockVector, DoFHandler<deal_II_dimension> >;
+
+#endif
+
+#ifdef DEAL_II_USE_TRILINOS
+ template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::Vector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::BlockVector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::MPI::Vector, DoFHandler<deal_II_dimension> >;
+
+ template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::MPI::BlockVector, DoFHandler<deal_II_dimension> >;
+#endif
+#endif
+ }
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+#include <base/utilities.h>
+#include <base/memory_consumption.h>
+#include <base/logstream.h>
+#include <lac/sparsity_tools.h>
+#include <lac/sparsity_pattern.h>
+#include <grid/tria.h>
+#include <grid/tria_accessor.h>
+#include <grid/tria_iterator.h>
+#include <distributed/tria.h>
+#include <grid/grid_tools.h>
+
+#ifdef DEAL_II_USE_P4EST
+# include <p4est_bits.h>
+# include <p4est_extended.h>
+# include <p4est_vtk.h>
+# include <p4est_ghost.h>
+
+# include <p8est_bits.h>
+# include <p8est_extended.h>
+# include <p8est_vtk.h>
+# include <p8est_ghost.h>
+#endif
+
+#include <algorithm>
+#include <numeric>
+
+#include <base/timer.h>
+
+# include <iostream>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+#ifdef DEAL_II_USE_P4EST
+
+namespace internal
+{
+ namespace p4est
+ {
+ /**
+ * A structure whose explicit
+ * specializations contain
+ * pointers to the relevant
+ * p4est_* and p8est_*
+ * functions. Using this
+ * structure, for example by
+ * saying
+ * functions<dim>::quadrant_compare,
+ * we can write code in a
+ * dimension independent way,
+ * either calling
+ * p4est_quadrant_compare or
+ * p8est_quadrant_compare,
+ * depending on template
+ * argument.
+ */
+ template <int dim> struct functions;
+
+ template <> struct functions<2>
+ {
+ static
+ int (&quadrant_compare) (const void *v1, const void *v2);
+
+ static
+ void (&quadrant_childrenv) (const types<2>::quadrant * q,
+ types<2>::quadrant c[]);
+
+ static
+ int (&quadrant_overlaps_tree) (types<2>::tree * tree,
+ const types<2>::quadrant * q);
+
+ static
+ void (&quadrant_set_morton) (types<2>::quadrant * quadrant,
+ int level,
+ uint64_t id);
+
+ static
+ int (&quadrant_is_equal) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2);
+
+ static
+ int (&quadrant_is_sibling) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2);
+
+ static
+ int (&quadrant_is_ancestor) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2);
+
+ static
+ types<2>::connectivity * (&connectivity_new) (types<2>::topidx num_vertices,
+ types<2>::topidx num_trees,
+ types<2>::topidx num_corners,
+ types<2>::topidx num_vtt);
+
+ static
+ void (&connectivity_destroy) (p4est_connectivity_t *connectivity);
+
+ static
+ types<2>::forest * (&new_forest) (MPI_Comm mpicomm,
+ types<2>::connectivity * connectivity,
+ types<2>::locidx min_quadrants,
+ int min_level,
+ int fill_uniform,
+ size_t data_size,
+ p4est_init_t init_fn,
+ void *user_pointer);
+
+ static
+ void (&destroy) (types<2>::forest * p4est);
+
+ static
+ void (&refine) (types<2>::forest * p4est,
+ int refine_recursive,
+ p4est_refine_t refine_fn,
+ p4est_init_t init_fn);
+
+ static
+ void (&coarsen) (types<2>::forest * p4est,
+ int coarsen_recursive,
+ p4est_coarsen_t coarsen_fn,
+ p4est_init_t init_fn);
+
+ static
+ void (&balance) (types<2>::forest * p4est,
+ types<2>::balance_type btype,
+ p4est_init_t init_fn);
+
+ static
+ void (&partition) (types<2>::forest * p4est,
+ int partition_for_coarsening,
+ p4est_weight_t weight_fn);
+
+ static
+ void (&save) (const char *filename,
+ types<2>::forest * p4est,
+ int save_data);
+
+ static
+ types<2>::forest * (&load) (const char *filename,
+ MPI_Comm mpicomm,
+ std::size_t data_size,
+ int load_data,
+ void *user_pointer,
+ types<2>::connectivity ** p4est);
+
+ static
+ void (&connectivity_save) (const char *filename,
+ types<2>::connectivity * connectivity);
+
+ static
+ types<2>::connectivity * (&connectivity_load) (const char *filename,
+ long *length);
+
+ static
+ unsigned (&checksum) (types<2>::forest * p4est);
+
+ static
+ void (&vtk_write_file) (types<2>::forest * p4est,
+ p4est_geometry_t*,
+ const char *baseName);
+
+ static
+ types<2>::ghost* (&ghost_new) (types<2>::forest * p4est,
+ types<2>::balance_type btype);
+
+ static
+ void (&ghost_destroy) (types<2>::ghost * ghost);
+
+ static
+ void (&reset_data) (types<2>::forest * p4est,
+ size_t data_size,
+ p4est_init_t init_fn,
+ void *user_pointer);
+
+ static
+ size_t (&forest_memory_used) (types<2>::forest * p4est);
+
+ static
+ size_t (&connectivity_memory_used) (types<2>::connectivity * p4est);
+ };
+
+ int (&functions<2>::quadrant_compare) (const void *v1, const void *v2)
+ = p4est_quadrant_compare;
+
+ void (&functions<2>::quadrant_childrenv) (const types<2>::quadrant * q,
+ types<2>::quadrant c[])
+ = p4est_quadrant_childrenv;
+
+ int (&functions<2>::quadrant_overlaps_tree) (types<2>::tree * tree,
+ const types<2>::quadrant * q)
+ = p4est_quadrant_overlaps_tree;
+
+ void (&functions<2>::quadrant_set_morton) (types<2>::quadrant * quadrant,
+ int level,
+ uint64_t id)
+ = p4est_quadrant_set_morton;
+
+ int (&functions<2>::quadrant_is_equal) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2)
+ = p4est_quadrant_is_equal;
+
+ int (&functions<2>::quadrant_is_sibling) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2)
+ = p4est_quadrant_is_sibling;
+
+ int (&functions<2>::quadrant_is_ancestor) (const types<2>::quadrant * q1,
+ const types<2>::quadrant * q2)
+ = p4est_quadrant_is_ancestor;
+
+ types<2>::connectivity * (&functions<2>::connectivity_new) (types<2>::topidx num_vertices,
+ types<2>::topidx num_trees,
+ types<2>::topidx num_corners,
+ types<2>::topidx num_vtt)
+ = p4est_connectivity_new;
+
+ void (&functions<2>::connectivity_destroy) (p4est_connectivity_t *connectivity)
+ = p4est_connectivity_destroy;
+
+ types<2>::forest * (&functions<2>::new_forest) (MPI_Comm mpicomm,
+ types<2>::connectivity * connectivity,
+ types<2>::locidx min_quadrants,
+ int min_level,
+ int fill_uniform,
+ size_t data_size,
+ p4est_init_t init_fn,
+ void *user_pointer)
+ = p4est_new_ext;
+
+ void (&functions<2>::destroy) (types<2>::forest * p4est)
+ = p4est_destroy;
+
+ void (&functions<2>::refine) (types<2>::forest * p4est,
+ int refine_recursive,
+ p4est_refine_t refine_fn,
+ p4est_init_t init_fn)
+ = p4est_refine;
+
+ void (&functions<2>::coarsen) (types<2>::forest * p4est,
+ int coarsen_recursive,
+ p4est_coarsen_t coarsen_fn,
+ p4est_init_t init_fn)
+ = p4est_coarsen;
+
+ void (&functions<2>::balance) (types<2>::forest * p4est,
+ types<2>::balance_type btype,
+ p4est_init_t init_fn)
+ = p4est_balance;
+
+ void (&functions<2>::partition) (types<2>::forest * p4est,
+ int partition_for_coarsening,
+ p4est_weight_t weight_fn)
+ = p4est_partition_ext;
+
+ void (&functions<2>::save) (const char *filename,
+ types<2>::forest * p4est,
+ int save_data)
+ = p4est_save;
+
+ types<2>::forest *
+ (&functions<2>::load) (const char *filename,
+ MPI_Comm mpicomm,
+ std::size_t data_size,
+ int load_data,
+ void *user_pointer,
+ types<2>::connectivity ** p4est)
+ = p4est_load;
+
+ void (&functions<2>::connectivity_save) (const char *filename,
+ types<2>::connectivity *connectivity)
+ = p4est_connectivity_save;
+
+ types<2>::connectivity *
+ (&functions<2>::connectivity_load) (const char *filename,
+ long *length)
+ = p4est_connectivity_load;
+
+ unsigned (&functions<2>::checksum) (types<2>::forest * p4est)
+ = p4est_checksum;
+
+ void (&functions<2>::vtk_write_file) (types<2>::forest * p4est,
+ p4est_geometry_t*,
+ const char *baseName)
+ = p4est_vtk_write_file;
+
+ types<2>::ghost* (&functions<2>::ghost_new) (types<2>::forest * p4est,
+ types<2>::balance_type btype)
+ = p4est_ghost_new;
+
+ void (&functions<2>::ghost_destroy) (types<2>::ghost * ghost)
+ = p4est_ghost_destroy;
+
+ void (&functions<2>::reset_data) (types<2>::forest * p4est,
+ size_t data_size,
+ p4est_init_t init_fn,
+ void *user_pointer)
+ = p4est_reset_data;
+
+ size_t (&functions<2>::forest_memory_used) (types<2>::forest * p4est)
+ = p4est_memory_used;
+
+ size_t (&functions<2>::connectivity_memory_used) (types<2>::connectivity * p4est)
+ = p4est_connectivity_memory_used;
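+
+ // With these references in place, the triangulation code can
+ // simply write, e.g., functions<dim>::coarsen (forest, ...) and
+ // have the call forwarded to p4est_coarsen in 2d and to
+ // p8est_coarsen in 3d (the argument names here are of course
+ // only placeholders).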
+
+ template <> struct functions<3>
+ {
+ static
+ int (&quadrant_compare) (const void *v1, const void *v2);
+
+ static
+ void (&quadrant_childrenv) (const types<3>::quadrant * q,
+ types<3>::quadrant c[]);
+
+ static
+ int (&quadrant_overlaps_tree) (types<3>::tree * tree,
+ const types<3>::quadrant * q);
+
+ static
+ void (&quadrant_set_morton) (types<3>::quadrant * quadrant,
+ int level,
+ uint64_t id);
+
+ static
+ int (&quadrant_is_equal) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2);
+
+ static
+ int (&quadrant_is_sibling) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2);
+
+ static
+ int (&quadrant_is_ancestor) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2);
+
+ static
+ types<3>::connectivity * (&connectivity_new) (types<3>::topidx num_vertices,
+ types<3>::topidx num_trees,
+ types<3>::topidx num_edges,
+ types<3>::topidx num_ett,
+ types<3>::topidx num_corners,
+ types<3>::topidx num_ctt);
+
+ static
+ void (&connectivity_destroy) (p8est_connectivity_t *connectivity);
+
+ static
+ types<3>::forest * (&new_forest) (MPI_Comm mpicomm,
+ types<3>::connectivity * connectivity,
+ types<3>::locidx min_quadrants,
+ int min_level,
+ int fill_uniform,
+ size_t data_size,
+ p8est_init_t init_fn,
+ void *user_pointer);
+
+ static
+ void (&destroy) (types<3>::forest * p8est);
+
+ static
+ void (&refine) (types<3>::forest * p8est,
+ int refine_recursive,
+ p8est_refine_t refine_fn,
+ p8est_init_t init_fn);
+
+ static
+ void (&coarsen) (types<3>::forest * p8est,
+ int coarsen_recursive,
+ p8est_coarsen_t coarsen_fn,
+ p8est_init_t init_fn);
+
+ static
+ void (&balance) (types<3>::forest * p8est,
+ types<3>::balance_type btype,
+ p8est_init_t init_fn);
+
+ static
+ void (&partition) (types<3>::forest * p8est,
+ int partition_for_coarsening,
+ p8est_weight_t weight_fn);
+
+ static
+ void (&save) (const char *filename,
+ types<3>::forest * p4est,
+ int save_data);
+
+ static
+ types<3>::forest * (&load) (const char *filename,
+ MPI_Comm mpicomm,
+ std::size_t data_size,
+ int load_data,
+ void *user_pointer,
+ types<3>::connectivity ** p4est);
+
+ static
+ void (&connectivity_save) (const char *filename,
+ types<3>::connectivity * connectivity);
+
+ static
+ types<3>::connectivity * (&connectivity_load) (const char *filename,
+ long *length);
+
+ static
+ unsigned (&checksum) (types<3>::forest * p8est);
+
+ static
+ void (&vtk_write_file) (types<3>::forest * p8est,
+ p8est_geometry_t*,
+ const char *baseName);
+
+ static
+ types<3>::ghost* (&ghost_new) (types<3>::forest * p4est,
+ types<3>::balance_type btype);
+
+ static
+ void (&ghost_destroy) (types<3>::ghost * ghost);
+
+ static
+ void (&reset_data) (types<3>::forest * p4est,
+ size_t data_size,
+ p8est_init_t init_fn,
+ void *user_pointer);
+
+ static
+ size_t (&forest_memory_used) (types<3>::forest * p4est);
+
+ static
+ size_t (&connectivity_memory_used) (types<3>::connectivity * p4est);
+ };
+
+ int (&functions<3>::quadrant_compare) (const void *v1, const void *v2)
+ = p8est_quadrant_compare;
+
+ void (&functions<3>::quadrant_childrenv) (const types<3>::quadrant * q,
+ types<3>::quadrant c[])
+ = p8est_quadrant_childrenv;
+
+ int (&functions<3>::quadrant_overlaps_tree) (types<3>::tree * tree,
+ const types<3>::quadrant * q)
+ = p8est_quadrant_overlaps_tree;
+
+ void (&functions<3>::quadrant_set_morton) (types<3>::quadrant * quadrant,
+ int level,
+ uint64_t id)
+ = p8est_quadrant_set_morton;
+
+ int (&functions<3>::quadrant_is_equal) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2)
+ = p8est_quadrant_is_equal;
+
+ int (&functions<3>::quadrant_is_sibling) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2)
+ = p8est_quadrant_is_sibling;
+
+ int (&functions<3>::quadrant_is_ancestor) (const types<3>::quadrant * q1,
+ const types<3>::quadrant * q2)
+ = p8est_quadrant_is_ancestor;
+
+ types<3>::connectivity * (&functions<3>::connectivity_new) (types<3>::topidx num_vertices,
+ types<3>::topidx num_trees,
+ types<3>::topidx num_edges,
+ types<3>::topidx num_ett,
+ types<3>::topidx num_corners,
+ types<3>::topidx num_ctt)
+ = p8est_connectivity_new;
+
+ void (&functions<3>::connectivity_destroy) (p8est_connectivity_t *connectivity)
+ = p8est_connectivity_destroy;
+
+ types<3>::forest * (&functions<3>::new_forest) (MPI_Comm mpicomm,
+ types<3>::connectivity * connectivity,
+ types<3>::locidx min_quadrants,
+ int min_level,
+ int fill_uniform,
+ size_t data_size,
+ p8est_init_t init_fn,
+ void *user_pointer)
+ = p8est_new_ext;
+
+ void (&functions<3>::destroy) (types<3>::forest * p8est)
+ = p8est_destroy;
+
+ void (&functions<3>::refine) (types<3>::forest * p8est,
+ int refine_recursive,
+ p8est_refine_t refine_fn,
+ p8est_init_t init_fn)
+ = p8est_refine;
+
+ void (&functions<3>::coarsen) (types<3>::forest * p8est,
+ int coarsen_recursive,
+ p8est_coarsen_t coarsen_fn,
+ p8est_init_t init_fn)
+ = p8est_coarsen;
+
+ void (&functions<3>::balance) (types<3>::forest * p8est,
+ types<3>::balance_type btype,
+ p8est_init_t init_fn)
+ = p8est_balance;
+
+ void (&functions<3>::partition) (types<3>::forest * p8est,
+ int partition_for_coarsening,
+ p8est_weight_t weight_fn)
+ = p8est_partition_ext;
+
+ void (&functions<3>::save) (const char *filename,
+ types<3>::forest * p4est,
+ int save_data)
+ = p8est_save;
+
+ types<3>::forest *
+ (&functions<3>::load) (const char *filename,
+ MPI_Comm mpicomm,
+ std::size_t data_size,
+ int load_data,
+ void *user_pointer,
+ types<3>::connectivity ** p4est)
+ = p8est_load;
+
+ void (&functions<3>::connectivity_save) (const char *filename,
+ types<3>::connectivity *connectivity)
+ = p8est_connectivity_save;
+
+ types<3>::connectivity *
+ (&functions<3>::connectivity_load) (const char *filename,
+ long *length)
+ = p8est_connectivity_load;
+
+ unsigned (&functions<3>::checksum) (types<3>::forest * p8est)
+ = p8est_checksum;
+
+ void (&functions<3>::vtk_write_file) (types<3>::forest * p8est,
+ p8est_geometry_t*,
+ const char *baseName)
+ = p8est_vtk_write_file;
+
+ types<3>::ghost* (&functions<3>::ghost_new) (types<3>::forest * p4est,
+ types<3>::balance_type btype)
+ = p8est_ghost_new;
+
+ void (&functions<3>::ghost_destroy) (types<3>::ghost * ghost)
+ = p8est_ghost_destroy;
+
+ void (&functions<3>::reset_data) (types<3>::forest * p4est,
+ size_t data_size,
+ p8est_init_t init_fn,
+ void *user_pointer)
+ = p8est_reset_data;
+
+ size_t (&functions<3>::forest_memory_used) (types<3>::forest * p4est)
+ = p8est_memory_used;
+
+ size_t (&functions<3>::connectivity_memory_used) (types<3>::connectivity * p4est)
+ = p8est_connectivity_memory_used;
+
+
+
+
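+    /**
+     * Initialize an array of
+     * quadrants and fill it with
+     * the children of the given
+     * p4est cell.
+     */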
+ template <int dim>
+ void
+ init_quadrant_children
+ (const typename types<dim>::quadrant & p4est_cell,
+ typename types<dim>::quadrant (&p4est_children)[GeometryInfo<dim>::max_children_per_cell])
+ {
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_children[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_children[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ functions<dim>::quadrant_childrenv (&p4est_cell,
+ p4est_children);
+
+ }
+
+
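+    /**
+     * Initialize a quadrant to
+     * represent the root cell of
+     * a tree, i.e. the level zero
+     * quadrant with Morton index
+     * zero.
+     */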
+ template <int dim>
+ void
+ init_coarse_quadrant(typename types<dim>::quadrant & quad)
+ {
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&quad);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&quad);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+ functions<dim>::quadrant_set_morton (&quad,
+ /*level=*/0,
+ /*index=*/0);
+ }
+
+
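+    /**
+     * Return whether q1 and q2
+     * are equal quadrants,
+     * independently of the space
+     * dimension.
+     */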
+ template <int dim>
+ bool
+ quadrant_is_equal (const typename types<dim>::quadrant & q1,
+ const typename types<dim>::quadrant & q2)
+ {
+ return functions<dim>::quadrant_is_equal(&q1, &q2);
+ }
+
+
+
+ template <int dim>
+ bool
+ quadrant_is_ancestor (const typename types<dim>::quadrant & q1,
+ const typename types<dim>::quadrant & q2)
+ {
+ return functions<dim>::quadrant_is_ancestor(&q1, &q2);
+ }
+ }
+}
+
+
+namespace
+{
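+  /**
+   * For each vertex of the given
+   * triangulation, count the
+   * adjacent active cells and
+   * record, for each of them, the
+   * cell iterator together with
+   * the local number of the
+   * vertex within that cell.
+   */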
+ template <int dim, int spacedim>
+ void
+ get_vertex_to_cell_mappings (const Triangulation<dim,spacedim> &triangulation,
+ std::vector<unsigned int> &vertex_touch_count,
+ std::vector<std::list<
+ std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,unsigned int> > >
+ & vertex_to_cell)
+ {
+ vertex_touch_count.resize (triangulation.n_vertices());
+ vertex_to_cell.resize (triangulation.n_vertices());
+
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ {
+ ++vertex_touch_count[cell->vertex_index(v)];
+ vertex_to_cell[cell->vertex_index(v)]
+ .push_back (std::make_pair (cell, v));
+ }
+ }
+
+
+
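+  /**
+   * Same as
+   * get_vertex_to_cell_mappings(),
+   * but for the lines (edges) of
+   * a coarse triangulation in 3d.
+   */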
+ template <int dim, int spacedim>
+ void
+ get_edge_to_cell_mappings (const Triangulation<dim,spacedim> &triangulation,
+ std::vector<unsigned int> &edge_touch_count,
+ std::vector<std::list<
+ std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,unsigned int> > >
+ & edge_to_cell)
+ {
+ Assert (triangulation.n_levels() == 1, ExcInternalError());
+
+ edge_touch_count.resize (triangulation.n_active_lines());
+ edge_to_cell.resize (triangulation.n_active_lines());
+
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ for (unsigned int l=0; l<GeometryInfo<dim>::lines_per_cell; ++l)
+ {
+ ++edge_touch_count[cell->line(l)->index()];
+ edge_to_cell[cell->line(l)->index()]
+ .push_back (std::make_pair (cell, l));
+ }
+ }
+
+
+
+ /**
+ * Set all vertex and cell
+ * related information in the
+ * p4est connectivity structure.
+ */
+ template <int dim, int spacedim>
+ void
+ set_vertex_and_cell_info (const Triangulation<dim,spacedim> &triangulation,
+ const std::vector<unsigned int> &vertex_touch_count,
+ const std::vector<std::list<
+ std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,unsigned int> > >
+ & vertex_to_cell,
+ const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
+ const bool set_vertex_info,
+ typename internal::p4est::types<dim>::connectivity *connectivity)
+ {
+ // copy the vertices into the
+ // connectivity structure. the
+ // triangulation exports the array of
+ // vertices, but some of the entries are
+ // sometimes unused; this shouldn't be
+ // the case for a newly created
+ // triangulation, but make sure
+ //
+ // note that p4est stores coordinates as
+ // a triplet of values even in 2d
+ Assert (triangulation.get_used_vertices().size() ==
+ triangulation.get_vertices().size(),
+ ExcInternalError());
+ Assert (std::find (triangulation.get_used_vertices().begin(),
+ triangulation.get_used_vertices().end(),
+ false)
+ == triangulation.get_used_vertices().end(),
+ ExcInternalError());
+ if (set_vertex_info == true)
+ for (unsigned int v=0; v<triangulation.n_vertices(); ++v)
+ {
+ connectivity->vertices[3*v ] = triangulation.get_vertices()[v][0];
+ connectivity->vertices[3*v+1] = triangulation.get_vertices()[v][1];
+ connectivity->vertices[3*v+2] = (spacedim == 2 ?
+ 0
+ :
+ triangulation.get_vertices()[v][2]);
+ }
+
+ // next store the tree_to_vertex indices
+ // (each tree is here only a single cell
+ // in the coarse mesh). p4est requires
+ // vertex numbering in clockwise
+ // orientation
+ //
+ // while we're at it, also copy the
+ // neighborship information between cells
+ typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active(),
+ endc = triangulation.end();
+ for (; cell != endc; ++cell)
+ {
+ const unsigned int
+ index = coarse_cell_to_p4est_tree_permutation[cell->index()];
+
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ {
+ if (set_vertex_info == true)
+ connectivity->tree_to_vertex[index*GeometryInfo<dim>::vertices_per_cell+v] = cell->vertex_index(v);
+ connectivity->tree_to_corner[index*GeometryInfo<dim>::vertices_per_cell+v] = cell->vertex_index(v);
+ }
+
+ // neighborship information. if a
+ // cell is at a boundary, then enter
+ // the index of the cell itself here
+ for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+ if (cell->face(f)->at_boundary() == false)
+ connectivity->tree_to_tree[index*GeometryInfo<dim>::faces_per_cell + f]
+ = coarse_cell_to_p4est_tree_permutation[cell->neighbor(f)->index()];
+ else
+ connectivity->tree_to_tree[index*GeometryInfo<dim>::faces_per_cell + f]
+ = coarse_cell_to_p4est_tree_permutation[cell->index()];
+
+ // fill tree_to_face, which is
+ // essentially neighbor_to_neighbor;
+ // however, we have to remap the
+ // resulting face number as well
+ for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+ if (cell->face(f)->at_boundary() == false)
+ {
+ switch (dim)
+ {
+ case 2:
+ {
+ connectivity->tree_to_face[index*GeometryInfo<dim>::faces_per_cell + f]
+ = cell->neighbor_of_neighbor (f);
+ break;
+ }
+
+ case 3:
+ {
+ /*
+ * The values for
+ * tree_to_face are in
+ * 0..23 where ttf % 6
+ * gives the face
+ * number and ttf / 6
+ * the face orientation
+ * code. The
+ * orientation is
+ * determined as
+ * follows. Let
+ * my_face and
+ * other_face be the
+ * two face numbers of
+ * the connecting trees
+ * in 0..5. Then the
+ * first face vertex of
+ * the lower of my_face
+ * and other_face
+ * connects to a face
+ * vertex numbered 0..3
+ * in the higher of
+ * my_face and
+ * other_face. The
+ * face orientation is
+ * defined as this
+ * number. If my_face
+ * == other_face,
+ * treating either of
+ * both faces as the
+ * lower one leads to
+ * the same result.
+ */
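+                         /*
+                          * For example: if the
+                          * current cell's face f=2
+                          * meets the neighbor on its
+                          * face 4, and face vertex 0
+                          * of the lower face (2)
+                          * coincides with face vertex
+                          * 3 of the higher face (4),
+                          * then the stored value is
+                          * 4 + 6*3 = 22.
+                          */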
+
+ connectivity->tree_to_face[index*6 + f]
+ = cell->neighbor_of_neighbor (f);
+
+ unsigned int face_idx_list[2] =
+ {f, cell->neighbor_of_neighbor (f)};
+ typename Triangulation<dim>::active_cell_iterator
+ cell_list[2] = {cell, cell->neighbor(f)};
+ unsigned int smaller_idx = 0;
+
+ if (f>cell->neighbor_of_neighbor (f))
+ smaller_idx = 1;
+
+ unsigned larger_idx = (smaller_idx+1) % 2;
+ //smaller = *_list[smaller_idx]
+ //larger = *_list[larger_idx]
+
+ unsigned int v = 0;
+
+ // global vertex index of
+ // vertex 0 on face of
+ // cell with smaller
+ // local face index
+ unsigned int g_idx =
+ cell_list[smaller_idx]->vertex_index(
+ GeometryInfo<dim>::face_to_cell_vertices(
+ face_idx_list[smaller_idx],
+ 0,
+ cell_list[smaller_idx]->face_orientation(face_idx_list[smaller_idx]),
+ cell_list[smaller_idx]->face_flip(face_idx_list[smaller_idx]),
+ cell_list[smaller_idx]->face_rotation(face_idx_list[smaller_idx]))
+ );
+
+ // loop over vertices on
+ // face from other cell
+ // and compare global
+ // vertex numbers
+ for (unsigned int i=0; i<GeometryInfo<dim>::vertices_per_face; ++i)
+ {
+ unsigned int idx
+ =
+ cell_list[larger_idx]->vertex_index(
+ GeometryInfo<dim>::face_to_cell_vertices(
+ face_idx_list[larger_idx],
+ i)
+ );
+
+ if (idx==g_idx)
+ {
+ v = i;
+ break;
+ }
+ }
+
+ connectivity->tree_to_face[index*6 + f] += 6*v;
+ break;
+ }
+
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+ }
+ else
+ connectivity->tree_to_face[index*GeometryInfo<dim>::faces_per_cell + f] = f;
+ }
+
+ // now fill the vertex information
+ connectivity->ctt_offset[0] = 0;
+ std::partial_sum (vertex_touch_count.begin(),
+ vertex_touch_count.end(),
+ &connectivity->ctt_offset[1]);
+
+ const typename internal::p4est::types<dim>::locidx
+ num_vtt = std::accumulate (vertex_touch_count.begin(),
+ vertex_touch_count.end(),
+ 0);
+ Assert (connectivity->ctt_offset[triangulation.n_vertices()] ==
+ num_vtt,
+ ExcInternalError());
+
+ for (unsigned int v=0; v<triangulation.n_vertices(); ++v)
+ {
+ Assert (vertex_to_cell[v].size() == vertex_touch_count[v],
+ ExcInternalError());
+
+ typename std::list<std::pair
+ <typename Triangulation<dim,spacedim>::active_cell_iterator,
+ unsigned int> >::const_iterator
+ p = vertex_to_cell[v].begin();
+ for (unsigned int c=0; c<vertex_touch_count[v]; ++c, ++p)
+ {
+ connectivity->corner_to_tree[connectivity->ctt_offset[v]+c]
+ = coarse_cell_to_p4est_tree_permutation[p->first->index()];
+ connectivity->corner_to_corner[connectivity->ctt_offset[v]+c]
+ = p->second;
+ }
+ }
+ }
+
+
+
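+  /**
+   * Return whether the tree
+   * rooted in the given coarse
+   * grid cell stores at least
+   * part of its quadrants on the
+   * current processor.
+   */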
+ template <int dim, int spacedim>
+ bool
+ tree_exists_locally (const typename internal::p4est::types<dim>::forest *parallel_forest,
+ const typename internal::p4est::types<dim>::topidx coarse_grid_cell)
+ {
+ Assert (coarse_grid_cell < parallel_forest->connectivity->num_trees,
+ ExcInternalError());
+ return ((coarse_grid_cell >= parallel_forest->first_local_tree)
+ &&
+ (coarse_grid_cell <= parallel_forest->last_local_tree));
+ }
+
+
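+  /**
+   * Flag all active descendants
+   * of the given cell (or the
+   * cell itself if it has no
+   * children) for coarsening, so
+   * that the entire sub-tree is
+   * removed.
+   */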
+ template <int dim, int spacedim>
+ void
+ delete_all_children_and_self (const typename Triangulation<dim,spacedim>::cell_iterator &cell)
+ {
+ if (cell->has_children())
+ for (unsigned int c=0; c<cell->n_children(); ++c)
+ delete_all_children_and_self<dim,spacedim> (cell->child(c));
+ else
+ cell->set_coarsen_flag ();
+ }
+
+
+
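+  /**
+   * Like
+   * delete_all_children_and_self(),
+   * but leave the given cell
+   * itself alone and only flag
+   * its descendants.
+   */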
+ template <int dim, int spacedim>
+ void
+ delete_all_children (const typename Triangulation<dim,spacedim>::cell_iterator &cell)
+ {
+ if (cell->has_children())
+ for (unsigned int c=0; c<cell->n_children(); ++c)
+ delete_all_children_and_self<dim,spacedim> (cell->child(c));
+ }
+
+
+
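+  /**
+   * Walk the local part of the
+   * given p4est tree and the
+   * deal.II cell hierarchy in
+   * lockstep: cells that exist as
+   * quadrants in the local forest
+   * become locally owned, cells
+   * the forest refines further
+   * are flagged for refinement,
+   * and sub-trees that are not
+   * stored locally are coarsened
+   * away and marked artificial.
+   */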
+ template <int dim, int spacedim>
+ void
+ match_tree_recursively (const typename internal::p4est::types<dim>::tree &tree,
+ const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+ const typename internal::p4est::types<dim>::forest &forest,
+ const types::subdomain_id_t my_subdomain)
+ {
+ // check if this cell exists in
+ // the local p4est cell
+ if (sc_array_bsearch(const_cast<sc_array_t*>(&tree.quadrants),
+ &p4est_cell,
+ internal::p4est::functions<dim>::quadrant_compare)
+ != -1)
+ {
+ // yes, cell found in local part of p4est
+ delete_all_children<dim,spacedim> (dealii_cell);
+ dealii_cell->set_subdomain_id(my_subdomain);
+ }
+ else
+ {
+ // no, cell not found in
+ // local part of
+ // p4est. this means that
+ // the local part is more
+ // refined than the current
+ // cell. if this cell has
+ // no children of its own,
+ // we need to refine it,
+ // and if it does already
+ // have children then loop
+ // over all children and
+ // see if they are locally
+ // available as well
+ if (dealii_cell->has_children () == false)
+ dealii_cell->set_refine_flag ();
+ else
+ {
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell,
+ p4est_child);
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ if (internal::p4est::functions<dim>::
+ quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree*>(&tree),
+ &p4est_child[c])
+ == false)
+ {
+ // no, this child
+ // is locally not
+ // available in
+ // the p4est.
+ // delete all
+ // its children
+ // but, because this
+                        // may not be successful,
+ // make sure to mark all
+ // children recursively as
+ // not local.
+ delete_all_children<dim,spacedim> (dealii_cell->child(c));
+ dealii_cell->child(c)
+ ->recursively_set_subdomain_id(types::artificial_subdomain_id);
+ }
+ else
+ {
+ // at least some
+ // part of the
+ // tree rooted in
+ // this child is
+ // locally
+ // available
+ match_tree_recursively<dim,spacedim> (tree,
+ dealii_cell->child(c),
+ p4est_child[c],
+ forest,
+ my_subdomain);
+ }
+ }
+ }
+ }
+
+
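+  /**
+   * Descend from this_quadrant
+   * towards ghost_quadrant and
+   * make sure a matching deal.II
+   * cell exists: refine where
+   * necessary and set the owning
+   * processor's subdomain id on
+   * the cell corresponding to the
+   * ghost quadrant.
+   */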
+ template <int dim, int spacedim>
+ void
+ match_quadrant_recursively (const typename internal::p4est::types<dim>::quadrant &this_quadrant,
+ const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename internal::p4est::types<dim>::quadrant &ghost_quadrant,
+ const typename internal::p4est::types<dim>::forest &forest,
+ unsigned int ghost_owner)
+ {
+ if (internal::p4est::functions<dim>::quadrant_is_equal(&this_quadrant, &ghost_quadrant))
+ {
+          // this is the ghost cell
+ dealii_cell->set_subdomain_id(ghost_owner);
+
+ if (dealii_cell->has_children())
+ delete_all_children<dim,spacedim> (dealii_cell);
+ else
+ dealii_cell->clear_coarsen_flag();
+ return;
+ }
+
+ if (! internal::p4est::functions<dim>::quadrant_is_ancestor ( &this_quadrant, &ghost_quadrant))
+ {
+ return;
+ }
+
+ if (dealii_cell->has_children () == false)
+ {
+ dealii_cell->clear_coarsen_flag();
+ dealii_cell->set_refine_flag ();
+ return;
+ }
+
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&this_quadrant, p4est_child);
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+
+ {
+ match_quadrant_recursively<dim,spacedim> (p4est_child[c], dealii_cell->child(c), ghost_quadrant, forest, ghost_owner);
+ }
+
+
+ }
+
+
+
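+  /**
+   * Recursively visit the local
+   * quadrants of the given tree,
+   * store in each active
+   * quadrant's user data the
+   * status of the matching
+   * deal.II cell (persist,
+   * refine, coarsen, invalid),
+   * and call the registered pack
+   * callbacks so that attached
+   * data travels with the
+   * quadrants.
+   */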
+ template <int dim, int spacedim>
+ void
+ attach_mesh_data_recursively (const typename internal::p4est::types<dim>::tree &tree,
+ const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+ const typename std::list<std::pair<unsigned int, typename std_cxx1x::function<
+ void(typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator,
+ typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus,
+ void*)
+ > > > &attached_data_pack_callbacks)
+ {
+ typedef std::list<std::pair<unsigned int, typename std_cxx1x::function<
+ void(typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator,
+ typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus,
+ void*)
+ > > > callback_list_t;
+
+ int idx = sc_array_bsearch(const_cast<sc_array_t*>(&tree.quadrants),
+ &p4est_cell,
+ internal::p4est::functions<dim>::quadrant_compare);
+
+ if (idx == -1 && (internal::p4est::functions<dim>::
+ quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree*>(&tree),
+ &p4est_cell)
+ == false))
+      return; // this quadrant and none of its children belong to us.
+
+ bool p4est_has_children = (idx == -1);
+
+ if (p4est_has_children && dealii_cell->has_children())
+ {
+ //recurse further
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell, p4est_child);
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ attach_mesh_data_recursively<dim,spacedim> (tree,
+ dealii_cell->child(c),
+ p4est_child[c],
+ attached_data_pack_callbacks);
+ }
+ }
+ else if (!p4est_has_children && !dealii_cell->has_children())
+ {
+ //this active cell didn't change
+ typename internal::p4est::types<dim>::quadrant *q;
+ q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
+ sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
+ );
+        *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>
+          (q->p.user_data)
+          = parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST;
+
+ for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
+ {
+ void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
+ ((*it).second)(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST,
+ ptr);
+ }
+ }
+ else if (p4est_has_children)
+ {
+ //this cell got refined
+
+ //attach to the first child, because
+ // we can only attach to active
+ // quadrants
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell, p4est_child);
+ int child0_idx = sc_array_bsearch(const_cast<sc_array_t*>(&tree.quadrants),
+ &p4est_child[0],
+ internal::p4est::functions<dim>::quadrant_compare);
+ Assert(child0_idx != -1, ExcMessage("the first child should exist as an active quadrant!"));
+
+ typename internal::p4est::types<dim>::quadrant *q;
+ q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
+ sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), child0_idx)
+ );
+        *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>
+          (q->p.user_data)
+          = parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE;
+
+ for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
+ {
+ void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
+
+ ((*it).second)(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE,
+ ptr);
+ }
+
+        // mark the other children as invalid, so that unpacking only happens once
+ for (unsigned int i=1;i<GeometryInfo<dim>::max_children_per_cell; ++i)
+ {
+ int child_idx = sc_array_bsearch(const_cast<sc_array_t*>(&tree.quadrants),
+ &p4est_child[i],
+ internal::p4est::functions<dim>::quadrant_compare);
+ q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
+ sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), child_idx)
+ );
+          *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>
+            (q->p.user_data)
+            = parallel::distributed::Triangulation<dim,spacedim>::CELL_INVALID;
+ }
+
+
+ }
+ else
+ {
+        // its children got coarsened into
+        // this cell
+ typename internal::p4est::types<dim>::quadrant *q;
+ q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
+ sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
+ );
+        *static_cast<typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*>
+          (q->p.user_data)
+          = parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN;
+
+ for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin();
+ it != attached_data_pack_callbacks.end();
+ ++it)
+ {
+ void * ptr = static_cast<char*>(q->p.user_data) + (*it).first; //add offset
+ ((*it).second)(dealii_cell,
+ parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN,
+ ptr);
+ }
+ }
+ }
+
+
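+  /**
+   * Counterpart to
+   * attach_mesh_data_recursively():
+   * after refinement and
+   * repartitioning, read the cell
+   * status back from each
+   * quadrant's user data and hand
+   * the packed data to the given
+   * unpack callback.
+   */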
+ template <int dim, int spacedim>
+ void
+ post_mesh_data_recursively (const typename internal::p4est::types<dim>::tree &tree,
+ const typename Triangulation<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename Triangulation<dim,spacedim>::cell_iterator &parent_cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+ const unsigned int offset,
+ const typename std_cxx1x::function<
+                                void(typename parallel::distributed::Triangulation<dim,spacedim>::cell_iterator,
+                                     typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus,
+                                     void*)
+ > &unpack_callback)
+ {
+ int idx = sc_array_bsearch(const_cast<sc_array_t*>(&tree.quadrants),
+ &p4est_cell,
+ internal::p4est::functions<dim>::quadrant_compare);
+ if (idx == -1 && (internal::p4est::functions<dim>::
+ quadrant_overlaps_tree (const_cast<typename internal::p4est::types<dim>::tree*>(&tree),
+ &p4est_cell)
+ == false))
+ // this quadrant and none of
+      // its children belong to us.
+ return;
+
+
+ const bool p4est_has_children = (idx == -1);
+ if (p4est_has_children)
+ {
+ Assert(dealii_cell->has_children(), ExcInternalError());
+
+ //recurse further
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell, p4est_child);
+
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ post_mesh_data_recursively<dim,spacedim> (tree,
+ dealii_cell->child(c),
+ dealii_cell,
+ p4est_child[c],
+ offset,
+ unpack_callback);
+ }
+ }
+ else
+ {
+ Assert(! dealii_cell->has_children(), ExcInternalError());
+
+ typename internal::p4est::types<dim>::quadrant *q;
+ q = static_cast<typename internal::p4est::types<dim>::quadrant*> (
+ sc_array_index (const_cast<sc_array_t*>(&tree.quadrants), idx)
+ );
+
+ void * ptr = static_cast<char*>(q->p.user_data) + offset;
+ typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus
+ status = * static_cast<
+ typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*
+ >(q->p.user_data);
+ switch (status)
+ {
+ case parallel::distributed::Triangulation<dim,spacedim>::CELL_PERSIST:
+ {
+ unpack_callback(dealii_cell, status, ptr);
+ break;
+ }
+ case parallel::distributed::Triangulation<dim,spacedim>::CELL_REFINE:
+ {
+ unpack_callback(parent_cell, status, ptr);
+ break;
+ }
+ case parallel::distributed::Triangulation<dim,spacedim>::CELL_COARSEN:
+ {
+ unpack_callback(dealii_cell, status, ptr);
+ break;
+ }
+ case parallel::distributed::Triangulation<dim,spacedim>::CELL_INVALID:
+ {
+ break;
+ }
+ default:
+ throw ExcInternalError();
+ }
+ }
+ }
+
+
+
+ /**
+ * A data structure that we use to store
+ * which cells (indicated by
+ * internal::p4est::types<dim>::quadrant objects) shall be
+ * refined and which shall be coarsened.
+ */
+ template <int dim, int spacedim>
+ class RefineAndCoarsenList
+ {
+ public:
+ RefineAndCoarsenList (const Triangulation<dim,spacedim> &triangulation,
+ const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
+ const types::subdomain_id_t my_subdomain,
+ typename internal::p4est::types<dim>::forest &forest);
+
+ /**
+ * A callback function that we
+ * pass to the p4est data
+ * structures when a forest is
+ * to be refined. The p4est
+ * functions call it back with
+ * a tree (the index of the
+ * tree that grows out of a
+ * given coarse cell) and a
+ * refinement path from that
+ * coarse cell to a
+ * terminal/leaf cell. The
+ * function returns whether the
+ * corresponding cell in the
+ * deal.II triangulation has
+       * the refinement flag set.
+ */
+ static
+ int
+ refine_callback (typename internal::p4est::types<dim>::forest *forest,
+ typename internal::p4est::types<dim>::topidx coarse_cell_index,
+ typename internal::p4est::types<dim>::quadrant *quadrant);
+
+ /**
+ * Same as the refine_callback
+ * function, but return whether
+       * all of the given children
+       * (four in 2d, eight in 3d)
+       * of a non-terminal
+ * cell are to be coarsened
+ * away.
+ */
+ static
+ int
+ coarsen_callback (typename internal::p4est::types<dim>::forest *forest,
+ typename internal::p4est::types<dim>::topidx coarse_cell_index,
+ typename internal::p4est::types<dim>::quadrant *children[]);
+
+ bool pointers_are_at_end () const;
+
+ private:
+ std::vector<typename internal::p4est::types<dim>::quadrant> refine_list;
+ typename std::vector<typename internal::p4est::types<dim>::quadrant>::const_iterator current_refine_pointer;
+
+ std::vector<typename internal::p4est::types<dim>::quadrant> coarsen_list;
+ typename std::vector<typename internal::p4est::types<dim>::quadrant>::const_iterator current_coarsen_pointer;
+
+ typename internal::p4est::types<dim>::forest forest;
+
+ void build_lists (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+                          const types::subdomain_id_t my_subdomain);
+ };
+
+
+
+ template <int dim, int spacedim>
+ bool
+ RefineAndCoarsenList<dim,spacedim>::
+ pointers_are_at_end () const
+ {
+ return ((current_refine_pointer == refine_list.end())
+ &&
+ (current_coarsen_pointer == coarsen_list.end()));
+ }
+
+
+
+ template <int dim, int spacedim>
+ RefineAndCoarsenList<dim,spacedim>::
+ RefineAndCoarsenList (const Triangulation<dim,spacedim> &triangulation,
+ const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
+ const types::subdomain_id_t my_subdomain,
+ typename internal::p4est::types<dim>::forest &forest)
+ :
+ forest(forest)
+ {
+ // count how many flags are set and
+ // allocate that much memory
+ unsigned int n_refine_flags = 0,
+ n_coarsen_flags = 0;
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ //skip cells that are not local
+ if (cell->subdomain_id() != my_subdomain)
+ continue;
+
+ if (cell->refine_flag_set())
+ ++n_refine_flags;
+ else if (cell->coarsen_flag_set())
+ ++n_coarsen_flags;
+ }
+
+ refine_list.reserve (n_refine_flags);
+ coarsen_list.reserve (n_coarsen_flags);
+
+
+ // now build the lists of cells that
+ // are flagged. note that p4est will
+ // traverse its cells in the order in
+ // which trees appear in the
+ // forest. this order is not the same
+ // as the order of coarse cells in the
+ // deal.II Triangulation because we
+ // have translated everything by the
+ // coarse_cell_to_p4est_tree_permutation
+ // permutation. in order to make sure
+ // that the output array is already in
+ // the correct order, traverse our
+ // coarse cells in the same order in
+ // which p4est will:
+ for (unsigned int c=0; c<triangulation.n_cells(0); ++c)
+ {
+ unsigned int coarse_cell_index =
+ p4est_tree_to_coarse_cell_permutation[c];
+
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ cell (&triangulation, 0, coarse_cell_index);
+
+ typename internal::p4est::types<dim>::quadrant p4est_cell;
+ internal::p4est::functions<dim>::
+ quadrant_set_morton (&p4est_cell,
+ /*level=*/0,
+ /*index=*/0);
+ p4est_cell.p.which_tree = c;
+ build_lists (cell, p4est_cell, my_subdomain);
+ }
+
+
+ Assert(refine_list.size() == n_refine_flags,
+ ExcInternalError());
+ Assert(coarsen_list.size() == n_coarsen_flags,
+ ExcInternalError());
+
+ // make sure that our ordering in fact
+ // worked
+ for (unsigned int i=1; i<refine_list.size(); ++i)
+ Assert (refine_list[i].p.which_tree >=
+ refine_list[i-1].p.which_tree,
+ ExcInternalError());
+ for (unsigned int i=1; i<coarsen_list.size(); ++i)
+ Assert (coarsen_list[i].p.which_tree >=
+ coarsen_list[i-1].p.which_tree,
+ ExcInternalError());
+
+ current_refine_pointer = refine_list.begin();
+ current_coarsen_pointer = coarsen_list.begin();
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ RefineAndCoarsenList<dim,spacedim>::
+ build_lists (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
+ const typename internal::p4est::types<dim>::quadrant &p4est_cell,
+ const types::subdomain_id_t my_subdomain)
+ {
+ if (!cell->has_children())
+ {
+ if (cell->subdomain_id() == my_subdomain)
+ {
+ if (cell->refine_flag_set())
+ refine_list.push_back (p4est_cell);
+ else if (cell->coarsen_flag_set())
+ coarsen_list.push_back (p4est_cell);
+ }
+ }
+ else
+ {
+ typename internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ switch (dim)
+ {
+ case 2:
+ P4EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ case 3:
+ P8EST_QUADRANT_INIT(&p4est_child[c]);
+ break;
+ default:
+ Assert (false, ExcNotImplemented());
+ }
+ internal::p4est::functions<dim>::
+ quadrant_childrenv (&p4est_cell,
+ p4est_child);
+ for (unsigned int c=0;
+ c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ p4est_child[c].p.which_tree = p4est_cell.p.which_tree;
+ build_lists (cell->child(c),
+ p4est_child[c],
+ my_subdomain);
+ }
+ }
+ }
+
+
+ template <int dim, int spacedim>
+ int
+ RefineAndCoarsenList<dim,spacedim>::
+ refine_callback (typename internal::p4est::types<dim>::forest *forest,
+ typename internal::p4est::types<dim>::topidx coarse_cell_index,
+ typename internal::p4est::types<dim>::quadrant *quadrant)
+ {
+ RefineAndCoarsenList<dim,spacedim> *this_object
+ = reinterpret_cast<RefineAndCoarsenList<dim,spacedim>*>(forest->user_pointer);
+
+ // if there are no more cells in our
+ // list the current cell can't be
+ // flagged for refinement
+ if (this_object->current_refine_pointer == this_object->refine_list.end())
+ return false;
+
+ Assert (coarse_cell_index <=
+ this_object->current_refine_pointer->p.which_tree,
+ ExcInternalError());
+
+ // if p4est hasn't yet reached the tree
+ // of the next flagged cell the current
+ // cell can't be flagged for refinement
+ if (coarse_cell_index <
+ this_object->current_refine_pointer->p.which_tree)
+ return false;
+
+ // now we're in the right tree in the
+ // forest
+ Assert (coarse_cell_index <=
+ this_object->current_refine_pointer->p.which_tree,
+ ExcInternalError());
+
+ // make sure that the p4est loop over
+ // cells hasn't gotten ahead of our own
+ // pointer
+ Assert (internal::p4est::functions<dim>::
+ quadrant_compare (quadrant,
+ &*this_object->current_refine_pointer) <= 0,
+ ExcInternalError());
+
+ // now, if the p4est cell is one in the
+ // list, it is supposed to be refined
+ if (internal::p4est::functions<dim>::
+ quadrant_is_equal (quadrant, &*this_object->current_refine_pointer))
+ {
+ ++this_object->current_refine_pointer;
+ return true;
+ }
+
+ // p4est cell is not in list
+ return false;
+ }
+
+
+
+ template <int dim, int spacedim>
+ int
+ RefineAndCoarsenList<dim,spacedim>::
+ coarsen_callback (typename internal::p4est::types<dim>::forest *forest,
+ typename internal::p4est::types<dim>::topidx coarse_cell_index,
+ typename internal::p4est::types<dim>::quadrant *children[])
+ {
+ RefineAndCoarsenList<dim,spacedim> *this_object
+ = reinterpret_cast<RefineAndCoarsenList<dim,spacedim>*>(forest->user_pointer);
+
+ // if there are no more cells in our
+ // list the current cell can't be
+ // flagged for coarsening
+ if (this_object->current_coarsen_pointer ==
+ this_object->coarsen_list.end())
+ return false;
+
+ Assert (coarse_cell_index <=
+ this_object->current_coarsen_pointer->p.which_tree,
+ ExcInternalError());
+
+ // if p4est hasn't yet reached the tree
+ // of the next flagged cell the current
+ // cell can't be flagged for coarsening
+ if (coarse_cell_index <
+ this_object->current_coarsen_pointer->p.which_tree)
+ return false;
+
+ // now we're in the right tree in the
+ // forest
+ Assert (coarse_cell_index <=
+ this_object->current_coarsen_pointer->p.which_tree,
+ ExcInternalError());
+
+ // make sure that the p4est loop over
+ // cells hasn't gotten ahead of our own
+ // pointer
+ Assert (internal::p4est::functions<dim>::
+ quadrant_compare (children[0],
+ &*this_object->current_coarsen_pointer) <= 0,
+ ExcInternalError());
+
+ // now, if the p4est cell is one in the
+ // list, it is supposed to be coarsened
+ if (internal::p4est::functions<dim>::
+ quadrant_is_equal (children[0],
+ &*this_object->current_coarsen_pointer))
+ {
+ // move current pointer one up
+ ++this_object->current_coarsen_pointer;
+
+          // note that the next entries in
+          // our list need to correspond to
+          // the remaining siblings of the
+          // cell we have just found
+ for (unsigned int c=1; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ {
+ Assert (internal::p4est::functions<dim>::
+ quadrant_is_equal (children[c],
+ &*this_object->current_coarsen_pointer),
+ ExcInternalError());
+ ++this_object->current_coarsen_pointer;
+ }
+
+ return true;
+ }
+
+ // p4est cell is not in list
+ return false;
+ }
+}
+
+
+// initialize p4est
+namespace internal
+{
+ namespace p4est
+ {
+ struct InitFinalize
+ {
+ private:
+ struct Singleton
+ {
+ Singleton ()
+ {
+ // ensure that the
+ // initialization
+ // code is run only
+ // once, even if we
+ // link with 1d, 2d,
+ // and 3d libraries
+ static bool initialized = false;
+
+ if (initialized == false)
+ {
+ sc_init (MPI_COMM_WORLD,
+ 0, 0, 0, SC_LP_STATISTICS);
+ p4est_init (0, SC_LP_STATISTICS);
+
+ initialized = true;
+ }
+ }
+
+ ~Singleton ()
+ {
+ // same here
+ static bool deinitialized = false;
+
+ if (deinitialized == false)
+ {
+ // p4est has no
+ // p4est_finalize
+ // function
+ sc_finalize ();
+
+ deinitialized = true;
+ }
+ }
+ };
+
+ public:
+      // run the initialization code; the static object below ensures
+      // that this happens only the first time this function is called
+ static void do_initialize ()
+ {
+ static Singleton singleton;
+ }
+ };
+ }
+}
+
+
+namespace parallel
+{
+ namespace distributed
+ {
+
+/* ---------------------- class Triangulation<dim,spacedim> ------------------------------ */
+
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::
+ Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid)
+ :
+ // do not check
+ // for distorted
+ // cells
+ dealii::Triangulation<dim,spacedim>
+ (smooth_grid,
+ false),
+ mpi_communicator (Utilities::System::
+ duplicate_communicator(mpi_communicator)),
+ my_subdomain (Utilities::System::get_this_mpi_process (this->mpi_communicator)),
+ triangulation_has_content (false),
+ connectivity (0),
+ parallel_forest (0),
+ refinement_in_progress (false),
+ attached_data_size(0),
+ n_attached_datas(0)
+ {
+ // initialize p4est. do this in a separate function since it has
+ // to happen only once, even if we have triangulation objects
+ // for several different space dimensions
+ internal::p4est::InitFinalize::do_initialize ();
+
+ number_cache.n_locally_owned_active_cells
+ .resize (Utilities::System::get_n_mpi_processes (mpi_communicator));
+ }
+
+
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::~Triangulation ()
+ {
+ clear ();
+
+ Assert (triangulation_has_content == false,
+ ExcInternalError());
+ Assert (connectivity == 0, ExcInternalError());
+ Assert (parallel_forest == 0, ExcInternalError());
+ Assert (refinement_in_progress == false, ExcInternalError());
+
+ // get rid of the unique
+ // communicator used here again
+ MPI_Comm_free (&mpi_communicator);
+ }
+
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::
+ create_triangulation (const std::vector<Point<spacedim> > &vertices,
+ const std::vector<CellData<dim> > &cells,
+ const SubCellData &subcelldata)
+ {
+ try
+ {
+ dealii::Triangulation<dim,spacedim>::
+ create_triangulation (vertices, cells, subcelldata);
+ }
+ catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
+ {
+ // the underlying
+ // triangulation should not
+ // be checking for
+ // distorted cells
+ AssertThrow (false, ExcInternalError());
+ }
+
+ // note that now we have some content in
+ // the p4est objects and call the
+ // functions that do the actual work
+ // (which are dimension dependent, so
+ // separate)
+ triangulation_has_content = true;
+
+ setup_coarse_cell_to_p4est_tree_permutation ();
+
+ copy_new_triangulation_to_p4est (internal::int2type<dim>());
+
+ try
+ {
+ copy_local_forest_to_triangulation ();
+ }
+ catch (const typename Triangulation<dim>::DistortedCellList &)
+ {
+ // the underlying
+ // triangulation should not
+ // be checking for
+ // distorted cells
+ AssertThrow (false, ExcInternalError());
+ }
+
+ update_number_cache ();
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::clear ()
+ {
+ triangulation_has_content = false;
+
+ if (parallel_forest != 0)
+ {
+ internal::p4est::functions<dim>::destroy (parallel_forest);
+ parallel_forest = 0;
+ }
+
+ if (connectivity != 0)
+ {
+ internal::p4est::functions<dim>::connectivity_destroy (connectivity);
+ connectivity = 0;
+ }
+
+ coarse_cell_to_p4est_tree_permutation.resize (0);
+ p4est_tree_to_coarse_cell_permutation.resize (0);
+
+ dealii::Triangulation<dim,spacedim>::clear ();
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::setup_coarse_cell_to_p4est_tree_permutation ()
+ {
+ SparsityPattern cell_connectivity;
+ GridTools::get_face_connectivity_of_cells (*this, cell_connectivity);
+ coarse_cell_to_p4est_tree_permutation.resize (this->n_cells(0));
+ SparsityTools::
+ reorder_Cuthill_McKee (cell_connectivity,
+ coarse_cell_to_p4est_tree_permutation);
+
+ p4est_tree_to_coarse_cell_permutation
+ = Utilities::invert_permutation (coarse_cell_to_p4est_tree_permutation);
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::write_mesh_vtk (const char *file_basename) const
+ {
+ Assert (parallel_forest != 0,
+ ExcMessage ("Can't produce output when no forest is created yet."));
+ internal::p4est::functions<dim>::
+ vtk_write_file (parallel_forest, 0, file_basename);
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::get_checksum () const
+ {
+ Assert (parallel_forest != 0,
+                ExcMessage ("Can't produce a checksum when no forest is created yet."));
+ return internal::p4est::functions<dim>::checksum (parallel_forest);
+ }
+
+
+ template <int dim, int spacedim>
+ typename dealii::internal::p4est::types<dim>::tree *
+ Triangulation<dim,spacedim>::
+ init_tree(const int dealii_coarse_cell_index) const
+ {
+ const unsigned int tree_index
+ = coarse_cell_to_p4est_tree_permutation[dealii_coarse_cell_index];
+ typename dealii::internal::p4est::types<dim>::tree *tree
+ = static_cast<typename dealii::internal::p4est::types<dim>::tree*>
+ (sc_array_index (parallel_forest->trees,
+ tree_index));
+
+ return tree;
+ }
+
+
+
+#if deal_II_dimension == 2
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::copy_new_triangulation_to_p4est (internal::int2type<2>)
+ {
+ Assert (this->n_cells(0) > 0, ExcInternalError());
+ Assert (this->n_levels() == 1, ExcInternalError());
+
+      // data structures that count how many
+ // cells touch each vertex
+ // (vertex_touch_count), and which cells
+ // touch a given vertex (together with
+ // the local numbering of that vertex
+ // within the cells that touch it)
+ std::vector<unsigned int> vertex_touch_count;
+ std::vector<
+ std::list<
+ std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,
+ unsigned int> > >
+ vertex_to_cell;
+ get_vertex_to_cell_mappings (*this,
+ vertex_touch_count,
+ vertex_to_cell);
+ const internal::p4est::types<2>::locidx
+ num_vtt = std::accumulate (vertex_touch_count.begin(),
+ vertex_touch_count.end(),
+ 0);
+
+ // now create a connectivity
+ // object with the right sizes
+ // for all arrays. set vertex
+ // information only in debug
+ // mode (saves a few bytes in
+ // optimized mode)
+ const bool set_vertex_info
+#ifdef DEBUG
+ = true
+#else
+ = false
+#endif
+ ;
+
+ connectivity
+ = internal::p4est::functions<2>::
+ connectivity_new ((set_vertex_info == true ? this->n_vertices() : 0),
+ this->n_cells(0),
+ this->n_vertices(),
+ num_vtt);
+
+ set_vertex_and_cell_info (*this,
+ vertex_touch_count,
+ vertex_to_cell,
+ coarse_cell_to_p4est_tree_permutation,
+ set_vertex_info,
+ connectivity);
+
+ Assert (p4est_connectivity_is_valid (connectivity) == 1,
+ ExcInternalError());
+
+ // now create a forest out of the
+ // connectivity data structure
+ parallel_forest
+ = internal::p4est::functions<2>::
+ new_forest (mpi_communicator,
+ connectivity,
+ /* minimum initial number of quadrants per tree */ 0,
+ /* minimum level of upfront refinement */ 0,
+ /* use uniform upfront refinement */ 1,
+ /* user_data_size = */ 0,
+ /* user_data_constructor = */ NULL,
+ /* user_pointer */ this);
+ }
+
+#endif
+
+#if deal_II_dimension == 3
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::copy_new_triangulation_to_p4est (internal::int2type<3>)
+ {
+ Assert (this->n_cells(0) > 0, ExcInternalError());
+ Assert (this->n_levels() == 1, ExcInternalError());
+
+      // data structures that count how many
+ // cells touch each vertex
+ // (vertex_touch_count), and which cells
+ // touch a given vertex (together with
+ // the local numbering of that vertex
+ // within the cells that touch it)
+ std::vector<unsigned int> vertex_touch_count;
+ std::vector<
+ std::list<
+ std::pair<Triangulation<3>::active_cell_iterator,
+ unsigned int> > >
+ vertex_to_cell;
+ get_vertex_to_cell_mappings (*this,
+ vertex_touch_count,
+ vertex_to_cell);
+      const internal::p4est::types<3>::locidx
+ num_vtt = std::accumulate (vertex_touch_count.begin(),
+ vertex_touch_count.end(),
+ 0);
+
+ std::vector<unsigned int> edge_touch_count;
+ std::vector<
+ std::list<
+ std::pair<Triangulation<3>::active_cell_iterator,
+ unsigned int> > >
+ edge_to_cell;
+ get_edge_to_cell_mappings (*this,
+ edge_touch_count,
+ edge_to_cell);
+      const internal::p4est::types<3>::locidx
+ num_ett = std::accumulate (edge_touch_count.begin(),
+ edge_touch_count.end(),
+ 0);
+ // now create a connectivity object with
+ // the right sizes for all arrays
+
+ const bool set_vertex_info
+#ifdef DEBUG
+ = true
+#else
+ = false
+#endif
+ ;
+
+ connectivity
+ = internal::p4est::functions<3>::
+ connectivity_new ((set_vertex_info == true ? this->n_vertices() : 0),
+ this->n_cells(0),
+ this->n_active_lines(),
+ num_ett,
+ this->n_vertices(),
+ num_vtt);
+
+ set_vertex_and_cell_info (*this,
+ vertex_touch_count,
+ vertex_to_cell,
+ coarse_cell_to_p4est_tree_permutation,
+ set_vertex_info,
+ connectivity);
+
+      // next, the tree-to-edge
+ // data. note that in p4est lines
+ // are ordered as follows
+ // *---3---* *---3---*
+ // /| | / /|
+ // 6 | 11 6 7 11
+ // / 10 | / / |
+ // * | | *---2---* |
+ // | *---1---* | | *
+ // | / / | 9 /
+ // 8 4 5 8 | 5
+ // |/ / | |/
+ // *---0---* *---0---*
+ // whereas in deal.II they are like this:
+ // *---7---* *---7---*
+ // /| | / /|
+ // 4 | 11 4 5 11
+ // / 10 | / / |
+ // * | | *---6---* |
+ // | *---3---* | | *
+ // | / / | 9 /
+ // 8 0 1 8 | 1
+ // |/ / | |/
+ // *---2---* *---2---*
+
+ const unsigned int deal_to_p4est_line_index[12]
+ = { 4, 5, 0, 1, 6, 7, 2, 3, 8, 9, 10, 11 } ;
+
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ {
+ const unsigned int
+ index = coarse_cell_to_p4est_tree_permutation[cell->index()];
+ for (unsigned int e=0; e<GeometryInfo<3>::lines_per_cell; ++e)
+ connectivity->tree_to_edge[index*GeometryInfo<3>::lines_per_cell+
+ deal_to_p4est_line_index[e]]
+ = cell->line(e)->index();
+ }
+
+ // now also set edge-to-tree
+ // information
+ connectivity->ett_offset[0] = 0;
+ std::partial_sum (edge_touch_count.begin(),
+ edge_touch_count.end(),
+ &connectivity->ett_offset[1]);
+
+ Assert (connectivity->ett_offset[this->n_active_lines()] ==
+ num_ett,
+ ExcInternalError());
+
+ for (unsigned int v=0; v<this->n_active_lines(); ++v)
+ {
+ Assert (edge_to_cell[v].size() == edge_touch_count[v],
+ ExcInternalError());
+
+ typename std::list<std::pair
+ <typename Triangulation<dim,spacedim>::active_cell_iterator,
+ unsigned int> >::const_iterator
+ p = edge_to_cell[v].begin();
+ for (unsigned int c=0; c<edge_touch_count[v]; ++c, ++p)
+ {
+ connectivity->edge_to_tree[connectivity->ett_offset[v]+c]
+ = coarse_cell_to_p4est_tree_permutation[p->first->index()];
+ connectivity->edge_to_edge[connectivity->ett_offset[v]+c]
+ = deal_to_p4est_line_index[p->second];
+ }
+ }
+
+ Assert (p8est_connectivity_is_valid (connectivity) == 1,
+ ExcInternalError());
+
+ // now create a forest out of the
+ // connectivity data structure
+ parallel_forest
+ = internal::p4est::functions<3>::
+ new_forest (mpi_communicator,
+ connectivity,
+ /* minimum initial number of quadrants per tree */ 0,
+ /* minimum level of upfront refinement */ 0,
+ /* use uniform upfront refinement */ 1,
+ /* user_data_size = */ 0,
+ /* user_data_constructor = */ NULL,
+ /* user_pointer */ this);
+ }
+
+#endif
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::copy_local_forest_to_triangulation ()
+ {
+ // disable mesh smoothing for
+ // recreating the deal.II
+ // triangulation, otherwise we might
+ // not be able to reproduce the p4est
+ // mesh exactly. We restore the
+ // original smoothing at the end of
+ // this function. Note that the
+ // smoothing flag is used in the normal
+ // refinement process.
+ typename Triangulation<dim,spacedim>::MeshSmoothing
+ save_smooth = this->smooth_grid;
+ this->smooth_grid = dealii::Triangulation<dim,spacedim>::none;
+ bool mesh_changed = false;
+
+ // query p4est for the ghost cells
+ typename internal::p4est::types<dim>::ghost * ghostlayer;
+ ghostlayer = internal::p4est::functions<dim>::ghost_new (parallel_forest,
+ (dim == 2
+ ?
+ typename internal::p4est::types<dim>::
+ balance_type(P4EST_BALANCE_CORNER)
+ :
+ typename internal::p4est::types<dim>::
+ balance_type(P8EST_BALANCE_CORNER)));
+
+ Assert (ghostlayer, ExcInternalError());
+
+
+ //set all cells to artificial
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell = this->begin(0);
+ cell != this->end(0);
+ ++cell)
+ cell->recursively_set_subdomain_id(types::artificial_subdomain_id);
+
+ do
+ {
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell = this->begin(0);
+ cell != this->end(0);
+ ++cell)
+ {
+ // if this processor
+ // stores no part of the
+ // forest that comes out
+ // of this coarse grid
+ // cell, then we need to
+ // delete all children of
+ // this cell (the coarse
+ // grid cell remains)
+ if (tree_exists_locally<dim,spacedim>(parallel_forest,
+ coarse_cell_to_p4est_tree_permutation[cell->index()])
+ == false)
+ {
+ delete_all_children<dim,spacedim> (cell);
+ cell->set_subdomain_id (types::artificial_subdomain_id);
+ }
+
+ else
+ {
+
+ // this processor
+ // stores at least a
+ // part of the tree
+ // that comes out of
+ // this cell.
+
+
+ typename internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ typename internal::p4est::types<dim>::tree *tree =
+ init_tree(cell->index());
+
+ internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
+
+ match_tree_recursively<dim,spacedim> (*tree, cell,
+ p4est_coarse_cell,
+ *parallel_forest,
+ my_subdomain);
+ }
+ }
+
+          // check the mesh for ghost cells
+          // and refine as necessary:
+          // iterate over every ghost
+          // quadrant, find the corresponding
+          // deal.II coarse cell, and recurse.
+ typename internal::p4est::types<dim>::quadrant * quadr;
+ unsigned int ghost_owner=0;
+
+ for (unsigned int g_idx=0;g_idx<ghostlayer->ghosts.elem_count;++g_idx)
+ {
+ while (g_idx >= (unsigned int)ghostlayer->proc_offsets[ghost_owner+1])
+ ++ghost_owner;
+
+ quadr = static_cast<typename internal::p4est::types<dim>::quadrant *>
+ ( sc_array_index(&ghostlayer->ghosts, g_idx) );
+
+ unsigned int coarse_cell_index =
+ p4est_tree_to_coarse_cell_permutation[quadr->p.piggy3.which_tree];
+
+ const typename Triangulation<dim,spacedim>::cell_iterator
+ cell (this, 0U, coarse_cell_index);
+
+ typename internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ internal::p4est::init_coarse_quadrant<dim> (p4est_coarse_cell);
+
+ match_quadrant_recursively<dim,spacedim> (p4est_coarse_cell, cell, *quadr,
+ *parallel_forest, ghost_owner);
+ }
+
+ // fix all the flags to make
+ // sure we have a consistent
+ // mesh
+ this->prepare_coarsening_and_refinement ();
+
+ // see if any flags are still set
+ mesh_changed = false;
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end();
+ ++cell)
+ if (cell->refine_flag_set() || cell->coarsen_flag_set())
+ {
+ mesh_changed = true;
+ break;
+ }
+
+ // actually do the refinement
+ // but prevent the refinement
+ // hook below from taking
+ // over
+ const bool saved_refinement_in_progress = refinement_in_progress;
+ refinement_in_progress = true;
+
+ try
+ {
+ this->execute_coarsening_and_refinement();
+ }
+ catch (const typename Triangulation<dim,spacedim>::DistortedCellList &)
+ {
+ // the underlying
+ // triangulation should not
+ // be checking for
+ // distorted cells
+ AssertThrow (false, ExcInternalError());
+ }
+
+ refinement_in_progress = saved_refinement_in_progress;
+ }
+ while (mesh_changed);
+
+#ifdef DEBUG
+ // check if correct number of ghosts is created
+ unsigned int num_ghosts = 0;
+
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end();
+ ++cell)
+ {
+ if (cell->subdomain_id() != my_subdomain
+ &&
+ cell->subdomain_id() != types::artificial_subdomain_id)
+ ++num_ghosts;
+ }
+
+ Assert( num_ghosts == ghostlayer->ghosts.elem_count, ExcInternalError());
+#endif
+
+
+
+ // check that our local copy has
+ // exactly as many cells as the
+ // p4est original (at least if we
+ // are on only one processor);
+ // for parallel computations, we
+ // want to check that we have at
+ // least as many as p4est stores
+ // locally (in the future we
+ // should check that we have
+ // exactly as many non-artificial
+ // cells as
+ // parallel_forest->local_num_quadrants)
+ {
+ const unsigned int total_local_cells = this->n_active_cells();
+
+ if (Utilities::System::get_n_mpi_processes (mpi_communicator) == 1)
+ Assert (static_cast<unsigned int>(parallel_forest->local_num_quadrants) ==
+ total_local_cells,
+ ExcInternalError())
+ else
+ Assert (static_cast<unsigned int>(parallel_forest->local_num_quadrants) <=
+ total_local_cells,
+ ExcInternalError());
+
+ // count the number of owned, active
+ // cells and compare with p4est.
+ unsigned int n_owned = 0;
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ {
+ if (cell->subdomain_id() == my_subdomain)
+ ++n_owned;
+ }
+
+ Assert(static_cast<unsigned int>(parallel_forest->local_num_quadrants) ==
+ n_owned, ExcInternalError());
+
+ }
+
+ internal::p4est::functions<dim>::ghost_destroy (ghostlayer);
+
+ this->smooth_grid = save_smooth;
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim, spacedim>::execute_coarsening_and_refinement ()
+ {
+ // first make sure that recursive
+ // calls are handled correctly
+ if (refinement_in_progress == true)
+ {
+ dealii::Triangulation<dim,spacedim>::execute_coarsening_and_refinement ();
+ return;
+ }
+
+ Timer t(MPI_COMM_WORLD, true);
+ t.start();
+
+
+
+ // now do the work we're
+ // supposed to do when we are
+ // in charge
+ refinement_in_progress = true;
+ this->prepare_coarsening_and_refinement ();
+
+ // make sure all flags are
+ // cleared on cells we don't
+ // own, since nothing good can
+ // come of that if they are
+ // still around
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ if (cell->is_ghost() || cell->is_artificial())
+ {
+ cell->clear_refine_flag ();
+ cell->clear_coarsen_flag ();
+ }
+
+
+
+ // count how many cells will be refined
+ // and coarsened, and allocate that much
+ // memory
+ RefineAndCoarsenList<dim,spacedim>
+ refine_and_coarsen_list (*this,
+ p4est_tree_to_coarse_cell_permutation,
+ my_subdomain,
+ *parallel_forest);
+
+ // copy refine and coarsen flags into
+ // p4est and execute the refinement and
+ // coarsening. this uses the
+ // refine_and_coarsen_list just built,
+ // which is communicated to the callback
+ // functions through the
+ // user_pointer
+ Assert (parallel_forest->user_pointer == this,
+ ExcInternalError());
+ parallel_forest->user_pointer = &refine_and_coarsen_list;
+
+ internal::p4est::functions<dim>::
+ refine (parallel_forest, /* refine_recursive */ false,
+ &RefineAndCoarsenList<dim,spacedim>::refine_callback,
+ /*init_callback=*/NULL);
+ internal::p4est::functions<dim>::
+ coarsen (parallel_forest, /* coarsen_recursive */ false,
+ &RefineAndCoarsenList<dim,spacedim>::coarsen_callback,
+ /*init_callback=*/NULL);
+
+ t.stop();
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (my_subdomain==0)
+ {
+ deallog << "_p4est::ref&coarsen ";
+ t.print_data(deallog);
+ }
+#endif
+
+ t.reset();
+ t.start();
+
+
+ // make sure all cells in the lists have
+ // been consumed
+ Assert (refine_and_coarsen_list.pointers_are_at_end(),
+ ExcInternalError());
+
+ // reset the pointer
+ parallel_forest->user_pointer = this;
+
+ // enforce 2:1 hanging node condition
+ internal::p4est::functions<dim>::
+ balance (parallel_forest,
+ /* face and corner balance */
+ (dim == 2
+ ?
+ typename internal::p4est::types<dim>::
+ balance_type(P4EST_BALANCE_FULL)
+ :
+ typename internal::p4est::types<dim>::
+ balance_type(P8EST_BALANCE_FULL)),
+ /*init_callback=*/NULL);
+
+
+ t.stop();
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (my_subdomain==0)
+ {
+ deallog << "_p4est::balance ";
+ t.print_data(deallog);
+ }
+#endif
+ t.reset();
+ t.start();
+
+
+ // before repartitioning the mesh let
+ // others attach mesh related info
+ // (such as SolutionTransfer data) to
+ // the p4est
+ attach_mesh_data();
+
+
+ t.stop();
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (my_subdomain==0)
+ {
+ deallog << "_attach_mesh_data ";
+ t.print_data(deallog);
+ }
+#endif
+ t.reset();
+ t.start();
+
+ // partition the new mesh between
+ // all processors
+ internal::p4est::functions<dim>::
+ partition (parallel_forest,
+ /* prepare coarsening */ 1,
+ /* weight_callback */ NULL);
+
+ t.stop();
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (my_subdomain==0)
+ {
+ deallog << "_p4est::partition ";
+ t.print_data(deallog);
+ }
+#endif
+ t.reset();
+ t.start();
+
+ // finally copy back from local
+ // part of tree to deal.II
+ // triangulation. before doing
+ // so, make sure there are no
+ // refine or coarsen flags
+ // pending
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ {
+ cell->clear_refine_flag();
+ cell->clear_coarsen_flag();
+ }
+
+ try
+ {
+ copy_local_forest_to_triangulation ();
+ }
+ catch (const typename Triangulation<dim>::DistortedCellList &)
+ {
+ // the underlying
+ // triangulation should not
+ // be checking for
+ // distorted cells
+ AssertThrow (false, ExcInternalError());
+ }
+
+
+ refinement_in_progress = false;
+
+ update_number_cache ();
+
+ t.stop();
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (my_subdomain==0)
+ {
+ deallog << "_copy_to_deal ";
+ t.print_data(deallog);
+ }
+#endif
+ t.reset();
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::update_number_cache ()
+ {
+ Assert (number_cache.n_locally_owned_active_cells.size()
+ ==
+ Utilities::System::get_n_mpi_processes (mpi_communicator),
+ ExcInternalError());
+
+ std::fill (number_cache.n_locally_owned_active_cells.begin(),
+ number_cache.n_locally_owned_active_cells.end(),
+ 0);
+
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ if (cell->subdomain_id() == my_subdomain)
+ ++number_cache.n_locally_owned_active_cells[my_subdomain];
+
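+ // exchange this number with all
+ // other processors so that
+ // afterwards every processor
+ // knows how many active cells
+ // each processor owns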
+ unsigned int send_value
+ = number_cache.n_locally_owned_active_cells[my_subdomain];
+ MPI_Allgather (&send_value,
+ 1,
+ MPI_UNSIGNED,
+ &number_cache.n_locally_owned_active_cells[0],
+ 1,
+ MPI_UNSIGNED,
+ mpi_communicator);
+
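+ // the global number of active
+ // cells is then simply the sum
+ // of the per-processor counts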
+ number_cache.n_global_active_cells
+ = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
+ number_cache.n_locally_owned_active_cells.end(),
+ 0);
+ }
+
+
+
+ template <int dim, int spacedim>
+ types::subdomain_id_t
+ Triangulation<dim,spacedim>::locally_owned_subdomain () const
+ {
+ return my_subdomain;
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
+ {
+ return number_cache.n_locally_owned_active_cells[my_subdomain];
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::n_global_active_cells () const
+ {
+ return number_cache.n_global_active_cells;
+ }
+
+
+
+ template <int dim, int spacedim>
+ const std::vector<unsigned int> &
+ Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
+ {
+ return number_cache.n_locally_owned_active_cells;
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::
+ register_data_attach (const size_t size,
+ const std_cxx1x::function<void(const cell_iterator&,
+ const CellStatus,
+ void*)> & pack_callback)
+ {
+ Assert(size>0, ExcMessage("register_data_attach(), size==0"));
+ Assert(attached_data_pack_callbacks.size()==n_attached_datas,
+ ExcMessage("register_data_attach(), not all data has been unpacked last time?"));
+
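+ // the data of every cell is preceded
+ // by a CellStatus entry; each newly
+ // registered callback gets the next
+ // 'size' bytes behind the data of all
+ // previously registered callbacks, and
+ // the offset returned here is where the
+ // caller will later find its data again
+ // when calling notify_ready_to_unpack()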
+ unsigned int offset = attached_data_size+sizeof(CellStatus);
+ ++n_attached_datas;
+ attached_data_size+=size;
+ attached_data_pack_callbacks.push_back(
+ std::pair<unsigned int, pack_callback_t> (offset, pack_callback)
+ );
+ return offset;
+ }
+
+
+
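+ // for illustration only (an assumption about typical use, not part of
+ // this patch): a user of the data attach mechanism would do something
+ // along the lines of
+ //   const unsigned int offset
+ //     = tria.register_data_attach (size, pack_callback);
+ //   tria.execute_coarsening_and_refinement ();
+ //   tria.notify_ready_to_unpack (offset, unpack_callback);
+ // i.e. register the pack callback before the mesh changes and retrieve
+ // the data on the new cells afterwards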
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::
+ notify_ready_to_unpack (const unsigned int offset,
+ const std_cxx1x::function<void (const cell_iterator&,
+ const CellStatus,
+ const void*)> & unpack_callback)
+ {
+ Assert(offset < attached_data_size, ExcMessage("invalid offset in notify_ready_to_unpack()"));
+ Assert(n_attached_datas>0, ExcMessage("notify_ready_to_unpack() called too often"));
+
+ // Recurse over p4est and hand the caller the data back
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell = this->begin(0);
+ cell != this->end(0);
+ ++cell)
+ {
+ // skip coarse cells that are not ours
+ if (tree_exists_locally<dim,spacedim>(parallel_forest,
+ coarse_cell_to_p4est_tree_permutation[cell->index()])
+ == false)
+ continue;
+
+ typename internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ typename internal::p4est::types<dim>::tree *tree =
+ init_tree(cell->index());
+
+ internal::p4est::init_coarse_quadrant<dim> (p4est_coarse_cell);
+
+ // the parent_cell argument passed
+ // here is not correct, but it is
+ // only ever used for refined cells
+ post_mesh_data_recursively<dim,spacedim>(*tree,
+ cell,
+ cell,
+ p4est_coarse_cell,
+ offset,
+ unpack_callback);
+ }
+
+ --n_attached_datas;
+
+ if (!n_attached_datas)
+ {
+ // everybody got their data, time for cleanup!
+ attached_data_size=0;
+ attached_data_pack_callbacks.clear();
+
+ // and release the data
+ void * userptr = parallel_forest->user_pointer;
+ internal::p4est::functions<dim>::reset_data (parallel_forest, 0, NULL, NULL);
+ parallel_forest->user_pointer = userptr;
+ }
+ }
+
+
+
+ template <int dim, int spacedim>
+ MPI_Comm
+ Triangulation<dim,spacedim>::get_communicator () const
+ {
+ return mpi_communicator;
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::memory_consumption () const
+ {
+ unsigned int mem=
+ this->dealii::Triangulation<dim,spacedim>::memory_consumption()
+ + MemoryConsumption::memory_consumption(mpi_communicator)
+ + MemoryConsumption::memory_consumption(my_subdomain)
+ + MemoryConsumption::memory_consumption(triangulation_has_content)
+ + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
+ + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
+ + MemoryConsumption::memory_consumption(connectivity)
+ + MemoryConsumption::memory_consumption(parallel_forest)
+ + MemoryConsumption::memory_consumption(refinement_in_progress)
+ + MemoryConsumption::memory_consumption(attached_data_size)
+ + MemoryConsumption::memory_consumption(n_attached_datas)
+// + MemoryConsumption::memory_consumption(attached_data_pack_callbacks) //TODO[TH]: how?
+ + MemoryConsumption::memory_consumption(coarse_cell_to_p4est_tree_permutation)
+ + MemoryConsumption::memory_consumption(p4est_tree_to_coarse_cell_permutation)
+ + memory_consumption_p4est()
+ ;
+
+ return mem;
+ }
+
+
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::memory_consumption_p4est () const
+ {
+ return internal::p4est::functions<dim>::forest_memory_used(parallel_forest)
+ + internal::p4est::functions<dim>::connectivity_memory_used(connectivity);
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::
+ copy_triangulation (const dealii::Triangulation<dim, spacedim> &)
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::
+ attach_mesh_data()
+ {
+ // determine the size of memory in
+ // bytes to attach to each cell. this
+ // has to be the same for every cell
+ // because p4est only supports a fixed
+ // amount of attached data per cell
+ if (attached_data_size==0)
+ {
+ Assert(n_attached_datas==0, ExcInternalError());
+
+ //nothing to do
+ return;
+ }
+
+ // realloc user_data in p4est
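+ // (the call below passes a NULL user
+ // pointer to reset_data(), so the
+ // current pointer is saved first and
+ // restored again afterwards)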
+ void * userptr = parallel_forest->user_pointer;
+ internal::p4est::functions<dim>::reset_data (parallel_forest,
+ attached_data_size+sizeof(CellStatus),
+ NULL, NULL);
+ parallel_forest->user_pointer = userptr;
+
+
+ // Recurse over p4est and Triangulation
+ // to find refined/coarsened/kept
+ // cells. Then query and attach the data.
+ for (typename Triangulation<dim,spacedim>::cell_iterator
+ cell = this->begin(0);
+ cell != this->end(0);
+ ++cell)
+ {
+ // skip coarse cells that are not ours
+ if (tree_exists_locally<dim,spacedim>(parallel_forest,
+ coarse_cell_to_p4est_tree_permutation[cell->index()])
+ == false)
+ continue;
+
+ typename internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ typename internal::p4est::types<dim>::tree *tree =
+ init_tree(cell->index());
+
+ internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
+
+ attach_mesh_data_recursively<dim,spacedim>(*tree,
+ cell,
+ p4est_coarse_cell,
+ attached_data_pack_callbacks);
+ }
+
+
+
+
+ }
+
+
+
+
+#if deal_II_dimension == 1
+ template <int spacedim>
+ Triangulation<1,spacedim>::Triangulation (MPI_Comm)
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ template <int spacedim>
+ Triangulation<1,spacedim>::~Triangulation ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+
+ template <int spacedim>
+ types::subdomain_id_t
+ Triangulation<1,spacedim>::locally_owned_subdomain () const
+ {
+ Assert (false, ExcNotImplemented());
+ return 0;
+ }
+
+
+ template <int spacedim>
+ MPI_Comm
+ Triangulation<1,spacedim>::get_communicator () const
+ {
+ return MPI_COMM_WORLD;
+ }
+#endif
+
+ }
+}
+
+
+#else // DEAL_II_USE_P4EST
+
+namespace parallel
+{
+ namespace distributed
+ {
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::Triangulation ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::~Triangulation ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+
+ template <int dim, int spacedim>
+ types::subdomain_id_t
+ Triangulation<dim,spacedim>::locally_owned_subdomain () const
+ {
+ Assert (false, ExcNotImplemented());
+ return 0;
+ }
+ }
+}
+
+
+#endif // DEAL_II_USE_P4EST
+
+
+
+// explicit instantiations
+namespace internal
+{
+ namespace p4est
+ {
+# if deal_II_dimension > 1
+ template
+ void
+ init_quadrant_children<deal_II_dimension>
+ (const types<deal_II_dimension>::quadrant & p4est_cell,
+ types<deal_II_dimension>::quadrant (&p4est_children)[GeometryInfo<deal_II_dimension>::max_children_per_cell]);
+
+ template
+ void
+ init_coarse_quadrant<deal_II_dimension>
+ (types<deal_II_dimension>::quadrant & quad);
+
+ template
+ bool
+ quadrant_is_equal<deal_II_dimension>
+ (const types<deal_II_dimension>::quadrant & q1,
+ const types<deal_II_dimension>::quadrant & q2);
+
+ template
+ bool
+ quadrant_is_ancestor<deal_II_dimension>
+ (const types<deal_II_dimension>::quadrant & q1,
+ const types<deal_II_dimension>::quadrant & q2);
+#endif
+ }
+}
+namespace parallel
+{
+ namespace distributed
+ {
+ template class Triangulation<deal_II_dimension>;
+# if deal_II_dimension < 3
+ template class Triangulation<deal_II_dimension, deal_II_dimension+1>;
+# endif
+ }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+
{
Assert (static_cast<unsigned int>(this->present_level) < this->dof_handler->levels.size(),
ExcMessage ("DoFHandler not initialized"));
-
+
+ Assert (this->dof_handler != 0, typename BaseClass::ExcInvalidObject());
+ Assert (&this->get_fe() != 0, typename BaseClass::ExcInvalidObject());
+
+ internal::DoFCellAccessor::Implementation::
+ update_cell_dof_indices_cache (*this);
+}
+
+
+
+template <class DH>
+void
+DoFCellAccessor<DH>::set_dof_indices (const std::vector<unsigned int> &local_dof_indices)
+{
+ Assert (static_cast<unsigned int>(this->present_level) < this->dof_handler->levels.size(),
+ ExcMessage ("DoFHandler not initialized"));
+
Assert (this->dof_handler != 0, typename BaseClass::ExcInvalidObject());
Assert (&this->get_fe() != 0, typename BaseClass::ExcInvalidObject());
- internal::DoFCellAccessor::Implementation::update_cell_dof_indices_cache (*this);
+ internal::DoFCellAccessor::Implementation::
+ set_dof_indices (*this, local_dof_indices);
}
+
template <class DH>
typename internal::DoFHandler::Iterators<DH>::cell_iterator
DoFCellAccessor<DH>::neighbor_child_on_subface (const unsigned int face,
{
const FiniteElement<dim,spacedim> &fe = this->get_fe();
const unsigned int dofs_per_cell = fe.dofs_per_cell;
-
+
Assert (this->dof_handler != 0,
typename BaseClass::ExcInvalidObject());
Assert (&fe != 0,
{
Vector<number> tmp1(dofs_per_cell);
Vector<number> tmp2(dofs_per_cell);
-
+
interpolated_values = 0;
// later on we will have to
std::vector<bool> restriction_is_additive (dofs_per_cell);
for (unsigned int i=0; i<dofs_per_cell; ++i)
restriction_is_additive[i] = fe.restriction_is_additive(i);
-
+
for (unsigned int child=0; child<this->n_children(); ++child)
{
// get the values from the present
// and add up or set them
// in the output vector
for (unsigned int i=0; i<dofs_per_cell; ++i)
- if (restriction_is_additive[i])
+ if (restriction_is_additive[i])
interpolated_values(i) += tmp2(i);
else
if (tmp2(i) != number())
// otherwise distribute them to the children
{
Vector<number> tmp(dofs_per_cell);
-
+
for (unsigned int child=0; child<this->n_children(); ++child)
{
// prolong the given data
// to the present cell
this->get_fe().get_prolongation_matrix(child, this->refinement_case())
.vmult (tmp, local_values);
-
+
this->child(child)->set_dof_values_by_interpolation (tmp, values);
}
}
#if deal_II_dimension != 3
template class DoFCellAccessor<DoFHandler<deal_II_dimension,deal_II_dimension+1> >;
-template class
+template class
TriaRawIterator <DoFCellAccessor<DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
-template class
+template class
TriaIterator <DoFCellAccessor<DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
-template class
+template class
TriaActiveIterator<DoFCellAccessor<DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
#endif
#if deal_II_dimension != 3
template class DoFCellAccessor<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> >;
-template class
+template class
TriaRawIterator <DoFCellAccessor<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
-template class
+template class
TriaIterator <DoFCellAccessor<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
-template class
+template class
TriaActiveIterator<DoFCellAccessor<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> > >;
#endif
#include <base/memory_consumption.h>
#include <dofs/dof_handler.h>
+#include <dofs/dof_handler_policy.h>
#include <dofs/dof_levels.h>
#include <dofs/dof_faces.h>
#include <dofs/dof_accessor.h>
// namespace internal::DoFHandler
using dealii::DoFHandler;
+
/**
* A class with the same purpose as the similarly named class of the
* Triangulation class. See there for more information.
*/
struct Implementation
{
- /**
- * Distribute dofs on the given cell,
- * with new dofs starting with index
- * @p next_free_dof. Return the next
- * unused index number. The finite
- * element used is the one given to
- * @p distribute_dofs, which is copied
- * to @p selected_fe.
- *
- * This function is excluded from the
- * @p distribute_dofs function since
- * it can not be implemented dimension
- * independent.
- */
- template <int spacedim>
- static
- unsigned int
- distribute_dofs_on_cell (const DoFHandler<1,spacedim> &dof_handler,
- typename DoFHandler<1,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
- {
-
- // distribute dofs of vertices
- for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
- {
- typename DoFHandler<1,spacedim>::cell_iterator
- neighbor = cell->neighbor(v);
-
- if (neighbor.state() == IteratorState::valid)
- {
- // find true neighbor; may be its
- // a child of @p{neighbor}
- while (neighbor->has_children())
- neighbor = neighbor->child(v==0 ? 1 : 0);
-
- // has neighbor already been processed?
- if (neighbor->user_flag_set())
- // copy dofs
- {
- if (v==0)
- for (unsigned int d=0;
- d<dof_handler.selected_fe->dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (0, d,
- neighbor->vertex_dof_index (1, d));
- else
- for (unsigned int d=0;
- d<dof_handler.selected_fe->dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (1, d,
- neighbor->vertex_dof_index (0, d));
-
- // next neighbor
- continue;
- }
- }
-
- // otherwise: create dofs newly
- for (unsigned int d=0;
- d<dof_handler.selected_fe->dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (v, d, next_free_dof++);
- }
-
- // dofs of line
- for (unsigned int d=0;
- d<dof_handler.selected_fe->dofs_per_line; ++d)
- cell->set_dof_index (d, next_free_dof++);
-
- // note that this cell has been
- // processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
-
- template <int spacedim>
- static
- unsigned int
- distribute_dofs_on_cell (const DoFHandler<2,spacedim> &dof_handler,
- typename DoFHandler<2,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
- {
- if (dof_handler.selected_fe->dofs_per_vertex > 0)
- // number dofs on vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
- // check whether dofs for this
- // vertex have been distributed
- // (only check the first dof)
- if (cell->vertex_dof_index(vertex, 0) == DoFHandler<2,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (vertex, d, next_free_dof++);
-
- // for the four sides
- if (dof_handler.selected_fe->dofs_per_line > 0)
- for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
- {
- typename DoFHandler<2,spacedim>::line_iterator
- line = cell->line(side);
-
- // distribute dofs if necessary:
- // check whether line dof is already
- // numbered (check only first dof)
- if (line->dof_index(0) == DoFHandler<2,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_line; ++d)
- line->set_dof_index (d, next_free_dof++);
- }
-
-
- // dofs of quad
- if (dof_handler.selected_fe->dofs_per_quad > 0)
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_quad; ++d)
- cell->set_dof_index (d, next_free_dof++);
-
-
- // note that this cell has been processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
- template <int spacedim>
- static
- unsigned int
- distribute_dofs_on_cell (const DoFHandler<3,spacedim> &dof_handler,
- typename DoFHandler<3,spacedim>::active_cell_iterator &cell,
- unsigned int next_free_dof)
- {
- if (dof_handler.selected_fe->dofs_per_vertex > 0)
- // number dofs on vertices
- for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
- // check whether dofs for this
- // vertex have been distributed
- // (only check the first dof)
- if (cell->vertex_dof_index(vertex, 0) == DoFHandler<3,spacedim>::invalid_dof_index)
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_vertex; ++d)
- cell->set_vertex_dof_index (vertex, d, next_free_dof++);
-
- // for the lines
- if (dof_handler.selected_fe->dofs_per_line > 0)
- for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
- {
- typename DoFHandler<3,spacedim>::line_iterator
- line = cell->line(l);
-
- // distribute dofs if necessary:
- // check whether line dof is already
- // numbered (check only first dof)
- if (line->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_line; ++d)
- line->set_dof_index (d, next_free_dof++);
- }
-
- // for the quads
- if (dof_handler.selected_fe->dofs_per_quad > 0)
- for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
- {
- typename DoFHandler<3,spacedim>::quad_iterator
- quad = cell->quad(q);
-
- // distribute dofs if necessary:
- // check whether quad dof is already
- // numbered (check only first dof)
- if (quad->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
- // if not: distribute dofs
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_quad; ++d)
- quad->set_dof_index (d, next_free_dof++);
- }
-
-
- // dofs of hex
- if (dof_handler.selected_fe->dofs_per_hex > 0)
- for (unsigned int d=0; d<dof_handler.selected_fe->dofs_per_hex; ++d)
- cell->set_dof_index (d, next_free_dof++);
-
-
- // note that this cell has been
- // processed
- cell->set_user_flag ();
-
- return next_free_dof;
- }
-
-
- /**
- * Implementation of the general template
- * of same name.
- */
- template <int spacedim>
- static
- void renumber_dofs (const std::vector<unsigned int> &new_numbers,
- DoFHandler<1,spacedim> &dof_handler)
- {
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- else
- // if index is
- // invalid_dof_index:
- // check if this one
- // really is unused
- Assert (dof_handler.tria
- ->vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
-
- for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- for (std::vector<unsigned int>::iterator
- i=dof_handler.levels[level]->lines.dofs.begin();
- i!=dof_handler.levels[level]->lines.dofs.end(); ++i)
- if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- }
-
-
-
- template <int spacedim>
- static
- void renumber_dofs (const std::vector<unsigned int> &new_numbers,
- DoFHandler<2,spacedim> &dof_handler)
- {
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- else
- // if index is invalid_dof_index:
- // check if this one really is
- // unused
- Assert (dof_handler.tria
- ->vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
-
- for (std::vector<unsigned int>::iterator
- i=dof_handler.faces->lines.dofs.begin();
- i!=dof_handler.faces->lines.dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
-
- for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- {
- for (std::vector<unsigned int>::iterator
- i=dof_handler.levels[level]->quads.dofs.begin();
- i!=dof_handler.levels[level]->quads.dofs.end(); ++i)
- if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- }
- }
-
-
- template <int spacedim>
- static
- void renumber_dofs (const std::vector<unsigned int> &new_numbers,
- DoFHandler<3,spacedim> &dof_handler)
- {
- // note that we can not use cell
- // iterators in this function since
- // then we would renumber the dofs on
- // the interface of two cells more
- // than once. Anyway, this way it's
- // not only more correct but also
- // faster; note, however, that dof
- // numbers may be invalid_dof_index,
- // namely when the appropriate
- // vertex/line/etc is unused
- for (std::vector<unsigned int>::iterator
- i=dof_handler.vertex_dofs.begin();
- i!=dof_handler.vertex_dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- else
- // if index is invalid_dof_index:
- // check if this one really is
- // unused
- Assert (dof_handler.tria
- ->vertex_used((i-dof_handler.vertex_dofs.begin()) /
- dof_handler.selected_fe->dofs_per_vertex)
- == false,
- ExcInternalError ());
-
- for (std::vector<unsigned int>::iterator
- i=dof_handler.faces->lines.dofs.begin();
- i!=dof_handler.faces->lines.dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- for (std::vector<unsigned int>::iterator
- i=dof_handler.faces->quads.dofs.begin();
- i!=dof_handler.faces->quads.dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
-
- for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
- {
- for (std::vector<unsigned int>::iterator
- i=dof_handler.levels[level]->hexes.dofs.begin();
- i!=dof_handler.levels[level]->hexes.dofs.end(); ++i)
- if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
- *i = new_numbers[*i];
- }
- }
-
-
-
/**
* Implement the function of same name in
* the mother class.
:
tria(&tria, typeid(*this).name()),
selected_fe(0, typeid(*this).name()),
- faces(NULL),
- used_dofs (0)
-{}
+ faces(NULL)
+{
+ // decide whether we need a
+ // sequential or a parallel
+ // distributed policy
+ if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&tria)
+ == 0)
+ policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
+ else
+ policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
+}
template<int dim, int spacedim>
:
tria(0, typeid(*this).name()),
selected_fe(0, typeid(*this).name()),
- faces(NULL),
- used_dofs (0)
+ faces(NULL)
{}
const FiniteElement<dim,spacedim>& fe)
{
tria = &t;
- faces = NULL;
- used_dofs = 0;
+ faces = 0;
+ number_cache.n_global_dofs = 0;
+
+ // decide whether we need a
+ // sequential or a parallel
+ // distributed policy
+ if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&t)
+ == 0)
+ policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
+ else
+ policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
+
distribute_dofs(fe);
}
MemoryConsumption::memory_consumption (levels) +
MemoryConsumption::memory_consumption (*faces) +
MemoryConsumption::memory_consumption (faces) +
- MemoryConsumption::memory_consumption (used_dofs) +
+ sizeof (number_cache) +
MemoryConsumption::memory_consumption (vertex_dofs));
for (unsigned int i=0; i<levels.size(); ++i)
mem += MemoryConsumption::memory_consumption (*levels[i]);
template<int dim, int spacedim>
void DoFHandler<dim,spacedim>::
- distribute_dofs (const FiniteElement<dim,spacedim> &ff,
- const unsigned int offset)
+distribute_dofs (const FiniteElement<dim,spacedim> &ff,
+ const unsigned int offset)
{
- Assert (tria->n_levels() > 0, ExcInvalidTriangulation());
-
selected_fe = &ff;
- // delete all levels and set them up
- // newly, since vectors are
- // troublesome if you want to change
- // their size
+ // delete all levels and set them
+ // up newly. note that we still
+ // have to allocate space for all
+ // degrees of freedom on this mesh
+ // (including ghost and cells that
+ // are entirely stored on different
+ // processors), though we may not
+ // assign numbers to some of them
+ // (i.e. they will remain at
+ // invalid_dof_index). We need to
+ // allocate the space because we
+ // will want to be able to query
+ // the dof_indices on each cell,
+ // and simply be told that we don't
+ // know them on some cell (i.e. get
+ // back invalid_dof_index)
clear_space ();
internal::DoFHandler::Implementation::reserve_space (*this);
- // Clear user flags because we will
- // need them. But first we save
- // them and make sure that we
- // restore them later such that at
- // the end of this function the
- // Triangulation will be in the
- // same state as it was at the
- // beginning of this function.
- std::vector<bool> user_flags;
- tria->save_user_flags(user_flags);
- const_cast<Triangulation<dim,spacedim> &>(*tria).clear_user_flags ();
-
- unsigned int next_free_dof = offset;
- active_cell_iterator cell = begin_active(),
- endc = end();
-
- for (; cell != endc; ++cell)
- next_free_dof
- = internal::DoFHandler::Implementation::
- distribute_dofs_on_cell (*this, cell, next_free_dof);
+ // hand things off to the policy
+ number_cache = policy->distribute_dofs (offset,
+ *this);
- used_dofs = next_free_dof;
-
- // update the cache used
- // for cell dof indices
- for (cell_iterator cell = begin(); cell != end(); ++cell)
- cell->update_cell_dof_indices_cache ();
-
- // finally restore the user flags
- const_cast<Triangulation<dim,spacedim> &>(*tria).load_user_flags(user_flags);
-
- block_info_object.initialize(*this);
+ // initialize the block info object
+ // only if this is a sequential
+ // triangulation. it doesn't work
+ // correctly yet if it is parallel
+ if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>(&*tria) == 0)
+ block_info_object.initialize(*this);
}
+
template<int dim, int spacedim>
void DoFHandler<dim,spacedim>::initialize_local_block_info ()
{
}
+
template<int dim, int spacedim>
void DoFHandler<dim,spacedim>::clear ()
{
DoFHandler<dim,spacedim>::
renumber_dofs (const std::vector<unsigned int> &new_numbers)
{
- Assert (new_numbers.size() == n_dofs(), ExcRenumberingIncomplete());
+ Assert (new_numbers.size() == n_locally_owned_dofs(),
+ ExcRenumberingIncomplete());
#ifdef DEBUG
// assert that the new indices are
- // consecutively numbered
- if (true)
+ // consecutively numbered if we are
+ // working on a single
+ // processor. this doesn't need to
+ // hold in the case of a parallel
+ // mesh since we map the interval
+ // [0...n_dofs()) into itself but
+ // only globally, not on each
+ // processor
+ if (n_locally_owned_dofs() == n_dofs())
{
std::vector<unsigned int> tmp(new_numbers);
std::sort (tmp.begin(), tmp.end());
for (; p!=tmp.end(); ++p, ++i)
Assert (*p == i, ExcNewNumbersNotConsecutive(i));
}
+ else
+ for (unsigned int i=0; i<new_numbers.size(); ++i)
+ Assert (new_numbers[i] < n_dofs(),
+ ExcMessage ("New DoF index is not less than the total number of dofs."));
#endif
-
- internal::DoFHandler::Implementation::renumber_dofs (new_numbers, *this);
-
- // update the cache used for cell dof
- // indices
- for (cell_iterator cell = begin(); cell != end(); ++cell)
- cell->update_cell_dof_indices_cache ();
-
+ number_cache = policy->renumber_dofs (new_numbers, *this);
}
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <base/geometry_info.h>
+#include <base/utilities.h>
+#include <base/memory_consumption.h>
+#include <grid/tria.h>
+#include <grid/tria_iterator.h>
+#include <dofs/dof_handler.h>
+#include <dofs/dof_accessor.h>
+#include <dofs/dof_handler_policy.h>
+#include <fe/fe.h>
+
+#include <set>
+#include <algorithm>
+#include <numeric>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ namespace Policy
+ {
+ // use class dealii::DoFHandler instead
+ // of namespace internal::DoFHandler in
+ // the following
+ using dealii::DoFHandler;
+
+ struct Implementation
+ {
+
+/* -------------- distribute_dofs functionality ------------- */
+
+ /**
+ * Distribute dofs on the given cell,
+ * with new dofs starting with index
+ * @p next_free_dof. Return the next
+ * unused index number.
+ *
+ * This function is excluded from the
+ * @p distribute_dofs function since
+ * it cannot be implemented in a
+ * dimension-independent way.
+ */
+ template <int spacedim>
+ static
+ unsigned int
+ distribute_dofs_on_cell (const DoFHandler<1,spacedim> &dof_handler,
+ const typename DoFHandler<1,spacedim>::active_cell_iterator &cell,
+ unsigned int next_free_dof)
+ {
+
+ // distribute dofs of vertices
+ for (unsigned int v=0; v<GeometryInfo<1>::vertices_per_cell; ++v)
+ {
+ typename DoFHandler<1,spacedim>::cell_iterator
+ neighbor = cell->neighbor(v);
+
+ if (neighbor.state() == IteratorState::valid)
+ {
+ // find the true neighbor; it may
+ // be a child of @p{neighbor}
+ while (neighbor->has_children())
+ neighbor = neighbor->child(v==0 ? 1 : 0);
+
+ // has neighbor already been processed?
+ if (neighbor->user_flag_set())
+ // copy dofs
+ {
+ if (v==0)
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (0, d,
+ neighbor->vertex_dof_index (1, d));
+ else
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (1, d,
+ neighbor->vertex_dof_index (0, d));
+
+ // next neighbor
+ continue;
+ }
+ }
+
+ // otherwise: create new dofs
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (v, d, next_free_dof++);
+ }
+
+ // dofs of line
+ for (unsigned int d=0;
+ d<dof_handler.get_fe().dofs_per_line; ++d)
+ cell->set_dof_index (d, next_free_dof++);
+
+ // note that this cell has been
+ // processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
+
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ distribute_dofs_on_cell (const DoFHandler<2,spacedim> &dof_handler,
+ const typename DoFHandler<2,spacedim>::active_cell_iterator &cell,
+ unsigned int next_free_dof)
+ {
+ if (dof_handler.get_fe().dofs_per_vertex > 0)
+ // number dofs on vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<2>::vertices_per_cell; ++vertex)
+ // check whether dofs for this
+ // vertex have been distributed
+ // (only check the first dof)
+ if (cell->vertex_dof_index(vertex, 0) == DoFHandler<2,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof++);
+
+ // for the four sides
+ if (dof_handler.get_fe().dofs_per_line > 0)
+ for (unsigned int side=0; side<GeometryInfo<2>::faces_per_cell; ++side)
+ {
+ const typename DoFHandler<2,spacedim>::line_iterator
+ line = cell->line(side);
+
+ // distribute dofs if necessary:
+ // check whether line dof is already
+ // numbered (check only first dof)
+ if (line->dof_index(0) == DoFHandler<2,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
+ line->set_dof_index (d, next_free_dof++);
+ }
+
+
+ // dofs of quad
+ if (dof_handler.get_fe().dofs_per_quad > 0)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
+ cell->set_dof_index (d, next_free_dof++);
+
+
+ // note that this cell has been processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
+
+
+ template <int spacedim>
+ static
+ unsigned int
+ distribute_dofs_on_cell (const DoFHandler<3,spacedim> &dof_handler,
+ const typename DoFHandler<3,spacedim>::active_cell_iterator &cell,
+ unsigned int next_free_dof)
+ {
+ if (dof_handler.get_fe().dofs_per_vertex > 0)
+ // number dofs on vertices
+ for (unsigned int vertex=0; vertex<GeometryInfo<3>::vertices_per_cell; ++vertex)
+ // check whether dofs for this
+ // vertex have been distributed
+ // (only check the first dof)
+ if (cell->vertex_dof_index(vertex, 0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_vertex; ++d)
+ cell->set_vertex_dof_index (vertex, d, next_free_dof++);
+
+ // for the lines
+ if (dof_handler.get_fe().dofs_per_line > 0)
+ for (unsigned int l=0; l<GeometryInfo<3>::lines_per_cell; ++l)
+ {
+ const typename DoFHandler<3,spacedim>::line_iterator
+ line = cell->line(l);
+
+ // distribute dofs if necessary:
+ // check whether line dof is already
+ // numbered (check only first dof)
+ if (line->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_line; ++d)
+ line->set_dof_index (d, next_free_dof++);
+ }
+
+ // for the quads
+ if (dof_handler.get_fe().dofs_per_quad > 0)
+ for (unsigned int q=0; q<GeometryInfo<3>::quads_per_cell; ++q)
+ {
+ const typename DoFHandler<3,spacedim>::quad_iterator
+ quad = cell->quad(q);
+
+ // distribute dofs if necessary:
+ // check whether quad dof is already
+ // numbered (check only first dof)
+ if (quad->dof_index(0) == DoFHandler<3,spacedim>::invalid_dof_index)
+ // if not: distribute dofs
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_quad; ++d)
+ quad->set_dof_index (d, next_free_dof++);
+ }
+
+
+ // dofs of hex
+ if (dof_handler.get_fe().dofs_per_hex > 0)
+ for (unsigned int d=0; d<dof_handler.get_fe().dofs_per_hex; ++d)
+ cell->set_dof_index (d, next_free_dof++);
+
+
+ // note that this cell has been
+ // processed
+ cell->set_user_flag ();
+
+ return next_free_dof;
+ }
+
+
+ /**
+ * Distribute degrees of
+ * freedom on all cells, or
+ * on cells with the
+ * correct subdomain_id if
+ * the corresponding
+ * argument is not equal to
+ * types::invalid_subdomain_id. Return
+ * the next unused dof index, i.e. the
+ * offset plus the number of dofs
+ * distributed by this call.
+ */
+ template <int dim, int spacedim>
+ static
+ unsigned int
+ distribute_dofs (const unsigned int offset,
+ const unsigned int subdomain_id,
+ DoFHandler<dim,spacedim> &dof_handler)
+ {
+ const dealii::Triangulation<dim,spacedim> & tria
+ = dof_handler.get_tria();
+ Assert (tria.n_levels() > 0, ExcMessage("Empty triangulation"));
+
+ // Clear user flags because we will
+ // need them. But first we save
+ // them and make sure that we
+ // restore them later such that at
+ // the end of this function the
+ // Triangulation will be in the
+ // same state as it was at the
+ // beginning of this function.
+ std::vector<bool> user_flags;
+ tria.save_user_flags(user_flags);
+ const_cast<dealii::Triangulation<dim,spacedim> &>(tria).clear_user_flags ();
+
+ unsigned int next_free_dof = offset;
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell != endc; ++cell)
+ if ((subdomain_id == types::invalid_subdomain_id)
+ ||
+ (cell->subdomain_id() == subdomain_id))
+ next_free_dof
+ = Implementation::distribute_dofs_on_cell (dof_handler, cell, next_free_dof);
+
+ // update the cache used
+ // for cell dof indices
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell = dof_handler.begin(); cell != dof_handler.end(); ++cell)
+ if (cell->subdomain_id() != types::artificial_subdomain_id)
+ cell->update_cell_dof_indices_cache ();
+
+ // finally restore the user flags
+ const_cast<dealii::Triangulation<dim,spacedim> &>(tria).load_user_flags(user_flags);
+
+ return next_free_dof;
+ }
+
+
+/* --------------------- renumber_dofs functionality ---------------- */
+
+
+ /**
+ * Implementation of the
+ * general template of same
+ * name.
+ *
+ * If the second argument
+ * has any elements set,
+ * then the elements of the
+ * vector of new numbers do
+ * not relate to the old
+ * DoF number but instead
+ * to the index of the old
+ * DoF number within the
+ * set of locally owned
+ * DoFs.
+ */
+ template <int spacedim>
+ static
+ void
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ const IndexSet &,
+ DoFHandler<1,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
+ *i = new_numbers[*i];
+ else if (check_validity)
+ // if index is
+ // invalid_dof_index:
+ // check if this one
+ // really is unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.levels[level]->lines.dofs.begin();
+ i!=dof_handler.levels[level]->lines.dofs.end(); ++i)
+ if (*i != DoFHandler<1,spacedim>::invalid_dof_index)
+ *i = new_numbers[*i];
+
+ // update the cache
+ // used for cell dof
+ // indices
+ for (typename DoFHandler<1,spacedim>::cell_iterator
+ cell = dof_handler.begin();
+ cell != dof_handler.end(); ++cell)
+ cell->update_cell_dof_indices_cache ();
+ }
+
+
+
+ template <int spacedim>
+ static
+ void
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ const IndexSet &indices,
+ DoFHandler<2,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
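+ // note the convention used below: if
+ // the given index set is empty, then
+ // new_numbers is indexed directly by
+ // the old dof index; otherwise it is
+ // indexed by the position of the old
+ // index within the set of locally
+ // owned dofs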
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
+ *i = (indices.n_elements() == 0)?
+ (new_numbers[*i]) :
+ (new_numbers[indices.index_within_set(*i)]);
+ else if (check_validity)
+ // if index is invalid_dof_index:
+ // check if this one really is
+ // unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.faces->lines.dofs.begin();
+ i!=dof_handler.faces->lines.dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
+ {
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.levels[level]->quads.dofs.begin();
+ i!=dof_handler.levels[level]->quads.dofs.end(); ++i)
+ if (*i != DoFHandler<2,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+ }
+
+ // update the cache
+ // used for cell dof
+ // indices
+ for (typename DoFHandler<2,spacedim>::cell_iterator
+ cell = dof_handler.begin();
+ cell != dof_handler.end(); ++cell)
+ cell->update_cell_dof_indices_cache ();
+ }
+
+
+ template <int spacedim>
+ static
+ void
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ const IndexSet &indices,
+ DoFHandler<3,spacedim> &dof_handler,
+ const bool check_validity)
+ {
+ // note that we can not use cell
+ // iterators in this function since
+ // then we would renumber the dofs on
+ // the interface of two cells more
+ // than once. Anyway, this way it's
+ // not only more correct but also
+ // faster; note, however, that dof
+ // numbers may be invalid_dof_index,
+ // namely when the appropriate
+ // vertex/line/etc is unused
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.vertex_dofs.begin();
+ i!=dof_handler.vertex_dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+ else if (check_validity)
+ // if index is invalid_dof_index:
+ // check if this one really is
+ // unused
+ Assert (dof_handler.get_tria()
+ .vertex_used((i-dof_handler.vertex_dofs.begin()) /
+ dof_handler.selected_fe->dofs_per_vertex)
+ == false,
+ ExcInternalError ());
+
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.faces->lines.dofs.begin();
+ i!=dof_handler.faces->lines.dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.faces->quads.dofs.begin();
+ i!=dof_handler.faces->quads.dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+
+ for (unsigned int level=0; level<dof_handler.levels.size(); ++level)
+ {
+ for (std::vector<unsigned int>::iterator
+ i=dof_handler.levels[level]->hexes.dofs.begin();
+ i!=dof_handler.levels[level]->hexes.dofs.end(); ++i)
+ if (*i != DoFHandler<3,spacedim>::invalid_dof_index)
+ *i = ((indices.n_elements() == 0) ?
+ new_numbers[*i] :
+ new_numbers[indices.index_within_set(*i)]);
+ }
+
+ // update the cache
+ // used for cell dof
+ // indices
+ for (typename DoFHandler<3,spacedim>::cell_iterator
+ cell = dof_handler.begin();
+ cell != dof_handler.end(); ++cell)
+ cell->update_cell_dof_indices_cache ();
+ }
+ };
+
+
+
+/* --------------------- class PolicyBase ---------------- */
+
+ template <int dim, int spacedim>
+ PolicyBase<dim,spacedim>::~PolicyBase ()
+ {}
+
+
+/* --------------------- class Sequential ---------------- */
+
+
+ template <int dim, int spacedim>
+ NumberCache
+ Sequential<dim,spacedim>::
+ distribute_dofs (const unsigned int offset,
+ DoFHandler<dim,spacedim> &dof_handler) const
+ {
+ const unsigned int n_dofs =
+ Implementation::distribute_dofs (offset,
+ types::invalid_subdomain_id,
+ dof_handler);
+
+ // now set the elements of the
+ // number cache appropriately
+ NumberCache number_cache;
+ number_cache.n_global_dofs = n_dofs;
+ number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
+
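+ // in the sequential case every dof is
+ // locally owned, so the locally owned
+ // index set is the complete contiguous
+ // range [0,n_dofs)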
+ number_cache.locally_owned_dofs
+ = IndexSet (number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs.add_range (0,
+ number_cache.n_global_dofs);
+
+ number_cache.n_locally_owned_dofs_per_processor
+ = std::vector<unsigned int> (1,
+ number_cache.n_global_dofs);
+
+ number_cache.locally_owned_dofs_per_processor
+ = std::vector<IndexSet> (1,
+ number_cache.locally_owned_dofs);
+ return number_cache;
+ }
+
+
+
+ template <int dim, int spacedim>
+ NumberCache
+ Sequential<dim,spacedim>::
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const
+ {
+ Implementation::renumber_dofs (new_numbers, IndexSet(0),
+ dof_handler, true);
+
+ // in the sequential case,
+ // the number cache should
+ // not have changed but we
+ // have to set the elements
+ // of the structure
+ // appropriately anyway
+ NumberCache number_cache;
+ number_cache.n_global_dofs = dof_handler.n_dofs();
+ number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
+
+ number_cache.locally_owned_dofs
+ = IndexSet (number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs.add_range (0,
+ number_cache.n_global_dofs);
+
+ number_cache.n_locally_owned_dofs_per_processor
+ = std::vector<unsigned int> (1,
+ number_cache.n_global_dofs);
+
+ number_cache.locally_owned_dofs_per_processor
+ = std::vector<IndexSet> (1,
+ number_cache.locally_owned_dofs);
+ return number_cache;
+ }
+
+
+
+/* --------------------- class ParallelDistributed ---------------- */
+
+#ifdef DEAL_II_USE_P4EST
+
+ namespace
+ {
+ template <int dim>
+ struct types
+ {
+
+ /**
+ * A list of tree+quadrant and
+ * their dof indices. dofs is of
+ * the form num_dofindices of
+ * quadrant 0, followed by
+ * num_dofindices indices,
+ * num_dofindices of quadrant 1,
+ * ...
+ */
+ struct cellinfo
+ {
+ std::vector<unsigned int> tree_index;
+ std::vector<typename dealii::internal::p4est::types<dim>::quadrant> quadrants;
+ std::vector<unsigned int> dofs;
+
+ unsigned int bytes_for_buffer () const
+ {
+ return (sizeof(unsigned int) +
+ tree_index.size() * sizeof(unsigned int) +
+ quadrants.size() * sizeof(typename dealii::internal::p4est
+ ::types<dim>::quadrant) +
+ dofs.size() * sizeof(unsigned int));
+ }
+
+ void pack_data (std::vector<char> &buffer) const
+ {
+ buffer.resize(bytes_for_buffer());
+
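+ // serialized layout: first the number
+ // of cells, then all tree indices,
+ // then all quadrants, and finally the
+ // packed dof index data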
+ char * ptr = &buffer[0];
+
+ const unsigned int num_cells = tree_index.size();
+ std::memcpy(ptr, &num_cells, sizeof(unsigned int));
+ ptr += sizeof(unsigned int);
+
+ std::memcpy(ptr,
+ &tree_index[0],
+ num_cells*sizeof(unsigned int));
+ ptr += num_cells*sizeof(unsigned int);
+
+ std::memcpy(ptr,
+ &quadrants[0],
+ num_cells * sizeof(typename dealii::internal::p4est::
+ types<dim>::quadrant));
+ ptr += num_cells*sizeof(typename dealii::internal::p4est::types<dim>::
+ quadrant);
+
+ std::memcpy(ptr,
+ &dofs[0],
+ dofs.size() * sizeof(unsigned int));
+ ptr += dofs.size() * sizeof(unsigned int);
+
+ Assert (ptr == &buffer[buffer.size()],
+ ExcInternalError());
+
+ }
+ };
+ };
+
+
+
+ template <int dim, int spacedim>
+ void
+ fill_dofindices_recursively (const typename parallel::distributed::Triangulation<dim,spacedim> & tria,
+ const unsigned int tree_index,
+ const typename DoFHandler<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename dealii::internal::p4est::types<dim>::quadrant &p4est_cell,
+ const std::map<unsigned int, std::set<dealii::types::subdomain_id_t> > &vertices_with_ghost_neighbors,
+ std::map<dealii::types::subdomain_id_t, typename types<dim>::cellinfo> &needs_to_get_cell)
+ {
+ // see if we have to
+ // recurse...
+ if (dealii_cell->has_children())
+ {
+ typename dealii::internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ internal::p4est::init_quadrant_children<dim>(p4est_cell, p4est_child);
+
+
+ for (unsigned int c=0;c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ fill_dofindices_recursively<dim,spacedim>(tria,
+ tree_index,
+ dealii_cell->child(c),
+ p4est_child[c],
+ vertices_with_ghost_neighbors,
+ needs_to_get_cell);
+ return;
+ }
+
+ // we're at a leaf cell. see if
+ // the cell is flagged as
+ // interesting. note that we
+ // have only flagged our own
+ // cells before
+ if (dealii_cell->user_flag_set() && !dealii_cell->is_ghost())
+ {
+ Assert (!dealii_cell->is_artificial(), ExcInternalError());
+
+ // check each vertex if
+ // it is interesting and
+ // push dofindices if yes
+ std::set<dealii::types::subdomain_id_t> send_to;
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ {
+ const std::map<unsigned int, std::set<dealii::types::subdomain_id_t> >::const_iterator
+ neighbor_subdomains_of_vertex
+ = vertices_with_ghost_neighbors.find (dealii_cell->vertex_index(v));
+
+ if (neighbor_subdomains_of_vertex ==
+ vertices_with_ghost_neighbors.end())
+ continue;
+
+ Assert(neighbor_subdomains_of_vertex->second.size()!=0,
+ ExcInternalError());
+
+ send_to.insert(neighbor_subdomains_of_vertex->second.begin(),
+ neighbor_subdomains_of_vertex->second.end());
+ }
+
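+ // send_to now contains the subdomain
+ // ids of all ghost neighbors that
+ // touch this cell through one of its
+ // vertices and therefore need to know
+ // its dof indices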
+ if (send_to.size() > 0)
+ {
+ // this cell's dof_indices
+ // need to be sent to
+ // someone
+ std::vector<unsigned int>
+ local_dof_indices (dealii_cell->get_fe().dofs_per_cell);
+ dealii_cell->get_dof_indices (local_dof_indices);
+
+ for (std::set<dealii::types::subdomain_id_t>::iterator it=send_to.begin();
+ it!=send_to.end();++it)
+ {
+ const dealii::types::subdomain_id_t subdomain = *it;
+
+ // get an iterator
+ // to what needs to
+ // be sent to that
+ // subdomain (if
+ // already exists),
+ // or create such
+ // an object
+ typename std::map<dealii::types::subdomain_id_t, typename types<dim>::cellinfo>::iterator
+ p
+ = needs_to_get_cell.insert (std::make_pair(subdomain,
+ typename types<dim>::cellinfo()))
+ .first;
+
+ p->second.tree_index.push_back(tree_index);
+ p->second.quadrants.push_back(p4est_cell);
+
+ p->second.dofs.push_back(dealii_cell->get_fe().dofs_per_cell);
+ p->second.dofs.insert(p->second.dofs.end(),
+ local_dof_indices.begin(),
+ local_dof_indices.end());
+
+ }
+ }
+ }
+ }
+
+
+ template <int dim, int spacedim>
+ void
+ set_dofindices_recursively (
+ const parallel::distributed::Triangulation<dim,spacedim> &tria,
+ const typename dealii::internal::p4est::types<dim>::quadrant &p4est_cell,
+ const typename DoFHandler<dim,spacedim>::cell_iterator &dealii_cell,
+ const typename dealii::internal::p4est::types<dim>::quadrant &quadrant,
+ unsigned int * dofs)
+ {
+ if (internal::p4est::quadrant_is_equal<dim>(p4est_cell, quadrant))
+ {
+ Assert(!dealii_cell->has_children(), ExcInternalError());
+ Assert(dealii_cell->is_ghost(), ExcInternalError());
+
+ // update dof indices of cell
+ std::vector<unsigned int>
+ dof_indices (dealii_cell->get_fe().dofs_per_cell);
+ dealii_cell->update_cell_dof_indices_cache();
+ dealii_cell->get_dof_indices(dof_indices);
+
+ bool complete = true;
+ for (unsigned int i=0;i<dof_indices.size();++i)
+ if (dofs[i] != DoFHandler<dim,spacedim>::invalid_dof_index)
+ {
+ Assert((dof_indices[i] ==
+ (DoFHandler<dim,spacedim>::invalid_dof_index))
+ ||
+ (dof_indices[i]==dofs[i]),
+ ExcInternalError());
+ dof_indices[i]=dofs[i];
+ }
+ else
+ complete=false;
+
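+ // if we could not fill in all indices
+ // yet, keep the cell marked so that it
+ // will be considered again in a later
+ // round of communication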
+ if (!complete)
+ const_cast
+ <typename DoFHandler<dim,spacedim>::cell_iterator &>
+ (dealii_cell)->set_user_flag();
+ else
+ const_cast
+ <typename DoFHandler<dim,spacedim>::cell_iterator &>
+ (dealii_cell)->clear_user_flag();
+
+ const_cast
+ <typename DoFHandler<dim,spacedim>::cell_iterator &>
+ (dealii_cell)->set_dof_indices(dof_indices);
+
+ return;
+ }
+
+ if (! dealii_cell->has_children())
+ return;
+
+ if (! internal::p4est::quadrant_is_ancestor<dim> (p4est_cell, quadrant))
+ return;
+
+ typename dealii::internal::p4est::types<dim>::quadrant
+ p4est_child[GeometryInfo<dim>::max_children_per_cell];
+ internal::p4est::init_quadrant_children<dim>(p4est_cell, p4est_child);
+
+ for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
+ set_dofindices_recursively<dim,spacedim> (tria, p4est_child[c],
+ dealii_cell->child(c),
+ quadrant, dofs);
+ }
+
+ /**
+ * Return a vector in which,
+ * for every vertex index, we
+ * mark whether the vertex is
+ * adjacent to a locally owned
+ * cell.
+ */
+ template <int dim, int spacedim>
+ std::vector<bool>
+ mark_locally_active_vertices (const parallel::distributed::Triangulation<dim,spacedim> &triangulation)
+ {
+ std::vector<bool> locally_active_vertices (triangulation.n_vertices(),
+ false);
+
+ for (typename dealii::Triangulation<dim,spacedim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ locally_active_vertices[cell->vertex_index(v)] = true;
+
+ return locally_active_vertices;
+ }
+
+
+
+ template <int spacedim>
+ void
+ communicate_dof_indices_on_marked_cells
+ (const DoFHandler<1,spacedim> &,
+ const std::map<unsigned int, std::set<dealii::types::subdomain_id_t> > &,
+ const std::vector<unsigned int> &,
+ const std::vector<unsigned int> &)
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+
+ template <int dim, int spacedim>
+ void
+ communicate_dof_indices_on_marked_cells
+ (const DoFHandler<dim,spacedim> &dof_handler,
+ const std::map<unsigned int, std::set<dealii::types::subdomain_id_t> > &vertices_with_ghost_neighbors,
+ const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
+ const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation)
+ {
+#ifndef DEAL_II_USE_P4EST
+ (void)vertices_with_ghost_neighbors;
+ Assert (false, ExcNotImplemented());
+#else
+
+ const parallel::distributed::Triangulation< dim, spacedim > * tr
+ = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+ (&dof_handler.get_tria()));
+ Assert (tr != 0, ExcInternalError());
+
+ // now collect cells and their
+ // dof_indices for the
+ // interested neighbors
+ typedef
+ std::map<dealii::types::subdomain_id_t, typename types<dim>::cellinfo>
+ cellmap_t;
+ cellmap_t needs_to_get_cells;
+
+ for (typename DoFHandler<dim,spacedim>::cell_iterator
+ cell = dof_handler.begin(0);
+ cell != dof_handler.end(0);
+ ++cell)
+ {
+ typename dealii::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
+
+ fill_dofindices_recursively<dim,spacedim>
+ (*tr,
+ coarse_cell_to_p4est_tree_permutation[cell->index()],
+ cell,
+ p4est_coarse_cell,
+ vertices_with_ghost_neighbors,
+ needs_to_get_cells);
+ }
+
+
+
+ // sending
+ std::vector<std::vector<char> > sendbuffers (needs_to_get_cells.size());
+ std::vector<std::vector<char> >::iterator buffer = sendbuffers.begin();
+ std::vector<MPI_Request> requests (needs_to_get_cells.size());
+
+ unsigned int idx=0;
+
+ for (typename cellmap_t::iterator it=needs_to_get_cells.begin();
+ it!=needs_to_get_cells.end();
+ ++it, ++buffer, ++idx)
+ {
+ const unsigned int num_cells = it->second.tree_index.size();
+
+ Assert(num_cells==it->second.quadrants.size(), ExcInternalError());
+ Assert(num_cells>0, ExcInternalError());
+
+ // pack all the data into
+ // the buffer for this
+ // recipient and send
+ // it. keep data around
+ // till we can make sure
+ // that the packet has been
+ // received
+ it->second.pack_data (*buffer);
+ MPI_Isend(&(*buffer)[0], buffer->size(),
+ MPI_BYTE, it->first,
+ 123, tr->get_communicator(), &requests[idx]);
+ }
+
+
+ // mark all of our own cells that are
+ // still missing some dof indices, and
+ // collect the neighbors that are going
+ // to send data to us
+ std::set<dealii::types::subdomain_id_t> senders;
+ {
+ std::vector<unsigned int> local_dof_indices;
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell, endc = dof_handler.end();
+
+ for (cell = dof_handler.begin_active(); cell != endc; ++cell)
+ if (!cell->is_artificial())
+ {
+ if (cell->is_ghost())
+ {
+ if (cell->user_flag_set())
+ senders.insert(cell->subdomain_id());
+ }
+ else
+ {
+ local_dof_indices.resize (cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices (local_dof_indices);
+ if (local_dof_indices.end() !=
+ std::find (local_dof_indices.begin(),
+ local_dof_indices.end(),
+ DoFHandler<dim,spacedim>::invalid_dof_index))
+ cell->set_user_flag();
+ else
+ cell->clear_user_flag();
+ }
+
+ }
+ }
+
+
+ //* 5. receive ghostcelldata
+ std::vector<char> receive;
+ typename types<dim>::cellinfo cellinfo;
+ for (unsigned int i=0;i<senders.size();++i)
+ {
+ MPI_Status status;
+ int len;
+ MPI_Probe(MPI_ANY_SOURCE, 123, tr->get_communicator(), &status);
+ MPI_Get_count(&status, MPI_BYTE, &len);
+ receive.resize(len);
+
+ char * ptr = &receive[0];
+ MPI_Recv(ptr, len, MPI_BYTE, status.MPI_SOURCE, status.MPI_TAG,
+ tr->get_communicator(), &status);
+
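+        // the packet is laid out as: the number of cells, followed by
+        // that many tree indices, then that many p4est quadrants, and
+        // finally, for each cell, its number of dofs followed by the
+        // dof indices themselves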
+ unsigned int cells;
+ memcpy(&cells, ptr, sizeof(unsigned int));
+ ptr+=sizeof(unsigned int);
+
+ //reinterpret too evil?
+ unsigned int * treeindex=reinterpret_cast<unsigned int*>(ptr);
+ ptr+=cells*sizeof(unsigned int);
+ typename dealii::internal::p4est::types<dim>::quadrant * quadrant
+ =reinterpret_cast<typename dealii::internal::p4est::types<dim>::quadrant *>(ptr);
+ ptr+=cells*sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
+ unsigned int * dofs=reinterpret_cast<unsigned int*>(ptr);
+
+ for (unsigned int c=0;c<cells;++c, dofs+=1+dofs[0])
+ {
+ typename DoFHandler<dim,spacedim>::cell_iterator
+ cell (&dof_handler.get_tria(),
+ 0,
+ p4est_tree_to_coarse_cell_permutation[treeindex[c]],
+ &dof_handler);
+
+ typename dealii::internal::p4est::types<dim>::quadrant p4est_coarse_cell;
+ internal::p4est::init_coarse_quadrant<dim>(p4est_coarse_cell);
+
+ Assert(cell->get_fe().dofs_per_cell==dofs[0], ExcInternalError());
+
+ set_dofindices_recursively<dim,spacedim> (*tr,
+ p4est_coarse_cell,
+ cell,
+ quadrant[c],
+ (dofs+1));
+ }
+ }
+
+ // complete all sends, so that we can
+ // safely destroy the buffers.
+ MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+
+
+#ifdef DEBUG
+ {
+ //check all msgs got sent and received
+ unsigned int sum_send=0;
+ unsigned int sum_recv=0;
+ unsigned int sent=needs_to_get_cells.size();
+ unsigned int recv=senders.size();
+
+ MPI_Reduce(&sent, &sum_send, 1, MPI_INT, MPI_SUM, 0, tr->get_communicator());
+ MPI_Reduce(&recv, &sum_recv, 1, MPI_INT, MPI_SUM, 0, tr->get_communicator());
+ Assert(sum_send==sum_recv, ExcInternalError());
+ }
+#endif
+
+ //update dofindices
+ {
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell, endc = dof_handler.end();
+
+ for (cell = dof_handler.begin_active(); cell != endc; ++cell)
+ if (!cell->is_artificial())
+ cell->update_cell_dof_indices_cache();
+ }
+
+ // important, so that sends between two
+ // calls to this function are not mixed
+ // up.
+ //
+ // this is necessary because above we
+ // just see if there are messages and
+ // then receive them, without
+ // discriminating where they come from
+ // and whether they were sent in phase
+ // 1 or 2. the need for a global
+ // communication step like this barrier
+ // could be avoided by receiving
+ // messages specifically from those
+ // processors from which we expect
+ // messages, and by using different
+ // tags for phase 1 and 2
+ MPI_Barrier(tr->get_communicator());
+#endif
+ }
+ }
+
+#endif // DEAL_II_USE_P4EST
+
+
+
+ template <int dim, int spacedim>
+ NumberCache
+ ParallelDistributed<dim, spacedim>::
+ distribute_dofs (const unsigned int offset,
+ DoFHandler<dim,spacedim> &dof_handler) const
+ {
+ Assert(offset==0, ExcNotImplemented());
+ NumberCache number_cache;
+
+#ifndef DEAL_II_USE_P4EST
+      (void)dof_handler;
+ Assert (false, ExcNotImplemented());
+#else
+
+
+ parallel::distributed::Triangulation< dim, spacedim > * tr
+ = (dynamic_cast<parallel::distributed::Triangulation<dim,spacedim>*>
+ (const_cast<dealii::Triangulation< dim, spacedim >*>
+ (&dof_handler.get_tria())));
+ Assert (tr != 0, ExcInternalError());
+
+ const unsigned int
+ n_cpus = Utilities::System::get_n_mpi_processes (tr->get_communicator());
+
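+      // the algorithm proceeds in the numbered steps below: enumerate
+      // dofs on locally owned cells; invalidate dofs on processor
+      // interfaces that are owned by a neighbor with a smaller
+      // subdomain id; compress the remaining indices and shift them by
+      // the number of dofs owned by lower-numbered processors; finally
+      // exchange the indices of dofs on ghost cells in two
+      // communication rounds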
+ //* 1. distribute on own
+ //* subdomain
+ const unsigned int n_initial_local_dofs =
+ Implementation::distribute_dofs (0, tr->locally_owned_subdomain(),
+ dof_handler);
+
+ //* 2. iterate over ghostcells and
+ //kill dofs that are not owned
+ //by us
+ std::vector<unsigned int> renumbering(n_initial_local_dofs);
+ for (unsigned int i=0; i<renumbering.size(); ++i)
+ renumbering[i] = i;
+
+ {
+ std::vector<unsigned int> local_dof_indices;
+
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell != endc; ++cell)
+ if (cell->is_ghost() &&
+ (cell->subdomain_id() < tr->locally_owned_subdomain()))
+ {
+            // we found a
+            // neighboring ghost
+            // cell whose subdomain
+            // is "stronger" (i.e. has
+            // a smaller subdomain id)
+            // than our own subdomain
+
+ // delete all dofs that
+ // live there and that
+ // we have previously
+ // assigned a number to
+ // (i.e. the ones on
+ // the interface)
+ local_dof_indices.resize (cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices (local_dof_indices);
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
+ if (local_dof_indices[i] != DoFHandler<dim,spacedim>::invalid_dof_index)
+ renumbering[local_dof_indices[i]]
+ = DoFHandler<dim,spacedim>::invalid_dof_index;
+ }
+ }
+
+
+ // make indices consecutive
+ number_cache.n_locally_owned_dofs = 0;
+ for (std::vector<unsigned int>::iterator it=renumbering.begin();
+ it!=renumbering.end(); ++it)
+ if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
+ *it = number_cache.n_locally_owned_dofs++;
+
+ //* 3. communicate local dofcount and
+ //shift ids to make them unique
+ number_cache.n_locally_owned_dofs_per_processor.resize(n_cpus);
+
+ MPI_Allgather ( &number_cache.n_locally_owned_dofs,
+ 1, MPI_INT,
+ &number_cache.n_locally_owned_dofs_per_processor[0],
+ 1, MPI_INT,
+ tr->get_communicator());
+
+ const unsigned int
+ shift = std::accumulate( &number_cache
+ .n_locally_owned_dofs_per_processor[0],
+ &number_cache
+ .n_locally_owned_dofs_per_processor
+ [tr->locally_owned_subdomain()],
+ 0 );
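+      // (for illustration: if three processors own 5, 7 and 4 dofs,
+      // respectively, then processor 1 computes shift=5 and will own
+      // the global indices [5,12), while processor 2 gets shift=12)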
+ for (std::vector<unsigned int>::iterator it=renumbering.begin();
+ it!=renumbering.end(); ++it)
+ if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
+ (*it) += shift;
+
+ // now re-enumerate all dofs to
+ // this shifted and condensed
+ // numbering form. we renumber
+ // some dofs as invalid, so
+ // choose the nocheck-version.
+ Implementation::renumber_dofs (renumbering, IndexSet(0),
+ dof_handler, false);
+
+ // now a little bit of
+ // housekeeping
+ number_cache.n_global_dofs
+ = std::accumulate( &number_cache
+ .n_locally_owned_dofs_per_processor[0],
+ &number_cache
+ .n_locally_owned_dofs_per_processor[n_cpus],
+ 0 );
+
+ number_cache.locally_owned_dofs = IndexSet(number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs
+ .add_range(shift,
+ shift+number_cache.n_locally_owned_dofs);
+
+ // fill global_dof_indexsets
+ number_cache.locally_owned_dofs_per_processor.resize(n_cpus);
+ {
+ unsigned int lshift = 0;
+ for (unsigned int i=0;i<n_cpus;++i)
+ {
+ number_cache.locally_owned_dofs_per_processor[i]
+ = IndexSet(number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs_per_processor[i]
+ .add_range(lshift,
+ lshift +
+ number_cache.n_locally_owned_dofs_per_processor[i]);
+ lshift += number_cache.n_locally_owned_dofs_per_processor[i];
+ }
+ }
+ Assert(number_cache.locally_owned_dofs_per_processor
+ [tr->locally_owned_subdomain()].n_elements()
+ ==
+ number_cache.n_locally_owned_dofs,
+ ExcInternalError());
+ Assert(!number_cache.locally_owned_dofs_per_processor
+ [tr->locally_owned_subdomain()].n_elements()
+ ||
+ number_cache.locally_owned_dofs_per_processor
+ [tr->locally_owned_subdomain()].nth_index_in_set(0)
+ == shift,
+ ExcInternalError());
+
+ //* 4. send dofids of cells that are
+ //ghostcells on other machines
+
+ std::vector<bool> user_flags;
+ tr->save_user_flags(user_flags);
+ tr->clear_user_flags ();
+
+ //mark all own cells for transfer
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell, endc = dof_handler.end();
+ for (cell = dof_handler.begin_active(); cell != endc; ++cell)
+ if (!cell->is_artificial())
+ cell->set_user_flag();
+
+ //mark the vertices we are interested
+ //in, i.e. belonging to own and marked cells
+ const std::vector<bool> locally_active_vertices
+ = mark_locally_active_vertices (*tr);
+
+      // for each vertex of a ghost cell, add
+      // that cell's subdomain id to the set
+      // stored for the vertex; this keeps
+      // track of which neighbors are
+      // interested in cells touching that
+      // vertex
+ std::map<unsigned int, std::set<dealii::types::subdomain_id_t> >
+ vertices_with_ghost_neighbors;
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active();
+ cell != dof_handler.end(); ++cell)
+ if (cell->is_ghost ())
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ if (locally_active_vertices[cell->vertex_index(v)])
+ vertices_with_ghost_neighbors[cell->vertex_index(v)]
+ .insert (cell->subdomain_id());
+
+
+      /* Send and receive cells. After this,
+        only those locally owned cells that
+        received new dof indices remain
+        marked; this information has to be
+        sent around once more in a second
+        communication step. */
+ communicate_dof_indices_on_marked_cells (dof_handler,
+ vertices_with_ghost_neighbors,
+ tr->coarse_cell_to_p4est_tree_permutation,
+ tr->p4est_tree_to_coarse_cell_permutation);
+
+ communicate_dof_indices_on_marked_cells (dof_handler,
+ vertices_with_ghost_neighbors,
+ tr->coarse_cell_to_p4est_tree_permutation,
+ tr->p4est_tree_to_coarse_cell_permutation);
+
+ tr->load_user_flags(user_flags);
+
+#ifdef DEBUG
+ //check that we are really done
+ {
+ std::vector<unsigned int> local_dof_indices;
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell, endc = dof_handler.end();
+
+ for (cell = dof_handler.begin_active(); cell != endc; ++cell)
+ if (!cell->is_artificial())
+ {
+ local_dof_indices.resize (cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices (local_dof_indices);
+ if (local_dof_indices.end() !=
+ std::find (local_dof_indices.begin(),
+ local_dof_indices.end(),
+ DoFHandler<dim,spacedim>::invalid_dof_index))
+ {
+ if (cell->is_ghost())
+ {
+                  Assert(false, ExcMessage ("A ghost cell ended up with invalid dof indices"));
+ }
+ else
+ {
+                  Assert(false, ExcMessage ("One of our own cells ended up with invalid dof indices"));
+ }
+ }
+ }
+ }
+#endif // DEBUG
+#endif // DEAL_II_USE_P4EST
+
+ return number_cache;
+ }
+
+
+
+ template <int dim, int spacedim>
+ NumberCache
+ ParallelDistributed<dim, spacedim>::
+ renumber_dofs (const std::vector<unsigned int> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler) const
+ {
+ Assert (new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(),
+ ExcInternalError());
+
+ NumberCache number_cache;
+
+#ifndef DEAL_II_USE_P4EST
+ Assert (false, ExcNotImplemented());
+#else
+
+
+ //calculate new IndexSet. First try
+ //to find out if the new indices are
+ //contiguous blocks. This avoids
+ //inserting each index individually
+ //into the IndexSet, which is slow.
+ //If we own no DoFs, we still need to
+ //go through this function, but we
+ //can skip this calculation.
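+       //
+       // (as an illustration: new indices {3,4,5,10,11}, with at least
+       // two blocks available, are detected as the contiguous ranges
+       // [3,6) and [10,12), which are then added to the IndexSet as
+       // whole ranges)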
+
+ number_cache.locally_owned_dofs = IndexSet (dof_handler.n_dofs());
+ if (dof_handler.locally_owned_dofs().n_elements()>0)
+ {
+ std::vector<unsigned int>::const_iterator it = new_numbers.begin();
+ const unsigned int n_blocks = dof_handler.get_fe().n_blocks();
+ std::vector<std::pair<unsigned int,unsigned int> > block_indices(n_blocks);
+ block_indices[0].first = *it++;
+ block_indices[0].second = 1;
+ unsigned int current_block = 0, n_filled_blocks = 1;
+ for ( ; it != new_numbers.end(); ++it)
+ {
+ bool done = false;
+
+ // search from the current block onwards
+ // whether the next index is shifted by one
+ // from the previous one.
+ for (unsigned int i=0; i<n_filled_blocks; ++i)
+ if (*it == block_indices[current_block].first
+ +block_indices[current_block].second)
+ {
+ block_indices[current_block].second++;
+ done = true;
+ break;
+ }
+ else
+ {
+ if (current_block == n_filled_blocks-1)
+ current_block = 0;
+ else
+ ++current_block;
+ }
+
+ // could not find any contiguous range: need
+ // to add a new block if possible. Abort
+ // otherwise, which will add all elements
+ // individually to the IndexSet.
+ if (done == false)
+ {
+ if (n_filled_blocks < n_blocks)
+ {
+ block_indices[n_filled_blocks].first = *it;
+ block_indices[n_filled_blocks].second = 1;
+ current_block = n_filled_blocks;
+ ++n_filled_blocks;
+ }
+ else
+ break;
+ }
+ }
+
+ // check whether all indices could be assigned
+ // to blocks. If yes, we can add the block
+ // ranges to the IndexSet, otherwise we need
+ // to go through the indices once again and
+ // add each element individually (slow!)
+ unsigned int sum = 0;
+ for (unsigned int i=0; i<n_filled_blocks; ++i)
+ sum += block_indices[i].second;
+ if (sum == new_numbers.size())
+ for (unsigned int i=0; i<n_filled_blocks; ++i)
+ number_cache.locally_owned_dofs.add_range (block_indices[i].first,
+ block_indices[i].first+
+ block_indices[i].second);
+ else
+ for (it=new_numbers.begin() ; it != new_numbers.end(); ++it)
+ number_cache.locally_owned_dofs.add_index (*it);
+ }
+
+
+ number_cache.locally_owned_dofs.compress();
+ Assert (number_cache.locally_owned_dofs.n_elements() == new_numbers.size(),
+ ExcInternalError());
+ // also check with the number
+ // of locally owned degrees
+ // of freedom that the
+ // DoFHandler object still
+ // stores
+ Assert (number_cache.locally_owned_dofs.n_elements() ==
+ dof_handler.n_locally_owned_dofs(),
+ ExcInternalError());
+
+ // then also set this number
+ // in our own copy
+ number_cache.n_locally_owned_dofs = dof_handler.n_locally_owned_dofs();
+
+      // mark all dof indices that are not
+      // locally owned as invalid; they
+      // receive their correct new values in
+      // the communication step below
+ {
+ std::vector<unsigned int> local_dof_indices;
+
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+
+ for (; cell != endc; ++cell)
+ if (!cell->is_artificial())
+ {
+ local_dof_indices.resize (cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices (local_dof_indices);
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
+ {
+ if (local_dof_indices[i] == DoFHandler<dim,spacedim>::invalid_dof_index)
+ continue;
+
+ if (!dof_handler.locally_owned_dofs().is_element(local_dof_indices[i]))
+ {
+ //this DoF is not owned
+ //by us, so set it to
+ //invalid.
+ local_dof_indices[i]
+ = DoFHandler<dim,spacedim>::invalid_dof_index;
+ }
+ }
+
+ cell->set_dof_indices (local_dof_indices);
+ }
+ }
+
+
+ // renumber. Skip when there is
+ // nothing to do because we own no
+ // DoF.
+ if (dof_handler.locally_owned_dofs().n_elements() > 0)
+ Implementation::renumber_dofs (new_numbers,
+ dof_handler.locally_owned_dofs(),
+ dof_handler,
+ false);
+
+ // communication
+ {
+ parallel::distributed::Triangulation< dim, spacedim > * tr
+ = (dynamic_cast<parallel::distributed::Triangulation<dim,spacedim>*>
+ (const_cast<dealii::Triangulation< dim, spacedim >*>
+ (&dof_handler.get_tria())));
+ Assert (tr != 0, ExcInternalError());
+
+ std::vector<bool> user_flags;
+ tr->save_user_flags(user_flags);
+ tr->clear_user_flags ();
+
+ //mark all own cells for transfer
+ typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell, endc = dof_handler.end();
+ for (cell = dof_handler.begin_active(); cell != endc; ++cell)
+ if (!cell->is_artificial())
+ cell->set_user_flag();
+ //mark the vertices we are interested
+ //in, i.e. belonging to own and marked cells
+ const std::vector<bool> locally_active_vertices
+ = mark_locally_active_vertices (*tr);
+
+       // for each vertex of a ghost cell, add
+       // that cell's subdomain id to the set
+       // stored for the vertex; this keeps
+       // track of which neighbors are
+       // interested in cells touching that
+       // vertex
+ std::map<unsigned int, std::set<dealii::types::subdomain_id_t> >
+ vertices_with_ghost_neighbors;
+ for (typename DoFHandler<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.begin_active();
+ cell != dof_handler.end(); ++cell)
+ if (cell->is_ghost ())
+ for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
+ if (locally_active_vertices[cell->vertex_index(v)])
+ vertices_with_ghost_neighbors[cell->vertex_index(v)]
+ .insert (cell->subdomain_id());
+
+       // Send and receive cells. After this, only
+       // those locally owned cells that received new
+       // dof indices remain marked; this information
+       // has to be sent around once more in a second
+       // communication step.
+ communicate_dof_indices_on_marked_cells (dof_handler,
+ vertices_with_ghost_neighbors,
+ tr->coarse_cell_to_p4est_tree_permutation,
+ tr->p4est_tree_to_coarse_cell_permutation);
+
+ communicate_dof_indices_on_marked_cells (dof_handler,
+ vertices_with_ghost_neighbors,
+ tr->coarse_cell_to_p4est_tree_permutation,
+ tr->p4est_tree_to_coarse_cell_permutation);
+
+
+ // * Create global_dof_indexsets by
+     // transferring our own owned_dofs to
+ // every other machine.
+ const unsigned int n_cpus = Utilities::System::
+ get_n_mpi_processes (tr->get_communicator());
+
+ // Serialize our IndexSet and
+ // determine size.
+ std::ostringstream oss;
+ number_cache.locally_owned_dofs.block_write(oss);
+ std::string oss_str=oss.str();
+ std::vector<char> my_data(oss_str.begin(), oss_str.end());
+ unsigned int my_size = my_data.size();
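+       // since MPI_Allgather requires equally sized contributions from
+       // all processors, pad every serialized IndexSet to the maximum
+       // size determined below; block_read() later ignores the padding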
+
+ // determine maximum size of IndexSet
+ unsigned int max_size;
+
+ MPI_Allreduce(&my_size, &max_size, 1, MPI_INT, MPI_MAX, tr->get_communicator());
+
+ // as we are reading past the end, we
+ // need to increase the size of the
+ // local buffer. This is filled with
+ // zeros.
+ my_data.resize(max_size);
+
+ std::vector<char> buffer(max_size*n_cpus);
+ MPI_Allgather(&my_data[0], max_size, MPI_BYTE,
+ &buffer[0], max_size, MPI_BYTE,
+ tr->get_communicator());
+
+ number_cache.locally_owned_dofs_per_processor.resize (n_cpus);
+ number_cache.n_locally_owned_dofs_per_processor.resize (n_cpus);
+ for (unsigned int i=0;i<n_cpus;++i)
+ {
+ std::stringstream strstr;
+ strstr.write(&buffer[i*max_size],max_size);
+        // This does not read the whole
+        // buffer when the size is
+        // smaller than max_size.
+        // Therefore we need to create a
+        // new stringstream in each
+        // iteration (resetting would be
+        // fine too).
+ number_cache.locally_owned_dofs_per_processor[i]
+ .block_read(strstr);
+ number_cache.n_locally_owned_dofs_per_processor[i]
+ = number_cache.locally_owned_dofs_per_processor[i].n_elements();
+ }
+
+ tr->load_user_flags(user_flags);
+ }
+#endif
+
+ return number_cache;
+ }
+ }
+ }
+}
+
+
+
+
+/*-------------- Explicit Instantiations -------------------------------*/
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ namespace Policy
+ {
+ template class PolicyBase<deal_II_dimension,deal_II_dimension>;
+ template class Sequential<deal_II_dimension,deal_II_dimension>;
+ template class ParallelDistributed<deal_II_dimension,deal_II_dimension>;
+
+#if deal_II_dimension==1 || deal_II_dimension==2
+ template class PolicyBase<deal_II_dimension,deal_II_dimension+1>;
+ template class Sequential<deal_II_dimension,deal_II_dimension+1>;
+ template class ParallelDistributed<deal_II_dimension,deal_II_dimension+1>;
+#endif
+ }
+ }
+}
+
+
+DEAL_II_NAMESPACE_CLOSE
//---------------------------------------------------------------------------
-// $id$
+// $Id$
// Version: $name$
//
// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
class WrapDoFIterator : private T
{
public:
+ typedef typename T::AccessorType AccessorType;
+
WrapDoFIterator (const T& t) : T(t) {}
void get_dof_indices (std::vector<unsigned int>& v) const
class WrapMGDoFIterator : private T
{
public:
+ typedef typename T::AccessorType AccessorType;
+
WrapMGDoFIterator (const T& t) : T(t) {}
void get_dof_indices (std::vector<unsigned int>& v) const
}
+
template <int dim>
void Cuthill_McKee (MGDoFHandler<dim> &dof_handler,
const unsigned int level,
}
+
template <int dim>
void
component_wise (DoFHandler<dim> &dof_handler,
const std::vector<unsigned int> &component_order_arg)
{
- std::vector<unsigned int> renumbering (dof_handler.n_dofs(),
+ std::vector<unsigned int> renumbering (dof_handler.n_locally_owned_dofs(),
DoFHandler<dim>::invalid_dof_index);
typedef
typename DoFHandler<dim>::cell_iterator>(renumbering,
start, end,
component_order_arg);
-
- if (result == 0) return;
-
- Assert (result == dof_handler.n_dofs(),
+ if (result == 0)
+ return;
+
+                                  // verify that the last numbered
+                                  // degree of freedom is either
+                                  // equal to the number of locally
+                                  // owned degrees of freedom (which
+                                  // is the total number in the
+                                  // sequential case) or, in the
+                                  // distributed case, at least does
+                                  // not exceed the total number of
+                                  // degrees of freedom
+ Assert ((result == dof_handler.n_locally_owned_dofs())
+ ||
+ ((dof_handler.n_locally_owned_dofs() < dof_handler.n_dofs())
+ &&
+ (result <= dof_handler.n_dofs())),
ExcRenumberingIncomplete());
dof_handler.renumber_dofs (renumbering);
}
+
template <int dim>
void
component_wise (MGDoFHandler<dim> &dof_handler,
}
+
template <int dim>
void
component_wise (MGDoFHandler<dim> &dof_handler,
-
template <int dim, class ITERATOR, class ENDITERATOR>
unsigned int
compute_component_wise (std::vector<unsigned int>& new_indices,
- ITERATOR& start,
+ const ITERATOR & start,
const ENDITERATOR& end,
const std::vector<unsigned int> &component_order_arg)
{
return 0;
}
-// Assert (new_indices.size() == start->dof_handler().n_dofs(),
-// ExcDimensionMismatch(new_indices.size(),
-// start->dof_handler.n_dofs()));
-
// Copy last argument into a
// writable vector.
std::vector<unsigned int> component_order (component_order_arg);
Assert (component_order.size() == fe_collection.n_components(),
ExcDimensionMismatch(component_order.size(), fe_collection.n_components()));
- for (unsigned int i=0; i< component_order.size(); ++i)
+ for (unsigned int i=0; i<component_order.size(); ++i)
Assert(component_order[i] < fe_collection.n_components(),
ExcIndexRange(component_order[i], 0, fe_collection.n_components()));
// of freedom with this
// component
component_list[f][i] = component_order[comp];
- };
+ }
}
// set up a map where for each
// take care of that
std::vector<std::vector<unsigned int> >
component_to_dof_map (fe_collection.n_components());
- for (;start!=end;++start)
- {
+ for (ITERATOR cell=start; cell!=end; ++cell)
+ if (!cell->is_artificial() && !cell->is_ghost())
+ {
// on each cell: get dof indices
// and insert them into the global
// list using their component
- const unsigned int fe_index = start->active_fe_index();
- const unsigned int dofs_per_cell =fe_collection[fe_index].dofs_per_cell;
- local_dof_indices.resize (dofs_per_cell);
- start.get_dof_indices (local_dof_indices);
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- component_to_dof_map[component_list[fe_index][i]].
- push_back (local_dof_indices[i]);
- };
+ const unsigned int fe_index = cell->active_fe_index();
+ const unsigned int dofs_per_cell =fe_collection[fe_index].dofs_per_cell;
+ local_dof_indices.resize (dofs_per_cell);
+ cell.get_dof_indices (local_dof_indices);
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ if (start->get_dof_handler().locally_owned_dofs().is_element(local_dof_indices[i]))
+ component_to_dof_map[component_list[fe_index][i]].
+ push_back (local_dof_indices[i]);
+ }
// now we've got all indices sorted
// into buckets labelled with their
.erase (std::unique (component_to_dof_map[component].begin(),
component_to_dof_map[component].end()),
component_to_dof_map[component].end());
- };
+ }
+
+      // calculate the number of locally owned
+      // DoFs per bucket and, from it, the
+      // global index (shift) at which each
+      // component's locally owned DoFs will
+      // start
+ const unsigned int n_buckets = fe_collection.n_components();
+ std::vector<unsigned int> shifts(n_buckets);
+
+ if (const parallel::distributed::Triangulation<dim> * tria
+ = (dynamic_cast<const parallel::distributed::Triangulation<dim>*>
+ (&start->get_dof_handler().get_tria())))
+ {
+#ifdef DEAL_II_USE_P4EST
+ std::vector<unsigned int> local_dof_count(n_buckets);
+
+ for (unsigned int c=0; c<n_buckets; ++c)
+ local_dof_count[c] = component_to_dof_map[c].size();
+
+
+ // gather information from all CPUs
+ std::vector<unsigned int>
+ all_dof_counts(fe_collection.n_components() *
+ Utilities::System::get_n_mpi_processes (tria->get_communicator()));
+
+ MPI_Allgather ( &local_dof_count[0], n_buckets, MPI_INT, &all_dof_counts[0],
+ n_buckets, MPI_INT, tria->get_communicator());
+
+ for (unsigned int i=0; i<n_buckets; ++i)
+ Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
+ ==
+ local_dof_count[i],
+ ExcInternalError());
+
+ //calculate shifts
+ unsigned int cumulated = 0;
+ for (unsigned int c=0; c<n_buckets; ++c)
+ {
+ shifts[c]=cumulated;
+ for (unsigned int i=0; i<tria->locally_owned_subdomain(); ++i)
+ shifts[c] += all_dof_counts[c+n_buckets*i];
+ for (unsigned int i=0; i<Utilities::System::get_n_mpi_processes (tria->get_communicator()); ++i)
+ cumulated += all_dof_counts[c+n_buckets*i];
+ }
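+         // (for illustration: with two components and two processors,
+         // where processor 0 owns 3 and 2 dofs of the respective
+         // components and processor 1 owns 4 and 1, all component-0
+         // dofs come first globally, so processor 0 gets shifts {0,7}
+         // and processor 1 gets shifts {3,9})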
+#else
+ Assert (false, ExcInternalError());
+#endif
+ }
+ else
+ {
+ shifts[0] = 0;
+ for (unsigned int c=1; c<fe_collection.n_components(); ++c)
+ shifts[c] = shifts[c-1] + component_to_dof_map[c-1].size();
+ }
+
+
+
// now concatenate all the
// components in the order the user
// desired to see
unsigned int next_free_index = 0;
- for (unsigned int c=0; c<fe_collection.n_components(); ++c)
+ for (unsigned int component=0; component<fe_collection.n_components(); ++component)
{
- const unsigned int component = c;
-
const typename std::vector<unsigned int>::const_iterator
begin_of_component = component_to_dof_map[component].begin(),
end_of_component = component_to_dof_map[component].end();
+ next_free_index = shifts[component];
+
for (typename std::vector<unsigned int>::const_iterator
dof_index = begin_of_component;
dof_index != end_of_component; ++dof_index)
- new_indices[*dof_index] = next_free_index++;
- };
+ {
+ Assert (start->get_dof_handler().locally_owned_dofs()
+ .index_within_set(*dof_index)
+ <
+ new_indices.size(),
+ ExcInternalError());
+ new_indices[start->get_dof_handler().locally_owned_dofs()
+ .index_within_set(*dof_index)]
+ = next_free_index++;
+ }
+ }
return next_free_index;
}
// first get the association of each dof
// with a subdomain and determine the total
// number of subdomain ids used
- std::vector<unsigned int> subdomain_association (n_dofs);
+ std::vector<types::subdomain_id_t> subdomain_association (n_dofs);
DoFTools::get_subdomain_association (dof_handler,
subdomain_association);
const unsigned int n_subdomains
// current cell is owned by the calling
// processor. Otherwise, just continue.
for (; cell!=endc; ++cell)
- if ((subdomain_id == numbers::invalid_unsigned_int)
- ||
- (subdomain_id == cell->subdomain_id()))
+ if (((subdomain_id == types::invalid_subdomain_id)
+ ||
+ (subdomain_id == cell->subdomain_id()))
+ &&
+ !cell->is_artificial()
+ &&
+ !cell->is_ghost())
{
const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
dofs_on_this_cell.resize (dofs_per_cell);
SparsityPattern &sparsity,
const ConstraintMatrix &constraints,
const bool keep_constrained_dofs,
- const unsigned int subdomain_id)
+ const types::subdomain_id_t subdomain_id)
{
const unsigned int n_dofs = dof.n_dofs();
// current cell is owned by the calling
// processor. Otherwise, just continue.
for (; cell!=endc; ++cell)
- if ((subdomain_id == numbers::invalid_unsigned_int)
- ||
- (subdomain_id == cell->subdomain_id()))
+ if (((subdomain_id == types::invalid_subdomain_id)
+ ||
+ (subdomain_id == cell->subdomain_id()))
+ &&
+ !cell->is_artificial()
+ &&
+ !cell->is_ghost())
{
const unsigned int fe_index = cell->active_fe_index();
const unsigned int dofs_per_cell =fe_collection[fe_index].dofs_per_cell;
const unsigned int n_master_dofs = master_dofs.size ();
const unsigned int n_slave_dofs = slave_dofs.size ();
+                                  // check for a couple of error
+                                  // conditions that can happen in
+                                  // parallel distributed mode: none
+                                  // of the dof indices on either
+                                  // side of the interface may be
+                                  // invalid here
+ for (unsigned int row=0; row!=n_slave_dofs; ++row)
+ Assert (slave_dofs[row] != numbers::invalid_unsigned_int,
+ ExcInternalError());
+ for (unsigned int col=0; col!=n_master_dofs; ++col)
+ Assert (master_dofs[col] != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+
for (unsigned int row=0; row!=n_slave_dofs; ++row)
if (constraints.is_constrained (slave_dofs[row]) == false)
{
// currently not used but may be in the future:
-
+
// static
// void
// make_hp_hanging_node_constraints (const dealii::MGDoFHandler<1,2> &,
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face(face)->has_children())
- {
- // so now we've found a
- // face of an active
- // cell that has
- // children. that means
- // that there are
- // hanging nodes here.
-
- // in any case, faces
- // can have at most two
- // active fe indices,
- // but here the face
- // can have only one
- // (namely the same as
- // that from the cell
- // we're sitting on),
- // and each of the
- // children can have
- // only one as
- // well. check this
- Assert (cell->face(face)->n_active_fe_indices() == 1,
- ExcInternalError());
- Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
- == true,
- ExcInternalError());
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- Assert (cell->face(face)->child(c)->n_active_fe_indices() == 1,
+ // artificial cells can at
+ // best neighbor ghost cells,
+ // but we're not interested
+ // in these interfaces
+ if (!cell->is_artificial ())
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face(face)->has_children())
+ {
+ // in any case, faces
+ // can have at most two
+ // active fe indices,
+ // but here the face
+ // can have only one
+ // (namely the same as
+ // that from the cell
+ // we're sitting on),
+ // and each of the
+ // children can have
+ // only one as
+ // well. check this
+ Assert (cell->face(face)->n_active_fe_indices() == 1,
ExcInternalError());
+ Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
+ == true,
+ ExcInternalError());
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ if (!cell->neighbor_child_on_subface(face,c)->is_artificial())
+ Assert (cell->face(face)->child(c)->n_active_fe_indices() == 1,
+ ExcInternalError());
- // right now, all that
- // is implemented is
- // the case that both
- // sides use the same
- // fe
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- Assert (cell->face(face)->child(c)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcNotImplemented());
-
- // ok, start up the work
- const FiniteElement<dim,spacedim> &fe = cell->get_fe();
- const unsigned int fe_index = cell->active_fe_index();
+ // right now, all that
+ // is implemented is
+ // the case that both
+ // sides use the same
+ // fe
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ if (!cell->neighbor_child_on_subface(face,c)->is_artificial())
+ Assert (cell->face(face)->child(c)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcNotImplemented());
+
+ // ok, start up the work
+ const FiniteElement<dim,spacedim> &fe = cell->get_fe();
+ const unsigned int fe_index = cell->active_fe_index();
+
+ const unsigned int
+ n_dofs_on_mother = 2*fe.dofs_per_vertex + fe.dofs_per_line,
+ n_dofs_on_children = fe.dofs_per_vertex + 2*fe.dofs_per_line;
+
+ dofs_on_mother.resize (n_dofs_on_mother);
+ dofs_on_children.resize (n_dofs_on_children);
+
+ Assert(n_dofs_on_mother == fe.constraints().n(),
+ ExcDimensionMismatch(n_dofs_on_mother,
+ fe.constraints().n()));
+ Assert(n_dofs_on_children == fe.constraints().m(),
+ ExcDimensionMismatch(n_dofs_on_children,
+ fe.constraints().m()));
+
+ const typename DH::line_iterator this_face = cell->face(face);
+
+ // fill the dofs indices. Use same
+ // enumeration scheme as in
+ // @p{FiniteElement::constraints()}
+ unsigned int next_index = 0;
+ for (unsigned int vertex=0; vertex<2; ++vertex)
+ for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
+ dofs_on_mother[next_index++] = this_face->vertex_dof_index(vertex,dof,
+ fe_index);
+ for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
+ dofs_on_mother[next_index++] = this_face->dof_index(dof, fe_index);
+ AssertDimension (next_index, dofs_on_mother.size());
- const unsigned int
- n_dofs_on_mother = 2*fe.dofs_per_vertex + fe.dofs_per_line,
- n_dofs_on_children = fe.dofs_per_vertex + 2*fe.dofs_per_line;
-
- dofs_on_mother.resize (n_dofs_on_mother);
- dofs_on_children.resize (n_dofs_on_children);
-
- Assert(n_dofs_on_mother == fe.constraints().n(),
- ExcDimensionMismatch(n_dofs_on_mother,
- fe.constraints().n()));
- Assert(n_dofs_on_children == fe.constraints().m(),
- ExcDimensionMismatch(n_dofs_on_children,
- fe.constraints().m()));
-
- const typename DH::line_iterator this_face = cell->face(face);
-
- // fill the dofs indices. Use same
- // enumeration scheme as in
- // @p{FiniteElement::constraints()}
- unsigned int next_index = 0;
- for (unsigned int vertex=0; vertex<2; ++vertex)
+ next_index = 0;
for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
- dofs_on_mother[next_index++] = this_face->vertex_dof_index(vertex,dof,
- fe_index);
- for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
- dofs_on_mother[next_index++] = this_face->dof_index(dof, fe_index);
- AssertDimension (next_index, dofs_on_mother.size());
-
- next_index = 0;
- for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(0)->vertex_dof_index(1,dof,fe_index);
- for (unsigned int child=0; child<2; ++child)
- for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
dofs_on_children[next_index++]
- = this_face->child(child)->dof_index(dof, fe_index);
- AssertDimension (next_index, dofs_on_children.size());
+ = this_face->child(0)->vertex_dof_index(1,dof,fe_index);
+ for (unsigned int child=0; child<2; ++child)
+ for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->child(child)->dof_index(dof, fe_index);
+ AssertDimension (next_index, dofs_on_children.size());
- // for each row in the constraint
- // matrix for this line:
- for (unsigned int row=0; row!=dofs_on_children.size(); ++row)
- {
- constraints.add_line (dofs_on_children[row]);
- for (unsigned int i=0; i!=dofs_on_mother.size(); ++i)
- constraints.add_entry (dofs_on_children[row],
- dofs_on_mother[i],
- fe.constraints()(row,i));
+ // for each row in the constraint
+ // matrix for this line:
+ for (unsigned int row=0; row!=dofs_on_children.size(); ++row)
+ {
+ constraints.add_line (dofs_on_children[row]);
+ for (unsigned int i=0; i!=dofs_on_mother.size(); ++i)
+ constraints.add_entry (dofs_on_children[row],
+ dofs_on_mother[i],
+ fe.constraints()(row,i));
- constraints.set_inhomogeneity (dofs_on_children[row], 0.);
- }
- }
- else
- {
- // this face has no
- // children, but it
- // could still be that
- // it is shared by two
- // cells that use a
- // different fe index
- Assert (cell->face(face)->n_active_fe_indices() == 1,
- ExcNotImplemented());
- Assert (cell->face(face)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcInternalError());
- }
+ constraints.set_inhomogeneity (dofs_on_children[row], 0.);
+ }
+ }
+ else
+ {
+ // this face has no
+ // children, but it
+ // could still be
+ // that it is shared
+ // by two cells that
+ // use a different fe
+ // index. check a
+ // couple of things,
+ // but ignore the
+ // case that the
+ // neighbor is an
+ // artificial cell
+ if (!cell->at_boundary(face) &&
+ !cell->neighbor(face)->is_artificial())
+ {
+ Assert (cell->face(face)->n_active_fe_indices() == 1,
+ ExcNotImplemented());
+ Assert (cell->face(face)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcInternalError());
+ }
+ }
}
#endif
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face(face)->has_children())
- {
- // first of all, make sure that
- // we treat a case which is
- // possible, i.e. either no dofs
- // on the face at all or no
- // anisotropic refinement
- if (cell->get_fe().dofs_per_face == 0)
- continue;
-
- Assert(cell->face(face)->refinement_case()==RefinementCase<dim-1>::isotropic_refinement,
- ExcNotImplemented());
-
- // so now we've found a
- // face of an active
- // cell that has
- // children. that means
- // that there are
- // hanging nodes here.
-
- // in any case, faces
- // can have at most two
- // active fe indices,
- // but here the face
- // can have only one
- // (namely the same as
- // that from the cell
- // we're sitting on),
- // and each of the
- // children can have
- // only one as
- // well. check this
- AssertDimension (cell->face(face)->n_active_fe_indices(), 1);
- Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
- == true,
- ExcInternalError());
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- AssertDimension (cell->face(face)->child(c)->n_active_fe_indices(), 1);
-
- // right now, all that
- // is implemented is
- // the case that both
- // sides use the same
- // fe, and not only
- // that but also that
- // all lines bounding
- // this face and the
- // children have the
- // same fe
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- {
- Assert (cell->face(face)->child(c)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcNotImplemented());
- for (unsigned int e=0; e<4; ++e)
+ // artificial cells can at
+ // best neighbor ghost cells,
+ // but we're not interested
+ // in these interfaces
+ if (!cell->is_artificial ())
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face(face)->has_children())
+ {
+ // first of all, make sure that
+ // we treat a case which is
+ // possible, i.e. either no dofs
+ // on the face at all or no
+ // anisotropic refinement
+ if (cell->get_fe().dofs_per_face == 0)
+ continue;
+
+ Assert(cell->face(face)->refinement_case()==RefinementCase<dim-1>::isotropic_refinement,
+ ExcNotImplemented());
+
+ // in any case, faces
+ // can have at most two
+ // active fe indices,
+ // but here the face
+ // can have only one
+ // (namely the same as
+ // that from the cell
+ // we're sitting on),
+ // and each of the
+ // children can have
+ // only one as
+ // well. check this
+ AssertDimension (cell->face(face)->n_active_fe_indices(), 1);
+ Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
+ == true,
+ ExcInternalError());
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ AssertDimension (cell->face(face)->child(c)->n_active_fe_indices(), 1);
+
+ // right now, all that
+ // is implemented is
+ // the case that both
+ // sides use the same
+ // fe, and not only
+ // that but also that
+ // all lines bounding
+ // this face and the
+ // children have the
+ // same fe
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ if (!cell->neighbor_child_on_subface(face,c)->is_artificial())
{
- Assert (cell->face(face)->child(c)->line(e)
- ->n_active_fe_indices() == 1,
- ExcNotImplemented());
- Assert (cell->face(face)->child(c)->line(e)
+ Assert (cell->face(face)->child(c)
->fe_index_is_active(cell->active_fe_index()) == true,
ExcNotImplemented());
+ for (unsigned int e=0; e<4; ++e)
+ {
+ Assert (cell->face(face)->child(c)->line(e)
+ ->n_active_fe_indices() == 1,
+ ExcNotImplemented());
+ Assert (cell->face(face)->child(c)->line(e)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcNotImplemented());
+ }
}
- }
- for (unsigned int e=0; e<4; ++e)
- {
- Assert (cell->face(face)->line(e)
- ->n_active_fe_indices() == 1,
- ExcNotImplemented());
- Assert (cell->face(face)->line(e)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcNotImplemented());
- }
-
- // ok, start up the work
- const FiniteElement<dim> &fe = cell->get_fe();
- const unsigned int fe_index = cell->active_fe_index();
-
- const unsigned int
- n_dofs_on_mother = (4*fe.dofs_per_vertex+
- 4*fe.dofs_per_line+
- fe.dofs_per_quad),
- n_dofs_on_children = (5*fe.dofs_per_vertex+
- 12*fe.dofs_per_line+
- 4*fe.dofs_per_quad);
- //TODO[TL]: think about this and the following in case of anisotropic refinement
-
- dofs_on_mother.resize (n_dofs_on_mother);
- dofs_on_children.resize (n_dofs_on_children);
-
- Assert(n_dofs_on_mother == fe.constraints().n(),
- ExcDimensionMismatch(n_dofs_on_mother,
- fe.constraints().n()));
- Assert(n_dofs_on_children == fe.constraints().m(),
- ExcDimensionMismatch(n_dofs_on_children,
- fe.constraints().m()));
-
- const typename DH::face_iterator this_face = cell->face(face);
-
- // fill the dofs indices. Use same
- // enumeration scheme as in
- // @p{FiniteElement::constraints()}
- unsigned int next_index = 0;
- for (unsigned int vertex=0; vertex<4; ++vertex)
- for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
- dofs_on_mother[next_index++] = this_face->vertex_dof_index(vertex,dof,
- fe_index);
- for (unsigned int line=0; line<4; ++line)
- for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
- dofs_on_mother[next_index++]
- = this_face->line(line)->dof_index(dof, fe_index);
- for (unsigned int dof=0; dof!=fe.dofs_per_quad; ++dof)
- dofs_on_mother[next_index++] = this_face->dof_index(dof, fe_index);
- AssertDimension (next_index, dofs_on_mother.size());
-
- next_index = 0;
-
- // assert some consistency
- // assumptions
- //TODO[TL]: think about this in case of anisotropic refinement
- Assert (dof_handler.get_tria().get_anisotropic_refinement_flag() ||
- ((this_face->child(0)->vertex_index(3) ==
- this_face->child(1)->vertex_index(2)) &&
- (this_face->child(0)->vertex_index(3) ==
- this_face->child(2)->vertex_index(1)) &&
- (this_face->child(0)->vertex_index(3) ==
- this_face->child(3)->vertex_index(0))),
- ExcInternalError());
- for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(0)->vertex_dof_index(3,dof);
+ for (unsigned int e=0; e<4; ++e)
+ {
+ Assert (cell->face(face)->line(e)
+ ->n_active_fe_indices() == 1,
+ ExcNotImplemented());
+ Assert (cell->face(face)->line(e)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcNotImplemented());
+ }
- // dof numbers on the centers of
- // the lines bounding this face
- for (unsigned int line=0; line<4; ++line)
+ // ok, start up the work
+ const FiniteElement<dim> &fe = cell->get_fe();
+ const unsigned int fe_index = cell->active_fe_index();
+
+ const unsigned int
+ n_dofs_on_mother = (4*fe.dofs_per_vertex+
+ 4*fe.dofs_per_line+
+ fe.dofs_per_quad),
+ n_dofs_on_children = (5*fe.dofs_per_vertex+
+ 12*fe.dofs_per_line+
+ 4*fe.dofs_per_quad);
+ //TODO[TL]: think about this and the following in case of anisotropic refinement
+
+ dofs_on_mother.resize (n_dofs_on_mother);
+ dofs_on_children.resize (n_dofs_on_children);
+
+ Assert(n_dofs_on_mother == fe.constraints().n(),
+ ExcDimensionMismatch(n_dofs_on_mother,
+ fe.constraints().n()));
+ Assert(n_dofs_on_children == fe.constraints().m(),
+ ExcDimensionMismatch(n_dofs_on_children,
+ fe.constraints().m()));
+
+ const typename DH::face_iterator this_face = cell->face(face);
+
+ // fill the dofs indices. Use same
+ // enumeration scheme as in
+ // @p{FiniteElement::constraints()}
+ unsigned int next_index = 0;
+ for (unsigned int vertex=0; vertex<4; ++vertex)
+ for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
+ dofs_on_mother[next_index++] = this_face->vertex_dof_index(vertex,dof,
+ fe_index);
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
+ dofs_on_mother[next_index++]
+ = this_face->line(line)->dof_index(dof, fe_index);
+ for (unsigned int dof=0; dof!=fe.dofs_per_quad; ++dof)
+ dofs_on_mother[next_index++] = this_face->dof_index(dof, fe_index);
+ AssertDimension (next_index, dofs_on_mother.size());
+
+ next_index = 0;
+
+ // assert some consistency
+ // assumptions
+ //TODO[TL]: think about this in case of anisotropic refinement
+ Assert (dof_handler.get_tria().get_anisotropic_refinement_flag() ||
+ ((this_face->child(0)->vertex_index(3) ==
+ this_face->child(1)->vertex_index(2)) &&
+ (this_face->child(0)->vertex_index(3) ==
+ this_face->child(2)->vertex_index(1)) &&
+ (this_face->child(0)->vertex_index(3) ==
+ this_face->child(3)->vertex_index(0))),
+ ExcInternalError());
for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
dofs_on_children[next_index++]
- = this_face->line(line)->child(0)->vertex_dof_index(1,dof, fe_index);
-
- // next the dofs on the lines interior
- // to the face; the order of these
- // lines is laid down in the
- // FiniteElement class documentation
- for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(0)->line(1)->dof_index(dof, fe_index);
- for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(2)->line(1)->dof_index(dof, fe_index);
- for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(0)->line(3)->dof_index(dof, fe_index);
- for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
- dofs_on_children[next_index++]
- = this_face->child(1)->line(3)->dof_index(dof, fe_index);
+ = this_face->child(0)->vertex_dof_index(3,dof);
- // dofs on the bordering lines
- for (unsigned int line=0; line<4; ++line)
- for (unsigned int child=0; child<2; ++child)
- for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
+ // dof numbers on the centers of
+ // the lines bounding this face
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int dof=0; dof!=fe.dofs_per_vertex; ++dof)
dofs_on_children[next_index++]
- = this_face->line(line)->child(child)->dof_index(dof, fe_index);
+ = this_face->line(line)->child(0)->vertex_dof_index(1,dof, fe_index);
- // finally, for the dofs interior
- // to the four child faces
- for (unsigned int child=0; child<4; ++child)
- for (unsigned int dof=0; dof!=fe.dofs_per_quad; ++dof)
+ // next the dofs on the lines interior
+ // to the face; the order of these
+ // lines is laid down in the
+ // FiniteElement class documentation
+ for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
dofs_on_children[next_index++]
- = this_face->child(child)->dof_index(dof, fe_index);
- AssertDimension (next_index, dofs_on_children.size());
+ = this_face->child(0)->line(1)->dof_index(dof, fe_index);
+ for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->child(2)->line(1)->dof_index(dof, fe_index);
+ for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->child(0)->line(3)->dof_index(dof, fe_index);
+ for (unsigned int dof=0; dof<fe.dofs_per_line; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->child(1)->line(3)->dof_index(dof, fe_index);
+
+ // dofs on the bordering lines
+ for (unsigned int line=0; line<4; ++line)
+ for (unsigned int child=0; child<2; ++child)
+ for (unsigned int dof=0; dof!=fe.dofs_per_line; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->line(line)->child(child)->dof_index(dof, fe_index);
+
+ // finally, for the dofs interior
+ // to the four child faces
+ for (unsigned int child=0; child<4; ++child)
+ for (unsigned int dof=0; dof!=fe.dofs_per_quad; ++dof)
+ dofs_on_children[next_index++]
+ = this_face->child(child)->dof_index(dof, fe_index);
+ AssertDimension (next_index, dofs_on_children.size());
- // for each row in the constraint
- // matrix for this line:
- for (unsigned int row=0; row!=dofs_on_children.size(); ++row)
- {
- constraints.add_line (dofs_on_children[row]);
- for (unsigned int i=0; i!=dofs_on_mother.size(); ++i)
- constraints.add_entry (dofs_on_children[row],
- dofs_on_mother[i],
- fe.constraints()(row,i));
+ // for each row in the constraint
+ // matrix for this line:
+ for (unsigned int row=0; row!=dofs_on_children.size(); ++row)
+ {
+ constraints.add_line (dofs_on_children[row]);
+ for (unsigned int i=0; i!=dofs_on_mother.size(); ++i)
+ constraints.add_entry (dofs_on_children[row],
+ dofs_on_mother[i],
+ fe.constraints()(row,i));
- constraints.set_inhomogeneity(dofs_on_children[row], 0.);
- }
- }
- else
- {
- // this face has no
- // children, but it
- // could still be that
- // it is shared by two
- // cells that use a
- // different fe index
- Assert (cell->face(face)->n_active_fe_indices() == 1,
- ExcNotImplemented());
- Assert (cell->face(face)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcInternalError());
- }
+ constraints.set_inhomogeneity(dofs_on_children[row], 0.);
+ }
+ }
+ else
+ {
+ // this face has no
+ // children, but it
+ // could still be
+ // that it is shared
+ // by two cells that
+ // use a different fe
+ // index. check a
+ // couple of things,
+ // but ignore the
+ // case that the
+ // neighbor is an
+ // artificial cell
+ if (!cell->at_boundary(face) &&
+ !cell->neighbor(face)->is_artificial())
+ {
+ Assert (cell->face(face)->n_active_fe_indices() == 1,
+ ExcNotImplemented());
+ Assert (cell->face(face)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcInternalError());
+ }
+ }
}
#endif
typename DH::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
for (; cell!=endc; ++cell)
- for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
- if (cell->face(face)->has_children())
- {
- // first of all, make sure that
- // we treat a case which is
- // possible, i.e. either no dofs
- // on the face at all or no
- // anisotropic refinement
- if (cell->get_fe().dofs_per_face == 0)
- continue;
-
- Assert(cell->face(face)->refinement_case()==RefinementCase<dim-1>::isotropic_refinement,
- ExcNotImplemented());
-
- // check to see if the child elements
- // have no dofs on the
- // shared face. if none of them do, we
- // we can simply continue to the next face.
- // if only some of them have dofs, while
- // others do not, then we don't know
- // what to do and throw an exception.
- bool any_are_zero = false;
- bool all_are_zero = true;
-
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- {
- if(cell->neighbor_child_on_subface (face, c)->get_fe().dofs_per_face == 0)
- any_are_zero = true;
- else
- all_are_zero = false;
- }
-
- if(all_are_zero)
- continue;
-
- Assert( all_are_zero || !any_are_zero, ExcNotImplemented() );
-
- // so now we've found a
- // face of an active
- // cell that has
- // children. that means
- // that there are
- // hanging nodes here.
-
- // in any case, faces
- // can have at most two
- // sets of active fe
- // indices, but here
- // the face can have
- // only one (namely the
- // same as that from
- // the cell we're
- // sitting on), and
- // each of the children
- // can have only one as
- // well. check this
- Assert (cell->face(face)->n_active_fe_indices() == 1,
- ExcInternalError());
- Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
- == true,
- ExcInternalError());
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- Assert (cell->face(face)->child(c)->n_active_fe_indices() == 1,
- ExcInternalError());
+ // artificial cells can at
+ // best neighbor ghost cells,
+ // but we're not interested
+ // in these interfaces
+ if (!cell->is_artificial ())
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (cell->face(face)->has_children())
+ {
+ // first of all, make sure that
+ // we treat a case which is
+ // possible, i.e. either no dofs
+ // on the face at all or no
+ // anisotropic refinement
+ if (cell->get_fe().dofs_per_face == 0)
+ continue;
+
+ Assert(cell->face(face)->refinement_case()==RefinementCase<dim-1>::isotropic_refinement,
+ ExcNotImplemented());
+
+ // check to see if the child elements
+ // have no dofs on the
+                 // shared face. if none of them do, we
+                 // can simply continue to the next face.
+ // if only some of them have dofs, while
+ // others do not, then we don't know
+ // what to do and throw an exception.
+ //
+ // ignore all
+ // interfaces with
+ // artificial cells
+ bool any_are_zero = false;
+ bool all_are_zero = true;
+
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ if (!cell->neighbor_child_on_subface(face, c)->is_artificial())
+ {
+ if (cell->neighbor_child_on_subface(face, c)->get_fe().dofs_per_face == 0)
+ any_are_zero = true;
+ else
+ all_are_zero = false;
+ }
- // first find out
- // whether we can
- // constrain each of
- // the subfaces to the
- // mother face. in the
- // lingo of the hp
- // paper, this would be
- // the simple
- // case. note that we
- // can short-circuit
- // this decision if the
- // dof_handler doesn't
- // support hp at all
- FiniteElementDomination::Domination
- mother_face_dominates = FiniteElementDomination::either_element_can_dominate;
-
- if (DoFHandlerSupportsDifferentFEs<DH>::value == true)
- for (unsigned int c=0; c<cell->face(face)->number_of_children(); ++c)
- mother_face_dominates = mother_face_dominates &
- (cell->get_fe().compare_for_face_domination
- (cell->neighbor_child_on_subface (face, c)->get_fe()));
-
- switch (mother_face_dominates)
- {
- case FiniteElementDomination::this_element_dominates:
- case FiniteElementDomination::either_element_can_dominate:
+ if(all_are_zero)
+ continue;
+
+ Assert( all_are_zero || !any_are_zero, ExcNotImplemented() );
+
+ // so now we've found a
+ // face of an active
+ // cell that has
+ // children. that means
+ // that there are
+ // hanging nodes here.
+
+ // in any case, faces
+ // can have at most two
+ // sets of active fe
+ // indices, but here
+ // the face can have
+ // only one (namely the
+ // same as that from
+ // the cell we're
+ // sitting on), and
+ // each of the children
+ // can have only one as
+ // well. check this
+ Assert (cell->face(face)->n_active_fe_indices() == 1,
+ ExcInternalError());
+ Assert (cell->face(face)->fe_index_is_active(cell->active_fe_index())
+ == true,
+ ExcInternalError());
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ Assert (cell->face(face)->child(c)->n_active_fe_indices() == 1,
+ ExcInternalError());
+
+ // first find out
+ // whether we can
+ // constrain each of
+ // the subfaces to the
+ // mother face. in the
+ // lingo of the hp
+ // paper, this would be
+ // the simple
+ // case. note that we
+ // can short-circuit
+ // this decision if the
+ // dof_handler doesn't
+ // support hp at all
+ //
+ // ignore all
+ // interfaces with
+ // artificial cells
+ FiniteElementDomination::Domination
+ mother_face_dominates = FiniteElementDomination::either_element_can_dominate;
+
+ if (DoFHandlerSupportsDifferentFEs<DH>::value == true)
+ for (unsigned int c=0; c<cell->face(face)->number_of_children(); ++c)
+ if (!cell->neighbor_child_on_subface (face, c)->is_artificial())
+ mother_face_dominates = mother_face_dominates &
+ (cell->get_fe().compare_for_face_domination
+ (cell->neighbor_child_on_subface (face, c)->get_fe()));
+
+ switch (mother_face_dominates)
{
- // Case 1 (the
- // simple case
- // and the only
- // case that can
- // happen for
- // non-hp
- // DoFHandlers):
- // The coarse
- // element
- // dominates the
- // elements on
- // the subfaces
- // (or they are
- // all the same)
- master_dofs.resize (cell->get_fe().dofs_per_face);
-
- cell->face(face)->get_dof_indices (master_dofs,
- cell->active_fe_index ());
-
- // Now create constraint matrix for
- // the subfaces and assemble it.
- for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
- {
- const typename DH::active_face_iterator
- subface = cell->face(face)->child(c);
-
- Assert (subface->n_active_fe_indices() == 1,
- ExcInternalError());
-
- const unsigned int
- subface_fe_index = subface->nth_active_fe_index(0);
-
- // Same procedure as for the
- // mother cell. Extract the face
- // DoFs from the cell DoFs.
- slave_dofs.resize (subface->get_fe(subface_fe_index)
- .dofs_per_face);
- subface->get_dof_indices (slave_dofs, subface_fe_index);
-
- // Now create the
- // element constraint
- // for this subface.
- //
- // As a side remark,
- // one may wonder the
- // following:
- // neighbor_child is
- // clearly computed
- // correctly,
- // i.e. taking into
- // account
- // face_orientation
- // (just look at the
- // implementation of
- // that
- // function). however,
- // we don't care about
- // this here, when we
- // ask for
- // subface_interpolation
- // on subface c. the
- // question rather is:
- // do we have to
- // translate 'c' here
- // as well?
- //
- // the answer is in
- // fact 'no'. if one
- // does that, results
- // are wrong:
- // constraints are
- // added twice for the
- // same pair of nodes
- // but with differing
- // weights. in
- // addition, one can
- // look at the
- // deal.II/project_*_03
- // tests that look at
- // exactly this case:
- // there, we have a
- // mesh with at least
- // one
- // face_orientation==false
- // and hanging nodes,
- // and the results of
- // those tests show
- // that the result of
- // projection verifies
- // the approximation
- // properties of a
- // finite element onto
- // that mesh
- ensure_existence_of_subface_matrix
- (cell->get_fe(),
- subface->get_fe(subface_fe_index),
- c,
- subface_interpolation_matrices
- [cell->active_fe_index()][subface_fe_index][c]);
-
- // Add constraints to global constraint
- // matrix.
+ case FiniteElementDomination::this_element_dominates:
+ case FiniteElementDomination::either_element_can_dominate:
+ {
+ // Case 1 (the
+ // simple case
+ // and the only
+ // case that can
+ // happen for
+ // non-hp
+ // DoFHandlers):
+ // The coarse
+ // element
+ // dominates the
+ // elements on
+ // the subfaces
+ // (or they are
+ // all the same)
+ //
+ // so we are
+ // going to
+ // constrain
+ // the DoFs on
+ // the face
+ // children
+ // against the
+ // DoFs on the
+ // face itself
+ master_dofs.resize (cell->get_fe().dofs_per_face);
+
+ cell->face(face)->get_dof_indices (master_dofs,
+ cell->active_fe_index ());
+
+ // Now create
+ // constraint matrix
+ // for the subfaces and
+ // assemble it. ignore
+ // all interfaces with
+ // artificial cells
+ // because we can only
+ // get to such
+ // interfaces if the
+ // current cell is a
+ // ghost cell. also
+ // ignore the interface
+ // if the neighboring
+ // cell is a ghost cell
+ // in 2d: what we would
+ // compute here are the
+ // constraints on the
+ // ghost cell's DoFs,
+ // but we are not
+ // interested in those:
+ // we only want
+ // constraints on
+ // *locally active*
+ // DoFs, not on
+ // *locally relevant*
+ // DoFs. However, in 3d
+ // we must still
+ // compute those
+ // constraints because
+ // it might happen that
+ // a constraint is
+ // related to an edge
+ // where the hanging
+ // node is only
+ // detected if we also
+ // look between ghosts
+ for (unsigned int c=0; c<cell->face(face)->n_children(); ++c)
+ {
+ if (cell->neighbor_child_on_subface (face, c)->is_artificial()
+ ||
+ (dim == 2 && cell->neighbor_child_on_subface (face, c)->is_ghost()))
+ continue;
+
+ const typename DH::active_face_iterator
+ subface = cell->face(face)->child(c);
+
+ Assert (subface->n_active_fe_indices() == 1,
+ ExcInternalError());
+
+ const unsigned int
+ subface_fe_index = subface->nth_active_fe_index(0);
+
+ // Same procedure as for the
+ // mother cell. Extract the face
+ // DoFs from the cell DoFs.
+ slave_dofs.resize (subface->get_fe(subface_fe_index)
+ .dofs_per_face);
+ subface->get_dof_indices (slave_dofs, subface_fe_index);
+
+ for (unsigned int i=0; i<slave_dofs.size(); ++i)
+ Assert (slave_dofs[i] != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ // Now create the
+ // element constraint
+ // for this subface.
+ //
+ // As a side remark,
+                                                // one may wonder about the
+ // following:
+ // neighbor_child is
+ // clearly computed
+ // correctly,
+ // i.e. taking into
+ // account
+ // face_orientation
+ // (just look at the
+ // implementation of
+ // that
+ // function). however,
+ // we don't care about
+ // this here, when we
+ // ask for
+ // subface_interpolation
+ // on subface c. the
+ // question rather is:
+ // do we have to
+ // translate 'c' here
+ // as well?
+ //
+ // the answer is in
+ // fact 'no'. if one
+ // does that, results
+ // are wrong:
+ // constraints are
+ // added twice for the
+ // same pair of nodes
+ // but with differing
+ // weights. in
+ // addition, one can
+ // look at the
+ // deal.II/project_*_03
+ // tests that look at
+ // exactly this case:
+ // there, we have a
+ // mesh with at least
+ // one
+ // face_orientation==false
+ // and hanging nodes,
+ // and the results of
+ // those tests show
+ // that the result of
+                                                // projection satisfies
+                                                // the approximation
+                                                // properties of the
+                                                // finite element space
+                                                // on that mesh
+ ensure_existence_of_subface_matrix
+ (cell->get_fe(),
+ subface->get_fe(subface_fe_index),
+ c,
+ subface_interpolation_matrices
+ [cell->active_fe_index()][subface_fe_index][c]);
+
+ // Add constraints to global constraint
+ // matrix.
#ifdef WOLFGANG
- std::cout << "Constraints for cell=" << cell
- << ", face=" << face
- << ", subface=" << c
- << std::endl;
+ std::cout << "Constraints for cell=" << cell
+ << ", face=" << face
+ << ", subface=" << c
+ << std::endl;
#endif
- filter_constraints (master_dofs,
- slave_dofs,
- *(subface_interpolation_matrices
- [cell->active_fe_index()][subface_fe_index][c]),
- constraints);
- }
+ filter_constraints (master_dofs,
+ slave_dofs,
+ *(subface_interpolation_matrices
+ [cell->active_fe_index()][subface_fe_index][c]),
+ constraints);
+ }
- break;
- }
+ break;
+ }
- case FiniteElementDomination::other_element_dominates:
- case FiniteElementDomination::neither_element_dominates:
- {
- // Case 2 (the "complex"
- // case): at least one
- // (the neither_... case)
- // of the finer elements
- // or all of them (the
- // other_... case) is
- // dominating. See the hp
- // paper for a way how to
- // deal with this
- // situation
- //
- // since this is
- // something that
- // can only
- // happen for hp
- // dof handlers,
- // add a check
- // here...
- Assert (DoFHandlerSupportsDifferentFEs<DH>::value == true,
- ExcInternalError());
-
- // we first have
- // to find the
- // finite element
- // that is able
- // to generate a
- // space that all
- // the other ones
- // can be
- // constrained to
- const unsigned int dominating_fe_index
- = get_most_dominating_subface_fe_index (cell->face(face));
-
- const FiniteElement<dim,spacedim> &dominating_fe
- = dof_handler.get_fe()[dominating_fe_index];
-
- // check also
- // that it is
- // able to
- // constrain the
- // mother
- // face. it
- // should be, or
- // we wouldn't
- // have gotten
- // into the
- // branch for the
- // 'complex' case
- Assert ((dominating_fe.compare_for_face_domination
- (cell->face(face)->get_fe(cell->face(face)->nth_active_fe_index(0)))
- == FiniteElementDomination::this_element_dominates)
- ||
- (dominating_fe.compare_for_face_domination
- (cell->face(face)->get_fe(cell->face(face)->nth_active_fe_index(0)))
- == FiniteElementDomination::either_element_can_dominate),
- ExcInternalError());
+ case FiniteElementDomination::other_element_dominates:
+ case FiniteElementDomination::neither_element_dominates:
+ {
+ // Case 2 (the "complex"
+ // case): at least one
+ // (the neither_... case)
+ // of the finer elements
+ // or all of them (the
+ // other_... case) is
+ // dominating. See the hp
+ // paper for a way how to
+             // paper for a way to
+ // situation
+ //
+ // since this is
+ // something that
+ // can only
+ // happen for hp
+ // dof handlers,
+ // add a check
+ // here...
+ Assert (DoFHandlerSupportsDifferentFEs<DH>::value == true,
+ ExcInternalError());
+ // we first have
+ // to find the
+ // finite element
+ // that is able
+ // to generate a
+ // space that all
+ // the other ones
+ // can be
+ // constrained to
+ const unsigned int dominating_fe_index
+ = get_most_dominating_subface_fe_index (cell->face(face));
+
+ const FiniteElement<dim,spacedim> &dominating_fe
+ = dof_handler.get_fe()[dominating_fe_index];
+
+ // check also
+ // that it is
+ // able to
+ // constrain the
+ // mother
+ // face. it
+ // should be, or
+ // we wouldn't
+ // have gotten
+ // into the
+ // branch for the
+ // 'complex' case
+ Assert ((dominating_fe.compare_for_face_domination
+ (cell->face(face)->get_fe(cell->face(face)->nth_active_fe_index(0)))
+ == FiniteElementDomination::this_element_dominates)
+ ||
+ (dominating_fe.compare_for_face_domination
+ (cell->face(face)->get_fe(cell->face(face)->nth_active_fe_index(0)))
+ == FiniteElementDomination::either_element_can_dominate),
+ ExcInternalError());
- // first get the
- // interpolation matrix
- // from the mother to the
- // virtual dofs
- Assert (dominating_fe.dofs_per_face <=
- cell->get_fe().dofs_per_face,
- ExcInternalError());
- ensure_existence_of_face_matrix
- (dominating_fe,
- cell->get_fe(),
- face_interpolation_matrices
- [dominating_fe_index][cell->active_fe_index()]);
-
- // split this matrix into
- // master and slave
- // components. invert the
- // master component
- ensure_existence_of_master_dof_mask
- (cell->get_fe(),
- dominating_fe,
- (*face_interpolation_matrices
- [dominating_fe_index]
- [cell->active_fe_index()]),
- master_dof_masks
- [dominating_fe_index]
- [cell->active_fe_index()]);
-
- ensure_existence_of_split_face_matrix
- (*face_interpolation_matrices
- [dominating_fe_index][cell->active_fe_index()],
- (*master_dof_masks
- [dominating_fe_index][cell->active_fe_index()]),
- split_face_interpolation_matrices
- [dominating_fe_index][cell->active_fe_index()]);
-
- const FullMatrix<double> &restrict_mother_to_virtual_master_inv
- = (split_face_interpolation_matrices
- [dominating_fe_index][cell->active_fe_index()]->first);
-
- const FullMatrix<double> &restrict_mother_to_virtual_slave
- = (split_face_interpolation_matrices
- [dominating_fe_index][cell->active_fe_index()]->second);
-
- // now compute
- // the constraint
- // matrix as the
- // product
- // between the
- // inverse matrix
- // and the slave
- // part
- constraint_matrix.reinit (cell->get_fe().dofs_per_face -
- dominating_fe.dofs_per_face,
- dominating_fe.dofs_per_face);
- restrict_mother_to_virtual_slave
- .mmult (constraint_matrix,
- restrict_mother_to_virtual_master_inv);
-
- // then figure
- // out the global
- // numbers of
- // master and
- // slave dofs and
- // apply
- // constraints
- scratch_dofs.resize (cell->get_fe().dofs_per_face);
- cell->face(face)->get_dof_indices (scratch_dofs,
- cell->active_fe_index ());
-
- // split dofs into master
- // and slave components
- master_dofs.clear ();
- slave_dofs.clear ();
- for (unsigned int i=0; i<cell->get_fe().dofs_per_face; ++i)
- if ((*master_dof_masks
- [dominating_fe_index][cell->active_fe_index()])[i] == true)
- master_dofs.push_back (scratch_dofs[i]);
- else
- slave_dofs.push_back (scratch_dofs[i]);
+ // first get the
+ // interpolation matrix
+ // from the mother to the
+ // virtual dofs
+ Assert (dominating_fe.dofs_per_face <=
+ cell->get_fe().dofs_per_face,
+ ExcInternalError());
- AssertDimension (master_dofs.size(), dominating_fe.dofs_per_face);
- AssertDimension (slave_dofs.size(),
- cell->get_fe().dofs_per_face - dominating_fe.dofs_per_face);
+ ensure_existence_of_face_matrix
+ (dominating_fe,
+ cell->get_fe(),
+ face_interpolation_matrices
+ [dominating_fe_index][cell->active_fe_index()]);
+
+ // split this matrix into
+ // master and slave
+ // components. invert the
+ // master component
+ ensure_existence_of_master_dof_mask
+ (cell->get_fe(),
+ dominating_fe,
+ (*face_interpolation_matrices
+ [dominating_fe_index]
+ [cell->active_fe_index()]),
+ master_dof_masks
+ [dominating_fe_index]
+ [cell->active_fe_index()]);
+
+ ensure_existence_of_split_face_matrix
+ (*face_interpolation_matrices
+ [dominating_fe_index][cell->active_fe_index()],
+ (*master_dof_masks
+ [dominating_fe_index][cell->active_fe_index()]),
+ split_face_interpolation_matrices
+ [dominating_fe_index][cell->active_fe_index()]);
+
+ const FullMatrix<double> &restrict_mother_to_virtual_master_inv
+ = (split_face_interpolation_matrices
+ [dominating_fe_index][cell->active_fe_index()]->first);
+
+ const FullMatrix<double> &restrict_mother_to_virtual_slave
+ = (split_face_interpolation_matrices
+ [dominating_fe_index][cell->active_fe_index()]->second);
+
+ // now compute
+ // the constraint
+ // matrix as the
+ // product
+ // between the
+ // inverse matrix
+ // and the slave
+ // part
+ constraint_matrix.reinit (cell->get_fe().dofs_per_face -
+ dominating_fe.dofs_per_face,
+ dominating_fe.dofs_per_face);
+ restrict_mother_to_virtual_slave
+ .mmult (constraint_matrix,
+ restrict_mother_to_virtual_master_inv);
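+                                        // (as a rough sketch of the
+                                        // dimensions involved, writing
+                                        // n_m for the mother face's
+                                        // dofs_per_face and n_v for
+                                        // that of the dominating
+                                        // element: the slave part is
+                                        // an (n_m-n_v) x n_v matrix,
+                                        // the inverted master part is
+                                        // n_v x n_v, so the product
+                                        // has one row per constrained
+                                        // (slave) dof on the mother
+                                        // face and one column per
+                                        // master dof)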
+
+ // then figure
+ // out the global
+ // numbers of
+ // master and
+ // slave dofs and
+ // apply
+ // constraints
+ scratch_dofs.resize (cell->get_fe().dofs_per_face);
+ cell->face(face)->get_dof_indices (scratch_dofs,
+ cell->active_fe_index ());
+
+ // split dofs into master
+ // and slave components
+ master_dofs.clear ();
+ slave_dofs.clear ();
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_face; ++i)
+ if ((*master_dof_masks
+ [dominating_fe_index][cell->active_fe_index()])[i] == true)
+ master_dofs.push_back (scratch_dofs[i]);
+ else
+ slave_dofs.push_back (scratch_dofs[i]);
+
+ AssertDimension (master_dofs.size(), dominating_fe.dofs_per_face);
+ AssertDimension (slave_dofs.size(),
+ cell->get_fe().dofs_per_face - dominating_fe.dofs_per_face);
#ifdef WOLFGANG
- std::cout << "Constraints for cell=" << cell
- << ", face=" << face
- << " (complicated case, mother)"
- << std::endl;
+ std::cout << "Constraints for cell=" << cell
+ << ", face=" << face
+ << " (complicated case, mother)"
+ << std::endl;
#endif
- filter_constraints (master_dofs,
- slave_dofs,
- constraint_matrix,
- constraints);
+ filter_constraints (master_dofs,
+ slave_dofs,
+ constraint_matrix,
+ constraints);
+
+
+
+ // next we have
+ // to deal with
+ // the
+ // subfaces. do
+ // as discussed
+ // in the hp
+ // paper
+ for (unsigned int sf=0;
+ sf<cell->face(face)->n_children(); ++sf)
+ {
+ // ignore
+ // interfaces
+ // with
+ // artificial
+ // cells as
+ // well as
+ // interfaces
+ // between
+ // ghost
+ // cells in 2d
+ if (cell->neighbor_child_on_subface (face, sf)->is_artificial()
+ ||
+ (dim==2 && cell->is_ghost()
+ &&
+ cell->neighbor_child_on_subface (face, sf)->is_ghost()))
+ continue;
+
+ Assert (cell->face(face)->child(sf)
+ ->n_active_fe_indices() == 1,
+ ExcInternalError());
+
+ const unsigned int subface_fe_index
+ = cell->face(face)->child(sf)->nth_active_fe_index(0);
+ const FiniteElement<dim,spacedim> &subface_fe
+ = dof_handler.get_fe()[subface_fe_index];
+
+ // first get the
+ // interpolation
+ // matrix from the
+ // subface to the
+ // virtual dofs
+ Assert (dominating_fe.dofs_per_face <=
+ subface_fe.dofs_per_face,
+ ExcInternalError());
+ ensure_existence_of_subface_matrix
+ (dominating_fe,
+ subface_fe,
+ sf,
+ subface_interpolation_matrices
+ [dominating_fe_index][subface_fe_index][sf]);
+ const FullMatrix<double> &restrict_subface_to_virtual
+ = *(subface_interpolation_matrices
+ [dominating_fe_index][subface_fe_index][sf]);
+ constraint_matrix.reinit (subface_fe.dofs_per_face,
+ dominating_fe.dofs_per_face);
- // next we have to
- // deal with the
- // subfaces. do as
- // discussed in the
- // paper
- for (unsigned int sf=0;
- sf<cell->face(face)->n_children(); ++sf)
- {
- Assert (cell->face(face)->child(sf)
- ->n_active_fe_indices() == 1,
- ExcInternalError());
-
- const unsigned int subface_fe_index
- = cell->face(face)->child(sf)->nth_active_fe_index(0);
- const FiniteElement<dim,spacedim> &subface_fe
- = dof_handler.get_fe()[subface_fe_index];
-
- // first get the
- // interpolation
- // matrix from the
- // subface to the
- // virtual dofs
- Assert (dominating_fe.dofs_per_face <=
- subface_fe.dofs_per_face,
- ExcInternalError());
- ensure_existence_of_subface_matrix
- (dominating_fe,
- subface_fe,
- sf,
- subface_interpolation_matrices
- [dominating_fe_index][subface_fe_index][sf]);
-
- const FullMatrix<double> &restrict_subface_to_virtual
- = *(subface_interpolation_matrices
- [dominating_fe_index][subface_fe_index][sf]);
-
- constraint_matrix.reinit (subface_fe.dofs_per_face,
- dominating_fe.dofs_per_face);
-
- restrict_subface_to_virtual
- .mmult (constraint_matrix,
- restrict_mother_to_virtual_master_inv);
-
- slave_dofs.resize (subface_fe.dofs_per_face);
- cell->face(face)->child(sf)->get_dof_indices (slave_dofs,
- subface_fe_index);
+ restrict_subface_to_virtual
+ .mmult (constraint_matrix,
+ restrict_mother_to_virtual_master_inv);
+
+ slave_dofs.resize (subface_fe.dofs_per_face);
+ cell->face(face)->child(sf)->get_dof_indices (slave_dofs,
+ subface_fe_index);
#ifdef WOLFGANG
- std::cout << "Constraints for cell=" << cell
- << ", face=" << face
- << ", subface=" << sf
- << " (complicated case, children)"
- << std::endl;
+ std::cout << "Constraints for cell=" << cell
+ << ", face=" << face
+ << ", subface=" << sf
+ << " (complicated case, children)"
+ << std::endl;
#endif
- filter_constraints (master_dofs,
- slave_dofs,
- constraint_matrix,
- constraints);
- }
+ filter_constraints (master_dofs,
+ slave_dofs,
+ constraint_matrix,
+ constraints);
+ }
- break;
+ break;
+ }
+
+ default:
+ // we shouldn't get here
+ Assert (false, ExcInternalError());
}
+ }
+ else
+ {
+ // this face has no
+ // children, but it
+ // could still be that
+ // it is shared by two
+ // cells that use a
+ // different fe index
+ Assert (cell->face(face)
+ ->fe_index_is_active(cell->active_fe_index()) == true,
+ ExcInternalError());
- default:
- // we shouldn't get here
- Assert (false, ExcInternalError());
- }
- }
- else
- {
- // this face has no
- // children, but it
- // could still be that
- // it is shared by two
- // cells that use a
- // different fe index
- Assert (cell->face(face)
- ->fe_index_is_active(cell->active_fe_index()) == true,
- ExcInternalError());
-
- // Only if there is
- // a neighbor with
- // a different
- // active_fe_index
- // and the same h-level,
- // some action has
- // to be taken.
- if ((DoFHandlerSupportsDifferentFEs<DH>::value == true)
- &&
- !cell->face(face)->at_boundary ()
- &&
- (cell->neighbor(face)->active_fe_index () !=
- cell->active_fe_index ())
- &&
- (!cell->face(face)->has_children() &&
- !cell->neighbor_is_coarser(face) ))
- {
- const typename DH::cell_iterator neighbor = cell->neighbor (face);
+ // see if there is a
+ // neighbor that is
+ // an artificial
+ // cell. in that
+ // case, we're not
+ // interested in this
+ // interface. we test
+ // this case first
+ // since artificial
+ // cells may not have
+ // an active_fe_index
+ // set, etc
+ if (!cell->at_boundary(face)
+ &&
+ cell->neighbor(face)->is_artificial())
+ continue;
+
+ // Only if there is
+ // a neighbor with
+ // a different
+ // active_fe_index
+ // and the same h-level,
+ // some action has
+ // to be taken.
+ if ((DoFHandlerSupportsDifferentFEs<DH>::value == true)
+ &&
+ !cell->face(face)->at_boundary ()
+ &&
+ (cell->neighbor(face)->active_fe_index () !=
+ cell->active_fe_index ())
+ &&
+ (!cell->face(face)->has_children() &&
+ !cell->neighbor_is_coarser(face) ))
+ {
+ const typename DH::cell_iterator neighbor = cell->neighbor (face);
- // see which side of the
- // face we have to
- // constrain
- switch (cell->get_fe().compare_for_face_domination (neighbor->get_fe ()))
- {
- case FiniteElementDomination::this_element_dominates:
+ // see which side of the
+ // face we have to
+ // constrain
+ switch (cell->get_fe().compare_for_face_domination (neighbor->get_fe ()))
{
- // Get DoFs on
- // dominating and
- // dominated side of
- // the face
- master_dofs.resize (cell->get_fe().dofs_per_face);
- cell->face(face)->get_dof_indices (master_dofs,
- cell->active_fe_index ());
-
- slave_dofs.resize (neighbor->get_fe().dofs_per_face);
- cell->face(face)->get_dof_indices (slave_dofs,
- neighbor->active_fe_index ());
-
- // break if the n_master_dofs == 0,
- // because we are attempting to
- // constrain to an element that has
- // has no face dofs
- if(master_dofs.size() == 0) break;
-
- // make sure
- // the element
- // constraints
- // for this
- // face are
- // available
- ensure_existence_of_face_matrix
- (cell->get_fe(),
- neighbor->get_fe(),
- face_interpolation_matrices
- [cell->active_fe_index()][neighbor->active_fe_index()]);
-
- // Add constraints to global constraint
- // matrix.
+ case FiniteElementDomination::this_element_dominates:
+ {
+ // Get DoFs on
+ // dominating and
+ // dominated side of
+ // the face
+ master_dofs.resize (cell->get_fe().dofs_per_face);
+ cell->face(face)->get_dof_indices (master_dofs,
+ cell->active_fe_index ());
+
+ slave_dofs.resize (neighbor->get_fe().dofs_per_face);
+ cell->face(face)->get_dof_indices (slave_dofs,
+ neighbor->active_fe_index ());
+
+                                                // break if master_dofs is
+                                                // empty, because we are
+                                                // attempting to constrain
+                                                // to an element that has
+                                                // no face dofs
+                                                if (master_dofs.size() == 0) break;
+
+ // make sure
+ // the element
+ // constraints
+ // for this
+ // face are
+ // available
+ ensure_existence_of_face_matrix
+ (cell->get_fe(),
+ neighbor->get_fe(),
+ face_interpolation_matrices
+ [cell->active_fe_index()][neighbor->active_fe_index()]);
+
+ // Add constraints to global constraint
+ // matrix.
#ifdef WOLFGANG
- std::cout << "p-constraints for cell=" << cell
- << ", face=" << face << std::endl;
+ std::cout << "p-constraints for cell=" << cell
+ << ", face=" << face << std::endl;
#endif
- filter_constraints (master_dofs,
- slave_dofs,
- *(face_interpolation_matrices
- [cell->active_fe_index()]
- [neighbor->active_fe_index()]),
- constraints);
+ filter_constraints (master_dofs,
+ slave_dofs,
+ *(face_interpolation_matrices
+ [cell->active_fe_index()]
+ [neighbor->active_fe_index()]),
+ constraints);
+
+ break;
+ }
- break;
- }
+ case FiniteElementDomination::other_element_dominates:
+ {
+ // we don't do anything
+ // here since we will
+ // come back to this
+ // face from the other
+ // cell, at which time
+ // we will fall into
+ // the first case
+ // clause above
+ break;
+ }
- case FiniteElementDomination::other_element_dominates:
- {
- // we don't do anything
- // here since we will
- // come back to this
- // face from the other
- // cell, at which time
- // we will fall into
- // the first case
- // clause above
- break;
- }
+ case FiniteElementDomination::either_element_can_dominate:
+ {
+ // it appears as if
+ // neither element has
+ // any constraints on
+ // its neighbor. this
+ // may be because
+ // neither element has
+ // any DoFs on faces at
+ // all. or that the two
+ // elements are
+ // actually the same,
+ // although they happen
+ // to run under
+ // different fe_indices
+ // (this is what
+ // happens in
+ // hp/hp_hanging_nodes_01
+ // for example).
+ //
+ // another possibility
+ // is what happens in
+ // crash_13. there, we
+ // have
+ // FESystem(FE_Q(1),FE_DGQ(0))
+ // vs. FESystem(FE_Q(1),FE_DGQ(1)).
+ // neither of them
+ // dominates the
+ // other. the point is
+ // that it doesn't
+ // matter, since
+ // hopefully for this
+ // case, both sides'
+ // dofs should have
+ // been unified.
+ //
+ // make sure this is
+ // actually true. this
+ // actually only
+ // matters, of course,
+ // if either of the two
+ // finite elements
+ // actually do have
+ // dofs on the face
+ if ((cell->get_fe().dofs_per_face != 0)
+ ||
+ (cell->neighbor(face)->get_fe().dofs_per_face != 0))
+ {
+ Assert (cell->get_fe().dofs_per_face
+ ==
+ cell->neighbor(face)->get_fe().dofs_per_face,
+ ExcNotImplemented());
+
+ // (ab)use the master
+ // and slave dofs
+ // arrays for a
+ // moment here
+ master_dofs.resize (cell->get_fe().dofs_per_face);
+ cell->face(face)
+ ->get_dof_indices (master_dofs,
+ cell->active_fe_index ());
+
+ slave_dofs.resize (cell->neighbor(face)->get_fe().dofs_per_face);
+ cell->face(face)
+ ->get_dof_indices (slave_dofs,
+ cell->neighbor(face)->active_fe_index ());
+
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_face; ++i)
+ AssertDimension (master_dofs[i], slave_dofs[i]);
+ }
- case FiniteElementDomination::either_element_can_dominate:
- {
- // it appears as if
- // neither element has
- // any constraints on
- // its neighbor. this
- // may be because
- // neither element has
- // any DoFs on faces at
- // all. or that the two
- // elements are
- // actually the same,
- // although they happen
- // to run under
- // different fe_indices
- // (this is what
- // happens in
- // hp/hp_hanging_nodes_01
- // for example).
- //
- // another possibility
- // is what happens in
- // crash_13. there, we
- // have
- // FESystem(FE_Q(1),FE_DGQ(0))
- // vs. FESystem(FE_Q(1),FE_DGQ(1)).
- // neither of them
- // dominates the
- // other. the point is
- // that it doesn't
- // matter, since
- // hopefully for this
- // case, both sides'
- // dofs should have
- // been unified.
- //
- // make sure this is
- // actually true. this
- // actually only
- // matters, of course,
- // if either of the two
- // finite elements
- // actually do have
- // dofs on the face
- if ((cell->get_fe().dofs_per_face != 0)
- ||
- (cell->neighbor(face)->get_fe().dofs_per_face != 0))
- {
- Assert (cell->get_fe().dofs_per_face
- ==
- cell->neighbor(face)->get_fe().dofs_per_face,
- ExcNotImplemented());
-
- // (ab)use the master
- // and slave dofs
- // arrays for a
- // moment here
- master_dofs.resize (cell->get_fe().dofs_per_face);
- cell->face(face)
- ->get_dof_indices (master_dofs,
- cell->active_fe_index ());
-
- slave_dofs.resize (cell->neighbor(face)->get_fe().dofs_per_face);
- cell->face(face)
- ->get_dof_indices (slave_dofs,
- cell->neighbor(face)->active_fe_index ());
-
- for (unsigned int i=0; i<cell->get_fe().dofs_per_face; ++i)
- AssertDimension (master_dofs[i], slave_dofs[i]);
- }
+ break;
+ }
- break;
- }
+ case FiniteElementDomination::neither_element_dominates:
+ {
+                                                // we don't presently
+                                                // know what exactly to
+                                                // do here. sit tight
+                                                // until someone trips
+                                                // over the following
+                                                // statement and see
+                                                // what exactly is
+                                                // going on
+ Assert (false, ExcNotImplemented());
+ break;
+ }
- case FiniteElementDomination::neither_element_dominates:
- {
- // we don't presently
- // know what exactly to
- // do here. it isn't quite
- // clear what exactly we
- // would have to do
- // here. sit tight until
- // someone trips over the
- // following statement
- // and see what exactly
- // is going on
- Assert (false, ExcNotImplemented());
- break;
+ default:
+ // we shouldn't get
+ // here
+ Assert (false, ExcInternalError());
}
-
- default:
- // we shouldn't get
- // here
- Assert (false, ExcInternalError());
- }
- }
- }
+ }
+ }
}
}
}
const dealii::hp::FECollection<DH::dimension,DH::space_dimension>
fe_collection (dof.get_fe());
Assert (fe_collection.n_components() < 256, ExcNotImplemented());
- Assert(dofs_by_component.size() == dof.n_dofs(),
- ExcDimensionMismatch(dofs_by_component.size(), dof.n_dofs()));
+ Assert (dofs_by_component.size() == dof.n_locally_owned_dofs(),
+ ExcDimensionMismatch(dofs_by_component.size(),
+ dof.n_locally_owned_dofs()));
// next set up a table for the
// degrees of freedom on each of
else
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
if (fe.is_primitive(i))
- local_component_association[f][i] = fe.system_to_component_index(i).first;
+ local_component_association[f][i] =
+ fe.system_to_component_index(i).first;
else
// if this shape function is
// not primitive, then we have
std::vector<unsigned int> indices;
for (typename DH::active_cell_iterator c=dof.begin_active();
c!=dof.end(); ++ c)
- {
- const unsigned int fe_index = c->active_fe_index();
- const unsigned int dofs_per_cell = c->get_fe().dofs_per_cell;
- indices.resize(dofs_per_cell);
- c->get_dof_indices(indices);
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- dofs_by_component[indices[i]] = local_component_association[fe_index][i];
- }
+ if (!c->is_artificial() && !c->is_ghost())
+ {
+ const unsigned int fe_index = c->active_fe_index();
+ const unsigned int dofs_per_cell = c->get_fe().dofs_per_cell;
+ indices.resize(dofs_per_cell);
+ c->get_dof_indices(indices);
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ if (dof.locally_owned_dofs().is_element(indices[i]))
+ dofs_by_component[dof.locally_owned_dofs().index_within_set(indices[i])]
+ = local_component_association[fe_index][i];
+ }
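+
+      // a note on the indexing used above: dofs_by_component has one
+      // entry for each *locally owned* dof, addressed by its position
+      // within the locally_owned_dofs() IndexSet. for illustration,
+      // if the locally owned dofs were {3,5,8}, then
+      // index_within_set(5) would return 1, and the component of
+      // global dof 5 would be stored in dofs_by_component[1]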
}
}
dof_data = 0;
else
{
- std::vector<unsigned char> component_dofs (dof_handler.n_dofs());
+ std::vector<unsigned char> component_dofs (dof_handler.n_locally_owned_dofs());
std::vector<bool> component_mask (dof_handler.get_fe().n_components(),
false);
component_mask[component] = true;
ExcDimensionMismatch(component_select.size(), n_components(dof)));
}
- Assert(selected_dofs.size() == dof.n_dofs(),
- ExcDimensionMismatch(selected_dofs.size(), dof.n_dofs()));
+ Assert(selected_dofs.size() == dof.n_locally_owned_dofs(),
+ ExcDimensionMismatch(selected_dofs.size(), dof.n_locally_owned_dofs()));
// two special cases: no component
// is selected, and all components
if (std::count (component_select.begin(), component_select.end(), true)
== 0)
{
- std::fill_n (selected_dofs.begin(), dof.n_dofs(), false);
+ std::fill_n (selected_dofs.begin(), dof.n_locally_owned_dofs(), false);
return;
- };
- if (std::count (component_select.begin(), component_select.end(), true)
+ }
+ else if (std::count (component_select.begin(), component_select.end(), true)
== static_cast<signed int>(component_select.size()))
{
- std::fill_n (selected_dofs.begin(), dof.n_dofs(), true);
+ std::fill_n (selected_dofs.begin(), dof.n_locally_owned_dofs(), true);
return;
- };
+ }
// preset all values by false
- std::fill_n (selected_dofs.begin(), dof.n_dofs(), false);
+ std::fill_n (selected_dofs.begin(), dof.n_locally_owned_dofs(), false);
// if we count by blocks, we need to extract
// the association of blocks with local dofs,
                                           // info. Otherwise, we let the
                                           // extract_dofs_by_component function do the
// job.
- std::vector<unsigned char> dofs_by_component (dof.n_dofs());
+ std::vector<unsigned char> dofs_by_component (dof.n_locally_owned_dofs());
internal::extract_dofs_by_component (dof, component_select, count_by_blocks,
dofs_by_component);
- for (unsigned int i=0; i<dof.n_dofs(); ++i)
+ for (unsigned int i=0; i<dof.n_locally_owned_dofs(); ++i)
if (component_select[dofs_by_component[i]] == true)
selected_dofs[i] = true;
}
template <class DH>
void
-DoFTools::extract_subdomain_dofs (const DH &dof_handler,
- const unsigned int subdomain_id,
- std::vector<bool> &selected_dofs)
+DoFTools::extract_subdomain_dofs (const DH &dof_handler,
+ const types::subdomain_id_t subdomain_id,
+ std::vector<bool> &selected_dofs)
{
Assert(selected_dofs.size() == dof_handler.n_dofs(),
ExcDimensionMismatch(selected_dofs.size(), dof_handler.n_dofs()));
+template <class DH>
+void
+DoFTools::extract_locally_owned_dofs (const DH & dof_handler,
+ IndexSet & dof_set)
+{
+ // collect all the locally owned dofs
+ dof_set = dof_handler.locally_owned_dofs();
+ dof_set.compress ();
+}
+
+
+
+template <class DH>
+void
+DoFTools::extract_locally_active_dofs (const DH & dof_handler,
+ IndexSet & dof_set)
+{
+ // collect all the locally owned dofs
+ dof_set = dof_handler.locally_owned_dofs();
+
+  // now add the DoFs of all locally owned
+  // cells to the IndexSet (some of these
+  // may be owned by neighboring processors
+  // if they sit on the interface to a ghost
+  // cell); cache them in a set. need to
+  // check each dof manually because we
+  // can't be sure that the dof range of
+  // locally_owned_dofs is really contiguous.
+ std::vector<unsigned int> dof_indices;
+ std::set<unsigned int> global_dof_indices;
+
+ typename DH::active_cell_iterator cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (!cell->is_ghost() && !cell->is_artificial())
+ {
+ dof_indices.resize(cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices(dof_indices);
+
+ for (std::vector<unsigned int>::iterator it=dof_indices.begin();
+ it!=dof_indices.end();
+ ++it)
+ if (!dof_set.is_element(*it))
+ global_dof_indices.insert(*it);
+ }
+
+ dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
+
+ dof_set.compress();
+}
+
+
+
+template <class DH>
+void
+DoFTools::extract_locally_relevant_dofs (const DH & dof_handler,
+ IndexSet & dof_set)
+{
+ // collect all the locally owned dofs
+ dof_set = dof_handler.locally_owned_dofs();
+
+  // add the DoFs on the adjacent ghost cells
+ // to the IndexSet, cache them in a
+ // set. need to check each dof manually
+ // because we can't be sure that the dof
+ // range of locally_owned_dofs is really
+ // contiguous.
+ std::vector<unsigned int> dof_indices;
+ std::set<unsigned int> global_dof_indices;
+
+ typename DH::active_cell_iterator cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->is_ghost())
+ {
+ dof_indices.resize(cell->get_fe().dofs_per_cell);
+ cell->get_dof_indices(dof_indices);
+
+ for (std::vector<unsigned int>::iterator it=dof_indices.begin();
+ it!=dof_indices.end();
+ ++it)
+ if (!dof_set.is_element(*it))
+ global_dof_indices.insert(*it);
+ }
+
+ dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
+
+ dof_set.compress();
+}
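+
+
+// A minimal usage sketch for the three functions above (illustrative
+// only; 'dof_handler' stands for any DoF handler built on a
+// distributed triangulation). In the usual distributed setting the
+// sets are nested: every locally owned dof is also locally active,
+// and every locally active dof is also locally relevant:
+//
+//   IndexSet owned, active, relevant;
+//   DoFTools::extract_locally_owned_dofs    (dof_handler, owned);
+//   DoFTools::extract_locally_active_dofs   (dof_handler, active);
+//   DoFTools::extract_locally_relevant_dofs (dof_handler, relevant);
+//
+//   Assert (owned.n_elements() <= active.n_elements()
+//           &&
+//           active.n_elements() <= relevant.n_elements(),
+//           ExcInternalError());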
+
+
+
template <class DH>
void
DoFTools::extract_constant_modes (const DH &dof_handler,
if (component_select[i] == true)
localized_component[i] = n_components_selected++;
- std::vector<unsigned char> dofs_by_component (dof_handler.n_dofs());
+ std::vector<unsigned char> dofs_by_component (dof_handler.n_locally_owned_dofs());
internal::extract_dofs_by_component (dof_handler, component_select, false,
dofs_by_component);
unsigned int n_selected_dofs = 0;
component_list[d] = component_select[d];
unsigned int counter = 0;
- for (unsigned int i=0; i<dof_handler.n_dofs(); ++i)
+ for (unsigned int i=0; i<dof_handler.n_locally_owned_dofs(); ++i)
if (component_select[dofs_by_component[i]])
{
constant_modes[localized_component[dofs_by_component[i]]][counter] = true;
template <class DH>
void
DoFTools::get_subdomain_association (const DH &dof_handler,
- std::vector<unsigned int> &subdomain_association)
+ std::vector<types::subdomain_id_t> &subdomain_association)
{
+ // if the Triangulation is distributed, the
+ // only thing we can usefully ask is for
+ // its locally owned subdomain
+ Assert ((dynamic_cast<const parallel::distributed::
+ Triangulation<DH::dimension,DH::space_dimension>*>
+ (&dof_handler.get_tria()) == 0),
+ ExcMessage ("For parallel::distributed::Triangulation objects and "
+ "associated DoF handler objects, asking for any subdomain other "
+ "than the locally owned one does not make sense."));
+
Assert(subdomain_association.size() == dof_handler.n_dofs(),
ExcDimensionMismatch(subdomain_association.size(),
dof_handler.n_dofs()));
// preset all values by an invalid value
std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(),
- numbers::invalid_unsigned_int);
+ types::invalid_subdomain_id);
std::vector<unsigned int> local_dof_indices;
local_dof_indices.reserve (max_dofs_per_cell(dof_handler));
endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
- const unsigned int subdomain_id = cell->subdomain_id();
+ Assert (cell->is_artificial() == false,
+ ExcMessage ("You can't call this function for meshes that "
+ "have artificial cells."));
+
+ const types::subdomain_id_t subdomain_id = cell->subdomain_id();
const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
local_dof_indices.resize (dofs_per_cell);
cell->get_dof_indices (local_dof_indices);
Assert (std::find (subdomain_association.begin(),
subdomain_association.end(),
- numbers::invalid_unsigned_int)
+ types::invalid_subdomain_id)
== subdomain_association.end(),
ExcInternalError());
}
template <class DH>
unsigned int
DoFTools::count_dofs_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain)
+ const types::subdomain_id_t subdomain)
{
- std::vector<unsigned int> subdomain_association (dof_handler.n_dofs());
+ std::vector<types::subdomain_id_t> subdomain_association (dof_handler.n_dofs());
get_subdomain_association (dof_handler, subdomain_association);
return std::count (subdomain_association.begin(),
template <class DH>
IndexSet
DoFTools::dof_indices_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain)
+ const types::subdomain_id_t subdomain)
{
- std::vector<unsigned int> subdomain_association (dof_handler.n_dofs());
- get_subdomain_association (dof_handler, subdomain_association);
+ // if the DoFHandler is distributed, the
+ // only thing we can usefully ask is for
+ // its locally owned subdomain
+ Assert ((dynamic_cast<const parallel::distributed::
+ Triangulation<DH::dimension,DH::space_dimension>*>
+ (&dof_handler.get_tria()) == 0)
+ ||
+ (subdomain ==
+ dynamic_cast<const parallel::distributed::
+ Triangulation<DH::dimension,DH::space_dimension>*>
+ (&dof_handler.get_tria())->locally_owned_subdomain()),
+ ExcMessage ("For parallel::distributed::Triangulation objects and "
+ "associated DoF handler objects, asking for any subdomain other "
+ "than the locally owned one does not make sense."));
IndexSet index_set (dof_handler.n_dofs());
- unsigned int index = 0;
- while (index < dof_handler.n_dofs())
- {
- // find first_of
- while ((index < dof_handler.n_dofs()) &&
- (subdomain_association[index] != subdomain))
- ++index;
- const unsigned int begin = index;
-
- // find first_not_of
- while ((index < dof_handler.n_dofs()) &&
- (subdomain_association[index] == subdomain))
- ++index;
- const unsigned int end = index;
-
- if ((begin < end) && (begin < dof_handler.n_dofs()))
- index_set.add_range (begin, end);
- }
+ std::vector<unsigned int> local_dof_indices;
+ local_dof_indices.reserve (max_dofs_per_cell(dof_handler));
+
+ typename DH::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if ((cell->is_artificial() == false)
+ &&
+ (cell->subdomain_id() == subdomain))
+ {
+ const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+ local_dof_indices.resize (dofs_per_cell);
+ cell->get_dof_indices (local_dof_indices);
+ index_set.add_indices (local_dof_indices.begin(),
+ local_dof_indices.end());
+ }
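+
+  // dofs shared between neighboring cells have been added more than
+  // once above; IndexSet::add_indices accepts such duplicates, and
+  // the compress() call below merges everything into a minimal set
+  // of contiguous ranges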
+ index_set.compress ();
return index_set;
}
template <class DH>
void
DoFTools::count_dofs_with_subdomain_association (const DH &dof_handler,
- const unsigned int subdomain,
+ const types::subdomain_id_t subdomain,
std::vector<unsigned int> &n_dofs_on_subdomain)
{
Assert (n_dofs_on_subdomain.size() == dof_handler.get_fe().n_components(),
// computations
if (n_components == 1)
{
- dofs_per_component[0] = dof_handler.n_dofs();
+ dofs_per_component[0] = dof_handler.n_locally_owned_dofs();
return;
}
// otherwise determine the number
// of dofs in each component
// separately. do so in parallel
- std::vector<unsigned char> dofs_by_component (dof_handler.n_dofs());
+ std::vector<unsigned char> dofs_by_component (dof_handler.n_locally_owned_dofs());
internal::extract_dofs_by_component (dof_handler, std::vector<bool>(), false,
dofs_by_component);
||
(std::accumulate (dofs_per_component.begin(),
dofs_per_component.end(), 0U)
- == dof_handler.n_dofs()),
+ == dof_handler.n_locally_owned_dofs()),
ExcInternalError());
+
+ // reduce information from all CPUs
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (const parallel::distributed::Triangulation<dim> * tria
+ = (dynamic_cast<const parallel::distributed::Triangulation<dim>*>
+ (&dof_handler.get_tria())))
+ {
+ std::vector<unsigned int> local_dof_count = dofs_per_component;
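+      // (a separate send buffer is used because MPI_Allreduce may not
+      // be called with the same buffer as send and receive argument;
+      // the alternative would be to pass MPI_IN_PLACE as send buffer)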
+
+      MPI_Allreduce (&local_dof_count[0], &dofs_per_component[0], n_target_components,
+                     MPI_UNSIGNED, MPI_SUM, tria->get_communicator());
+ }
+#endif
}
+
template <int dim, int spacedim>
void
DoFTools::
}
// otherwise determine the number
// of dofs in each block
- // separately. do so in parallel
- std::vector<unsigned char> dofs_by_block (dof_handler.n_dofs());
+ // separately.
+ std::vector<unsigned char> dofs_by_block (dof_handler.n_locally_owned_dofs());
internal::extract_dofs_by_component (dof_handler, std::vector<bool>(),
true, dofs_by_block);
// next count what we got
- for (unsigned int block=0;block<fe.n_blocks();++block)
+ for (unsigned int block=0; block<fe.n_blocks(); ++block)
dofs_per_block[target_block[block]]
+= std::count(dofs_by_block.begin(), dofs_by_block.end(),
block);
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ // if we are working on a parallel
+ // mesh, we now need to collect
+ // this information from all
+ // processors
+ if (const parallel::distributed::Triangulation<dim> * tria
+ = (dynamic_cast<const parallel::distributed::Triangulation<dim>*>
+ (&dof_handler.get_tria())))
+ {
+ std::vector<unsigned int> local_dof_count = dofs_per_block;
+      MPI_Allreduce (&local_dof_count[0], &dofs_per_block[0], n_target_blocks,
+                     MPI_UNSIGNED, MPI_SUM, tria->get_communicator());
+ }
+#endif
}
+
template <int dim, int spacedim>
void
DoFTools::
++face_no)
{
const FiniteElement<dim,spacedim> &fe = cell->get_fe();
-
+
typename DH<dim,spacedim>::face_iterator face = cell->face(face_no);
if (face->boundary_indicator () == 0)
// face is of the right component
nonzero = true;
break;
}
-
+
if (nonzero)
zero_boundary_constraints.add_line (face_dofs[i]);
}
void
DoFTools::extract_subdomain_dofs<DoFHandler<deal_II_dimension> >
(const DoFHandler<deal_II_dimension> &dof_handler,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
std::vector<bool> &selected_dofs);
template
void
DoFTools::extract_subdomain_dofs<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &dof_handler,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
std::vector<bool> &selected_dofs);
+template
+void
+DoFTools::extract_locally_owned_dofs<DoFHandler<deal_II_dimension> >
+(const DoFHandler<deal_II_dimension> & dof_handler,
+ IndexSet & dof_set);
+
+template
+void
+DoFTools::extract_locally_active_dofs<DoFHandler<deal_II_dimension> >
+(const DoFHandler<deal_II_dimension> & dof_handler,
+ IndexSet & dof_set);
+
+
+template
+void
+DoFTools::extract_locally_relevant_dofs<DoFHandler<deal_II_dimension> >
+(const DoFHandler<deal_II_dimension> & dof_handler,
+ IndexSet & dof_set);
+
template
void
DoFTools::extract_constant_modes<DoFHandler<deal_II_dimension> >
void
DoFTools::get_subdomain_association<DoFHandler<deal_II_dimension> >
(const DoFHandler<deal_II_dimension> &dof_handler,
- std::vector<unsigned int> &subdomain_association);
+ std::vector<types::subdomain_id_t> &subdomain_association);
template
void
DoFTools::get_subdomain_association<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &dof_handler,
- std::vector<unsigned int> &subdomain_association);
+ std::vector<types::subdomain_id_t> &subdomain_association);
template
unsigned int
DoFTools::count_dofs_with_subdomain_association<DoFHandler<deal_II_dimension> >
(const DoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
IndexSet
DoFTools::dof_indices_with_subdomain_association<DoFHandler<deal_II_dimension> >
(const DoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
void
DoFTools::count_dofs_with_subdomain_association<DoFHandler<deal_II_dimension> >
(const DoFHandler<deal_II_dimension> &,
- const unsigned int,
+ const types::subdomain_id_t,
std::vector<unsigned int> &);
template
unsigned int
DoFTools::count_dofs_with_subdomain_association<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
IndexSet
DoFTools::dof_indices_with_subdomain_association<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
void
DoFTools::count_dofs_with_subdomain_association<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &,
- const unsigned int,
+ const types::subdomain_id_t,
std::vector<unsigned int> &);
template
unsigned int
DoFTools::count_dofs_with_subdomain_association<MGDoFHandler<deal_II_dimension> >
(const MGDoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
IndexSet
DoFTools::dof_indices_with_subdomain_association<MGDoFHandler<deal_II_dimension> >
(const MGDoFHandler<deal_II_dimension> &,
- const unsigned int);
+ const types::subdomain_id_t);
template
void
DoFTools::count_dofs_with_subdomain_association<MGDoFHandler<deal_II_dimension> >
(const MGDoFHandler<deal_II_dimension> &,
- const unsigned int,
+ const types::subdomain_id_t,
std::vector<unsigned int> &);
const DoFHandler<deal_II_dimension>&,
std::vector<unsigned int>&, std::vector<unsigned int>);
+
template
void
DoFTools::count_dofs_per_component<deal_II_dimension> (
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <base/memory_consumption.h>
+#include <dofs/number_cache.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ namespace DoFHandler
+ {
+ NumberCache::NumberCache ()
+ :
+ n_global_dofs (0),
+ n_locally_owned_dofs (0)
+ {}
+
+
+
+ unsigned int
+ NumberCache::memory_consumption () const
+ {
+ return
+ MemoryConsumption::memory_consumption (n_global_dofs) +
+ MemoryConsumption::memory_consumption (n_locally_owned_dofs) +
+ MemoryConsumption::memory_consumption (locally_owned_dofs) +
+ MemoryConsumption::memory_consumption (n_locally_owned_dofs_per_processor) +
+ MemoryConsumption::memory_consumption (locally_owned_dofs_per_processor);
+ }
+ }
+}
+
+DEAL_II_NAMESPACE_CLOSE
//
//---------------------------------------------------------------------------
-#include <iostream>
-
#include <base/quadrature_lib.h>
#include <base/qprojector.h>
#include <base/thread_management.h>
#include <base/std_cxx1x/shared_ptr.h>
+#include <iostream>
+
DEAL_II_NAMESPACE_OPEN
// $Id$
// Version: $Name$
//
-// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
//
//---------------------------------------------------------------------------
+#include <base/template_constraints.h>
#include <lac/vector.h>
+#include <lac/block_vector_base.h>
+#include <lac/block_vector.h>
#include <lac/petsc_vector.h>
+#include <lac/petsc_block_vector.h>
#include <lac/trilinos_vector.h>
+#include <lac/trilinos_block_vector.h>
#include <grid/grid_refinement.h>
#include <grid/tria_accessor.h>
DEAL_II_NAMESPACE_OPEN
-namespace
+namespace
{
- template <typename number>
- inline
- number
- max_element (const Vector<number> &criteria)
+ namespace internal
{
- return *std::max_element(criteria.begin(), criteria.end());
- }
+ template <typename number>
+ inline
+ number
+ max_element (const Vector<number> &criteria)
+ {
+ return *std::max_element(criteria.begin(), criteria.end());
+ }
-
- template <typename number>
- inline
- number
- min_element (const Vector<number> &criteria)
- {
- return *std::min_element(criteria.begin(), criteria.end());
+
+ template <typename number>
+ inline
+ number
+ min_element (const Vector<number> &criteria)
+ {
+ return *std::min_element(criteria.begin(), criteria.end());
+ }
+
+
+#ifdef DEAL_II_USE_PETSC
+ PetscScalar
+ max_element (const PETScWrappers::Vector &criteria)
+ {
+ // this is horribly slow (since we have
+ // to get the array of values from PETSc
+ // in every iteration), but works
+ PetscScalar m = 0;
+ for (unsigned int i=0; i<criteria.size(); ++i)
+ m = std::max (m, criteria(i));
+ return m;
+ }
+
+
+ PetscScalar
+ min_element (const PETScWrappers::Vector &criteria)
+ {
+ // this is horribly slow (since we have
+ // to get the array of values from PETSc
+ // in every iteration), but works
+ PetscScalar m = criteria(0);
+ for (unsigned int i=1; i<criteria.size(); ++i)
+ m = std::min (m, criteria(i));
+ return m;
+ }
+#endif
+
+
+#ifdef DEAL_II_USE_TRILINOS
+ TrilinosScalar
+ max_element (const TrilinosWrappers::Vector &criteria)
+ {
+ TrilinosScalar m = 0;
+ criteria.trilinos_vector().MaxValue(&m);
+ return m;
+ }
+
+
+ TrilinosScalar
+ min_element (const TrilinosWrappers::Vector &criteria)
+ {
+ TrilinosScalar m = 0;
+ criteria.trilinos_vector().MinValue(&m);
+ return m;
+ }
+#endif
}
-#ifdef DEAL_II_USE_PETSC
- PetscScalar
- max_element (const PETScWrappers::Vector &criteria)
+ template <typename Vector>
+ typename constraint_and_return_value<!IsBlockVector<Vector>::value,
+ typename Vector::value_type>::type
+ min_element (const Vector &criteria)
{
- // this is horribly slow (since we have
- // to get the array of values from PETSc
- // in every iteration), but works
- PetscScalar m = 0;
- for (unsigned int i=0; i<criteria.size(); ++i)
- m = std::max (m, criteria(i));
- return m;
+ return internal::min_element (criteria);
}
- PetscScalar
- min_element (const PETScWrappers::Vector &criteria)
+ template <typename Vector>
+ typename constraint_and_return_value<!IsBlockVector<Vector>::value,
+ typename Vector::value_type>::type
+ max_element (const Vector &criteria)
{
- // this is horribly slow (since we have
- // to get the array of values from PETSc
- // in every iteration), but works
- PetscScalar m = criteria(0);
- for (unsigned int i=1; i<criteria.size(); ++i)
- m = std::min (m, criteria(i));
- return m;
+ return internal::max_element (criteria);
}
-#endif
-#ifdef DEAL_II_USE_TRILINOS
- TrilinosScalar
- max_element (const TrilinosWrappers::Vector &criteria)
+ template <typename Vector>
+ typename constraint_and_return_value<IsBlockVector<Vector>::value,
+ typename Vector::value_type>::type
+ min_element (const Vector &criteria)
{
- TrilinosScalar m = 0;
- criteria.trilinos_vector().MaxValue(&m);
- return m;
+ typename Vector::value_type t = internal::min_element(criteria.block(0));
+ for (unsigned int b=1; b<criteria.n_blocks(); ++b)
+ t = std::min (t, internal::min_element(criteria.block(b)));
+
+ return t;
}
- TrilinosScalar
- min_element (const TrilinosWrappers::Vector &criteria)
+ template <typename Vector>
+ typename constraint_and_return_value<IsBlockVector<Vector>::value,
+ typename Vector::value_type>::type
+ max_element (const Vector &criteria)
{
- TrilinosScalar m = 0;
- criteria.trilinos_vector().MinValue(&m);
- return m;
+ typename Vector::value_type t = internal::max_element(criteria.block(0));
+ for (unsigned int b=1; b<criteria.n_blocks(); ++b)
+ t = std::max (t, internal::max_element(criteria.block(b)));
+
+ return t;
}
-#endif
+
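+
+  // the pairs of min_element/max_element overloads above rely on
+  // constraint_and_return_value<cond,T>, which only provides a 'type'
+  // member if 'cond' is true (an enable_if-style trait). for any
+  // given vector type therefore exactly one overload of each pair
+  // participates in overload resolution: block vectors loop over
+  // their blocks, all other vectors forward directly to the helpers
+  // in the internal namespace. a minimal sketch of the same idiom
+  // (hypothetical names, for illustration only):
+  //
+  //   template <bool, typename T> struct enable_if_c          {};
+  //   template <typename T>       struct enable_if_c<true, T> { typedef T type; };
+  //
+  //   template <typename V>
+  //   typename enable_if_c< IsBlockVector<V>::value, void>::type f (const V &);
+  //   template <typename V>
+  //   typename enable_if_c<!IsBlockVector<V>::value, void>::type f (const V &);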
}
{
int i,j;
typename Vector::value_type v;
-
+
if (r<=l)
return;
j = r;
do
{
- do
+ do
{
++i;
}
--j;
}
while ((a(ind[j])<v) && (j>0));
-
+
if (i<j)
std::swap (ind[i], ind[j]);
else
}
while (i<j);
qsort_index(a,ind,l,i-1);
- qsort_index(a,ind,i+1,r);
+ qsort_index(a,ind,i+1,r);
}
}
Assert (criteria.size() == tria.n_active_cells(),
ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
Assert (criteria.is_non_negative (), ExcNegativeCriteria());
-
+
// when all indicators are zero we
// do not need to refine but only
// to coarsen
if (criteria.all_zero())
return;
-
+
typename Triangulation<dim,spacedim>::active_cell_iterator cell = tria.begin_active();
const unsigned int n_cells = criteria.size();
//TODO: This is undocumented, looks fishy and seems unnecessary
-
+
double new_threshold=threshold;
// when threshold==0 find the
// smallest value in criteria
&& (criteria(index)<new_threshold))
new_threshold=criteria(index);
}
-
+
for (unsigned int index=0; index<n_cells; ++cell, ++index)
if (std::fabs(criteria(index)) >= new_threshold)
cell->set_refine_flag();
typename Triangulation<dim,spacedim>::active_cell_iterator cell = tria.begin_active();
const unsigned int n_cells = criteria.size();
-
+
for (unsigned int index=0; index<n_cells; ++cell, ++index)
if (std::fabs(criteria(index)) <= threshold)
if (!cell->refine_flag_set())
// refining cells and instead try to
// only coarsen as many as it would
// take to get to the target
-
+
// as we have no information on cells
// being refined isotropically or
// anisotropically, assume isotropic
refine_cells = static_cast<int> (refine_cells * alpha);
coarsen_cells = static_cast<int> (coarsen_cells * alpha);
}
-
+
if (refine_cells || coarsen_cells)
{
- dealii::Vector<typename Vector::value_type> tmp(criteria);
+ dealii::Vector<typename Vector::value_type> tmp (criteria);
if (refine_cells)
{
std::nth_element (tmp.begin(), tmp.begin()+refine_cells,
Assert ((bottom_fraction>=0) && (bottom_fraction<=1), ExcInvalidParameterValue());
Assert (top_fraction+bottom_fraction <= 1, ExcInvalidParameterValue());
Assert (criteria.is_non_negative (), ExcNegativeCriteria());
-
+
// let tmp be the cellwise square of the
// error, which is what we have to sum
// up and compare with
// @p{fraction_of_error*total_error}.
- dealii::Vector<typename Vector::value_type> tmp(criteria);
+ dealii::Vector<typename Vector::value_type> tmp;
+ tmp = criteria;
const double total_error = tmp.l1_norm();
// sort the largest criteria to the
return;
}
}
-
+
// in some rare cases it may happen that
// both thresholds are the same (e.g. if
if ((top_threshold == max_element(criteria)) &&
(top_fraction != 1))
top_threshold *= 0.999;
-
+
if (bottom_threshold>=top_threshold)
bottom_threshold = 0.999*top_threshold;
-
+
// actually flag cells
if (top_threshold < max_element(criteria))
refine (tria, criteria, top_threshold);
-
+
if (bottom_threshold > min_element(criteria))
coarsen (tria, criteria, bottom_threshold);
}
Assert (criteria.size() == tria.n_active_cells(),
ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
Assert (criteria.is_non_negative (), ExcNegativeCriteria());
-
+
// get an increasing order on
// the error indicator
std::vector<unsigned int> tmp(criteria.size());
for (unsigned int i=0;i<criteria.size();++i)
tmp[i] = i;
-
+
qsort_index(criteria,tmp,0,criteria.size()-1);
-
+
double s0 = 0.75 * criteria(tmp[0]);
double E = criteria.l1_norm();
-
+
unsigned int N = criteria.size();
unsigned int M = 0;
-
+
// The first M cells are refined
// to minimize the expected error
// multiplied with the expected
// N+3*M (N is the current number
// of cells)
double min = (3.*(1.+M)+N) * (E-s0);
-
+
unsigned int minArg = N-1;
-
+
for (M=1;M<criteria.size();++M)
{
s0+= 0.75 * criteria(tmp[M]);
-
+
if ( (3.*(1+M)+N)*(E-s0) <= min)
{
min = (3.*(1+M)+N)*(E-s0);
// explicit instantiations
-template
-void
-GridRefinement::
-refine<deal_II_dimension,Vector<float>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<float> &,
- const double);
-
-template
-void
-GridRefinement::
-refine<deal_II_dimension,Vector<double>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<double> &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,Vector<float>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<float> &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,Vector<double>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<double> &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,Vector<double>,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const Vector<double> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,Vector<float>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<float> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,Vector<double>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<double> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,Vector<float>,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const Vector<float> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,Vector<float>,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const Vector<float> &);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,Vector<double>,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const Vector<double> &);
-
-#ifdef DEAL_II_USE_PETSC
-template
-void
-GridRefinement::
-refine<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const PETScWrappers::Vector &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const PETScWrappers::Vector &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const PETScWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const PETScWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const PETScWrappers::Vector &);
-#endif
-
-#ifdef DEAL_II_USE_TRILINOS
-template
-void
-GridRefinement::
-refine<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension>
- (Triangulation<deal_II_dimension> &,
- const TrilinosWrappers::Vector &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const TrilinosWrappers::Vector &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const TrilinosWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const TrilinosWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension>
-(Triangulation<deal_II_dimension> &,
- const TrilinosWrappers::Vector &);
-#endif
-
-
-
-#if deal_II_dimension != 3
-// explicit instantiations
-template
-void
-GridRefinement::
-refine<deal_II_dimension,Vector<float>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<float> &,
- const double);
-
-template
-void
-GridRefinement::
-refine<deal_II_dimension,Vector<double>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<double> &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,Vector<float>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<float> &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,Vector<double>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<double> &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,Vector<double>,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<double> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,Vector<float>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<float> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,Vector<double>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<double> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,Vector<float>,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<float> &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,Vector<float>,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<float> &);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,Vector<double>,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const Vector<double> &);
-
-#ifdef DEAL_II_USE_PETSC
-template
-void
-GridRefinement::
-refine<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const PETScWrappers::Vector &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const PETScWrappers::Vector &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const PETScWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const PETScWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,PETScWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const PETScWrappers::Vector &);
-#endif
-
-#ifdef DEAL_II_USE_TRILINOS
-template
-void
-GridRefinement::
-refine<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension+1>
- (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const TrilinosWrappers::Vector &,
- const double);
-
-template
-void
-GridRefinement::
-coarsen<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const TrilinosWrappers::Vector &,
- const double);
-
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_number<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const TrilinosWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_fixed_fraction<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const TrilinosWrappers::Vector &,
- const double,
- const double,
- const unsigned int);
-
-template
-void
-GridRefinement::
-refine_and_coarsen_optimize<deal_II_dimension,TrilinosWrappers::Vector,deal_II_dimension+1>
-(Triangulation<deal_II_dimension,deal_II_dimension+1> &,
- const TrilinosWrappers::Vector &);
-#endif
-
-
-#endif
+#include "grid_refinement.inst"
DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+
+for (S : REAL_SCALARS)
+{
+ template
+ void
+ GridRefinement::
+ refine<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double);
+
+ template
+ void
+ GridRefinement::
+ coarsen<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_fixed_number<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double,
+ const unsigned int);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_fixed_fraction<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double,
+ const unsigned int);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_optimize<deal_II_dimension,dealii::Vector<S>,deal_II_dimension>
+ (Triangulation<deal_II_dimension> &,
+ const dealii::Vector<S> &);
+
+#if deal_II_dimension < 3
+ template
+ void
+ GridRefinement::
+ refine<deal_II_dimension,dealii::Vector<S>,deal_II_dimension+1>
+ (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
+ const dealii::Vector<S> &,
+ const double);
+
+ template
+ void
+ GridRefinement::
+ coarsen<deal_II_dimension,dealii::Vector<S>,deal_II_dimension+1>
+ (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
+ const dealii::Vector<S> &,
+ const double);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_fixed_number<deal_II_dimension,dealii::Vector<S>,deal_II_dimension+1>
+ (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double,
+ const unsigned int);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_fixed_fraction<deal_II_dimension,dealii::Vector<S>,deal_II_dimension+1>
+ (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
+ const dealii::Vector<S> &,
+ const double,
+ const double,
+ const unsigned int);
+
+ template
+ void
+ GridRefinement::
+ refine_and_coarsen_optimize<deal_II_dimension,dealii::Vector<S>,deal_II_dimension+1>
+ (Triangulation<deal_II_dimension,deal_II_dimension+1> &,
+ const dealii::Vector<S> &);
+#endif
+}
void
GridTools::
get_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
- std::vector<unsigned int> &subdomain)
+ std::vector<types::subdomain_id_t> &subdomain)
{
Assert (subdomain.size() == triangulation.n_active_cells(),
ExcDimensionMismatch (subdomain.size(),
unsigned int
GridTools::
count_cells_with_subdomain_association (const Triangulation<dim, spacedim> &triangulation,
- const unsigned int subdomain)
+ const types::subdomain_id_t subdomain)
{
unsigned int count = 0;
for (typename Triangulation<dim, spacedim>::active_cell_iterator
if (cell->subdomain_id() == subdomain)
++count;
- Assert (count != 0, ExcNonExistentSubdomain(subdomain));
-
return count;
}
get_face_connectivity_of_cells (const Triangulation<deal_II_dimension> &triangulation,
SparsityPattern &cell_connectivity);
+#if deal_II_dimension < 3
+template
+void
+GridTools::
+get_face_connectivity_of_cells (const Triangulation<deal_II_dimension,deal_II_dimension+1> &triangulation,
+ SparsityPattern &cell_connectivity);
+#endif
+
template
void
GridTools::partition_triangulation (const unsigned int,
void
GridTools::
get_subdomain_association (const Triangulation<deal_II_dimension> &,
- std::vector<unsigned int> &);
+ std::vector<types::subdomain_id_t> &);
template
unsigned int
GridTools::
count_cells_with_subdomain_association (const Triangulation<deal_II_dimension> &,
- const unsigned int );
+ const types::subdomain_id_t);
template
subcells[i]->set_parent (cell->index ());
}
+
+
// set child index for
// even children children
// i=0,2 (0)
Triangulation (const MeshSmoothing smooth_grid,
const bool check_for_distorted_cells)
:
+ smooth_grid(smooth_grid),
faces(NULL),
anisotropic_refinement(false),
- smooth_grid(smooth_grid),
check_for_distorted_cells(check_for_distorted_cells)
{
// set default boundary for all
}
+
template <int dim, int spacedim>
Triangulation<dim, spacedim>::~Triangulation ()
{
}
+
template <int dim, int spacedim>
void Triangulation<dim, spacedim>::clear ()
{
#if deal_II_dimension == 1
+
+
template <>
unsigned int Triangulation<1,1>::n_raw_lines (const unsigned int level) const
{
#if deal_II_dimension == 1
+
template <>
unsigned int Triangulation<1,1>::max_adjacent_cells () const
{
{
prepare_coarsening_and_refinement ();
+ // verify a case with which we have had
+ // some difficulty in the past (see the
+ // deal.II/coarsening_* tests)
+ if (smooth_grid & limit_level_difference_at_vertices)
+ Assert (satisfies_level1_at_vertex_rule (*this) == true,
+ ExcInternalError());
+
// Inform RefinementListeners
// about beginning of refinement.
typename std::list<RefinementListener *>::iterator ref_listener =
execute_coarsening();
const DistortedCellList
- cells_with_distorted_children
- = execute_refinement();
+ cells_with_distorted_children = execute_refinement();
// verify a case with which we have had
// some difficulty in the past (see the
std::vector<bool> previous_coarsen_flags (n_active_cells());
save_coarsen_flags (previous_coarsen_flags);
+ std::vector<int> vertex_level (vertices.size(), 0);
+
bool continue_iterating = true;
do
// store highest level one
// of the cells adjacent to
// a vertex belongs to
- std::vector<int> vertex_level (vertices.size(), 0);
+ std::fill (vertex_level.begin(), vertex_level.end(), 0);
active_cell_iterator cell = begin_active(),
endc = end();
for (; cell!=endc; ++cell)
namespace
{
+
+ // check if the given @param cell marked
+ // for coarsening would produce an
+ // unrefined island. To break up long
+ // chains of these cells we recursively
+ // check our neighbors in case we change
+ // this cell. This reduces the number of
+ // outer iterations dramatically.
+ template <int dim, int spacedim>
+ void
+ possibly_do_not_produce_unrefined_islands(
+ const typename Triangulation<dim,spacedim>::cell_iterator &cell)
+ {
+ Assert (cell->has_children(), ExcInternalError());
+
+ unsigned int n_neighbors=0;
+ // count all neighbors
+ // that will be refined
+ // along the face of our
+ // cell after the next
+ // step
+ unsigned int count=0;
+ for (unsigned int n=0; n<GeometryInfo<dim>::faces_per_cell; ++n)
+ {
+ const typename Triangulation<dim,spacedim>::cell_iterator neighbor = cell->neighbor(n);
+ if (neighbor.state() == IteratorState::valid)
+ {
+ ++n_neighbors;
+ if (face_will_be_refined_by_neighbor(cell,n))
+ ++count;
+ }
+ }
+ // clear coarsen flags if
+ // either all existing
+ // neighbors will be
+ // refined or all but one
+ // will be and the cell
+ // is in the interior of
+ // the domain
+ if (count==n_neighbors ||
+ (count>=n_neighbors-1 &&
+ n_neighbors == GeometryInfo<dim>::faces_per_cell) )
+ {
+ for (unsigned int c=0; c<cell->n_children(); ++c)
+ cell->child(c)->clear_coarsen_flag();
+
+ for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
+ if (!cell->at_boundary(face)
+ &&
+ ( !cell->neighbor(face)->active() )
+ && (cell_will_be_coarsened(cell->neighbor(face))) )
+ possibly_do_not_produce_unrefined_islands<dim,spacedim>( cell->neighbor(face) );
+ }
+ }
+
+
// see if the current cell needs to
// be refined to avoid unrefined
// islands.
cell->clear_coarsen_flag();
}
-
bool mesh_changed_in_this_loop = false;
do
{
-
//////////////////////////////////////
// STEP 1:
// do not coarsen a cell if 'most of
for (cell=begin(); cell!=endc; ++cell)
{
- if (!cell->active())
- {
// only do something if this
// cell will be coarsened
- if (cell_will_be_coarsened(cell))
- {
- unsigned int n_neighbors=0;
- // count all neighbors
- // that will be refined
- // along the face of our
- // cell after the next
- // step
- unsigned int count=0;
- for (unsigned int n=0;
- n<GeometryInfo<dim>::faces_per_cell; ++n)
- {
- const cell_iterator neighbor = cell->neighbor(n);
- if (neighbor.state() == IteratorState::valid)
- {
- ++n_neighbors;
- if (face_will_be_refined_by_neighbor(cell,n))
- ++count;
- }
- }
- // clear coarsen flags if
- // either all existing
- // neighbors will be
- // refined or all but one
- // will be and the cell
- // is in the interior of
- // the domain
- if (count==n_neighbors ||
- (count>=n_neighbors-1 &&
- n_neighbors==
- GeometryInfo<dim>::faces_per_cell))
- for (unsigned int c=0; c<cell->n_children(); ++c)
- cell->child(c)->clear_coarsen_flag();
- }
- } // if (!cell->active())
- } // for (all cells)
- } // if (smooth_grid & ...)
+ if (!cell->active() && cell_will_be_coarsened(cell))
+ possibly_do_not_produce_unrefined_islands<dim,spacedim>(cell);
+ }
+ }
//////////////////////////////////////
// priority.
// If patch_level_1 is set, this will
// be automatically fulfilled.
+ //
+ // there is one corner case
+ // to consider: if this is a
+ // distributed
+ // triangulation, there may
+ // be refined islands on the
+ // boundary of which we own
+ // only part (e.g. a single
+ // cell in the corner of a
+ // domain). the rest of the
+ // island is ghost cells and
+ // it *looks* like the area
+ // around it (artificial
+ // cells) are coarser but
+ // this is only because they
+ // may actually be equally
+ // fine on other
+ // processors. it's hard to
+ // detect this case but we
+ // can do the following:
+ // only set coarsen flags to
+ // remove this refined
+ // island if all cells we
+ // want to set flags on are
+ // locally owned
if (smooth_grid & (eliminate_refined_inner_islands |
eliminate_refined_boundary_islands) &&
!(smooth_grid & patch_level_1))
const cell_iterator endc = end();
for (cell=begin(); cell!=endc; ++cell)
- if (!cell->active() || (cell->active() && cell->refine_flag_set()))
+ if (!cell->active() ||
+ (cell->active() &&
+ cell->refine_flag_set() &&
+ !cell->is_ghost() &&
+ !cell->is_artificial()))
{
// check whether all
// children are
bool all_children_active = true;
if (!cell->active())
for (unsigned int c=0; c<cell->n_children(); ++c)
- if (!cell->child(c)->active())
+ if (!cell->child(c)->active() ||
+ cell->child(c)->is_ghost() ||
+ cell->child(c)->is_artificial())
{
all_children_active = false;
break;
template <int dim, int spacedim>
-unsigned int CellAccessor<dim, spacedim>::subdomain_id () const
+types::subdomain_id_t CellAccessor<dim, spacedim>::subdomain_id () const
{
Assert (this->used(), TriaAccessorExceptions::ExcCellNotUsed());
return this->tria->levels[this->present_level]->subdomain_ids[this->present_index];
template <int dim, int spacedim>
void
-CellAccessor<dim, spacedim>::set_subdomain_id (const unsigned int new_subdomain_id) const
+CellAccessor<dim, spacedim>::set_subdomain_id (const types::subdomain_id_t new_subdomain_id) const
{
Assert (this->used(), TriaAccessorExceptions::ExcCellNotUsed());
this->tria->levels[this->present_level]->subdomain_ids[this->present_index]
}
+template <int dim, int spacedim>
+void
+CellAccessor<dim, spacedim>::
+recursively_set_subdomain_id (const types::subdomain_id_t new_subdomain_id) const
+{
+ set_subdomain_id (new_subdomain_id);
+
+ if (this->has_children())
+ for (unsigned int c=0; c<this->n_children(); ++c)
+ this->child(c)->recursively_set_subdomain_id (new_subdomain_id);
+}
+
+
+
template <int dim, int spacedim>
void CellAccessor<dim, spacedim>::set_neighbor (const unsigned int i,
const TriaIterator<CellAccessor<dim, spacedim> > &pointer) const
DEAL_II_NAMESPACE_OPEN
+namespace parallel
+{
+ namespace distributed
+ {
+ template <int, int> class Triangulation;
+ }
+}
+
+
namespace internal
{
namespace hp
DoFHandler<dim,spacedim>::DoFHandler (const Triangulation<dim,spacedim> &tria)
:
tria(&tria, typeid(*this).name()),
- faces (NULL),
- used_dofs (0)
- {
+ faces (NULL)
+ {
+ Assert ((dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&tria)
+ == 0),
+ ExcMessage ("The given triangulation is parallel distributed but "
+ "this class does not currently support this."));
+
create_active_fe_table ();
tria.add_refinement_listener (*this);
}
MemoryConsumption::memory_consumption (tria) +
MemoryConsumption::memory_consumption (levels) +
MemoryConsumption::memory_consumption (*faces) +
- MemoryConsumption::memory_consumption (used_dofs) +
+ MemoryConsumption::memory_consumption (number_cache) +
MemoryConsumption::memory_consumption (vertex_dofs) +
MemoryConsumption::memory_consumption (vertex_dofs_offsets) +
MemoryConsumption::memory_consumption (has_children));
= internal::hp::DoFHandler::Implementation::template distribute_dofs_on_cell<spacedim> (cell,
next_free_dof);
- used_dofs = next_free_dof;
+ number_cache.n_global_dofs = next_free_dof;
}
// lower-dimensional objects
// where elements come together
std::vector<unsigned int>
- constrained_indices (used_dofs, numbers::invalid_unsigned_int);
+ constrained_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
compute_vertex_dof_identities (constrained_indices);
compute_line_dof_identities (constrained_indices);
compute_quad_dof_identities (constrained_indices);
// new numbers to those which are
// not constrained
std::vector<unsigned int>
- new_dof_indices (used_dofs, numbers::invalid_unsigned_int);
+ new_dof_indices (number_cache.n_global_dofs, numbers::invalid_unsigned_int);
unsigned int next_free_dof = 0;
- for (unsigned int i=0; i<used_dofs; ++i)
+ for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
if (constrained_indices[i] == numbers::invalid_unsigned_int)
{
new_dof_indices[i] = next_free_dof;
// then loop over all those that
// are constrained and record the
// new dof number for those:
- for (unsigned int i=0; i<used_dofs; ++i)
+ for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
if (constrained_indices[i] != numbers::invalid_unsigned_int)
{
Assert (new_dof_indices[constrained_indices[i]] !=
new_dof_indices[i] = new_dof_indices[constrained_indices[i]];
}
- for (unsigned int i=0; i<used_dofs; ++i)
+ for (unsigned int i=0; i<number_cache.n_global_dofs; ++i)
{
Assert (new_dof_indices[i] != numbers::invalid_unsigned_int,
ExcInternalError());
// used dof indices
renumber_dofs_internal (new_dof_indices, internal::int2type<dim>());
- used_dofs = next_free_dof;
+ // now set the elements of the
+ // number cache appropriately
+ number_cache.n_global_dofs = next_free_dof;
+ number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
+
+ number_cache.locally_owned_dofs
+ = IndexSet (number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs.add_range (0,
+ number_cache.n_global_dofs);
+
+ number_cache.n_locally_owned_dofs_per_processor
+ = std::vector<unsigned int> (1,
+ number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs_per_processor
+ = std::vector<IndexSet> (1,
+ number_cache.locally_owned_dofs);
// finally restore the user flags
const_cast<Triangulation<dim,spacedim> &>(*tria).load_user_flags(user_flags);
DEAL_II_NAMESPACE_OPEN
+namespace parallel
+{
+ namespace distributed
+ {
+ template <int, int> class Triangulation;
+ }
+}
+
//TODO[WB]: this class is currently only implemented for dim==spacedim. to
//make the general case happen it should undergo a similar transformation as
{
const unsigned int dim = 1;
- Assert (mg_dof_handler.get_tria().n_levels() > 0, DoFHandler<1>::ExcInvalidTriangulation());
+ Assert (mg_dof_handler.get_tria().n_levels() > 0,
+ ExcMessage("Invalid triangulation"));
//////////////////////////
// DESTRUCTION
void reserve_space (MGDoFHandler<2,spacedim> &mg_dof_handler)
{
const unsigned int dim = 2;
- Assert (mg_dof_handler.get_tria().n_levels() > 0, DoFHandler<2>::ExcInvalidTriangulation());
+ Assert (mg_dof_handler.get_tria().n_levels() > 0,
+ ExcMessage("Invalid triangulation"));
////////////////////////////
// DESTRUCTION
{
const unsigned int dim = 3;
- Assert (mg_dof_handler.get_tria().n_levels() > 0, DoFHandler<3>::ExcInvalidTriangulation());
+ Assert (mg_dof_handler.get_tria().n_levels() > 0,
+ ExcMessage("Invalid triangulation"));
////////////////////////////
// DESTRUCTION
:
DoFHandler<dim,spacedim> (tria),
mg_faces (NULL)
-{}
+{
+ Assert ((dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&tria)
+ == 0),
+ ExcMessage ("The given triangulation is parallel distributed but "
+ "this class does not currently support this."));
+}
for (unsigned int l=0; l<this->dofs->get_tria().n_levels(); ++l)
{
unsigned int max_index = 0;
- for (cell_iterator cell=first_cell(); cell != this->dofs->end();
- cell = next_cell(cell))
+ for (cell_iterator cell=first_locally_owned_cell(); cell != this->dofs->end();
+ cell = next_locally_owned_cell(cell))
if (static_cast<unsigned int>(cell->level()) == l)
max_index = std::max (max_index,
static_cast<unsigned int>(cell->index()));
std::vector<std::pair<cell_iterator, unsigned int> > all_cells;
{
// set the index of the first
- // cell. if first_cell/next_cell
+ // cell. if first_locally_owned_cell/next_locally_owned_cell
// returns non-active cells, then
// the index is not usable
// anyway, but otherwise we
// should keep track where we are
unsigned int index;
- if (first_cell()->has_children())
+ if (first_locally_owned_cell()->has_children())
index = 0;
else
index = std::distance (this->dofs->begin_active(),
- active_cell_iterator(first_cell()));
- for (cell_iterator cell=first_cell(); cell != this->dofs->end();
- cell = next_cell(cell))
+ active_cell_iterator(first_locally_owned_cell()));
+ for (cell_iterator cell=first_locally_owned_cell(); cell != this->dofs->end();
+ cell = next_locally_owned_cell(cell))
{
Assert (static_cast<unsigned int>(cell->level()) <
cell_to_patch_index_map.size(),
// ignore it. same if we are
// at the end of the range
if (!cell->has_children() &&
- next_cell(cell) != this->dofs->end() &&
- !next_cell(cell)->has_children())
+ next_locally_owned_cell(cell) != this->dofs->end() &&
+ !next_locally_owned_cell(cell)->has_children())
index += std::distance (active_cell_iterator(cell),
- active_cell_iterator(next_cell(cell)));
+ active_cell_iterator(next_locally_owned_cell(cell)));
}
}
}
+
+template <int dim, class DH>
+typename DataOut<dim,DH>::cell_iterator
+DataOut<dim,DH>::first_locally_owned_cell ()
+{
+ typename DataOut<dim,DH>::cell_iterator
+ cell = this->dofs->begin_active ();
+
+ // skip cells if the current one
+ // has no children (is active) and
+ // is a ghost or artificial cell
+ while ((cell != this->dofs->end()) &&
+ (cell->has_children() == false) &&
+ (cell->is_ghost() || cell->is_artificial()))
+ cell = next_cell(cell);
+
+ return cell;
+}
+
+
+
+template <int dim, class DH>
+typename DataOut<dim,DH>::cell_iterator
+DataOut<dim,DH>::next_locally_owned_cell (const typename DataOut<dim,DH>::cell_iterator &old_cell)
+{
+ typename DataOut<dim,DH>::cell_iterator
+ cell = next_cell(old_cell);
+ while ((cell != this->dofs->end()) &&
+ (cell->has_children() == false) &&
+ (cell->is_ghost() || cell->is_artificial()))
+ cell = next_cell(cell);
+ return cell;
+}
+
+
// explicit instantiations
#include "data_out.inst"
// $Id$
// Version: $Name$
//
-// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
internal::DataOutFaces::ParallelData<DH::dimension, DH::dimension> &data,
DataOutBase::Patch<DH::dimension-1,DH::space_dimension> &patch)
{
+ Assert (!cell_and_face->first->is_ghost() &&
+ !cell_and_face->first->is_artificial(),
+ ExcNotImplemented());
+
// we use the mapping to transform the
// vertices. However, the mapping works on
// cells, not faces, so transform the face
// $Id$
// Version: $Name$
//
-// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
internal::DataOutRotation::ParallelData<DH::dimension, DH::space_dimension> &data,
std::vector<DataOutBase::Patch<DH::dimension+1,DH::space_dimension+1> > &patches)
{
+ Assert (!(*cell)->is_ghost() &&
+ !(*cell)->is_artificial(),
+ ExcNotImplemented());
+
const unsigned int n_patches_per_circle = data.n_patches_per_circle;
// another abbreviation denoting
// $Id$
// Version: $Name$
//
-// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 by the deal.II authors
+// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
for (typename DH::active_cell_iterator cell=dof_handler->begin_active();
cell != dof_handler->end(); ++cell, ++patch, ++cell_number)
{
+ Assert (!cell->is_ghost() &&
+ !cell->is_artificial(),
+ ExcNotImplemented());
+
Assert (patch != patches.end(), ExcInternalError());
// first fill in the vertices of the patch
// $Id$
// Version: $Name$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* The subdomain id we are to care
* for.
*/
- const unsigned int subdomain_id;
+ const types::subdomain_id_t subdomain_id;
/**
* The material id we are to care
* for.
const dealii::hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
const bool need_quadrature_points,
const unsigned int n_solution_vectors,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id,
const typename FunctionMap<DH::dimension>::type *neumann_bc,
const std::vector<bool> component_mask,
const dealii::hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
const bool need_quadrature_points,
const unsigned int n_solution_vectors,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id,
const typename FunctionMap<DH::dimension>::type *neumann_bc,
const std::vector<bool> component_mask,
// exists in the global map
Assert (face_integrals.find (p->first) == face_integrals.end(),
ExcInternalError());
+
for (unsigned int i=0; i<p->second.size(); ++i)
Assert (p->second[i] >= 0, ExcInternalError());
{
const unsigned int n_solution_vectors = solutions.size();
- const unsigned int subdomain_id = parallel_data.subdomain_id;
+ const types::subdomain_id_t subdomain_id = parallel_data.subdomain_id;
const unsigned int material_id = parallel_data.material_id;
// empty our own copy of the local face
// material_id), or if one of the
// neighbors behind the face is on
// the subdomain we care for
- if ( ! ( ((subdomain_id == numbers::invalid_unsigned_int)
+ if ( ! ( ((subdomain_id == types::invalid_subdomain_id)
||
(cell->subdomain_id() == subdomain_id))
&&
if (face->has_children() == false)
care_for_cell |= ((cell->neighbor(face_no)->subdomain_id()
== subdomain_id) ||
- (subdomain_id == numbers::invalid_unsigned_int))
+ (subdomain_id == types::invalid_subdomain_id))
&&
((cell->neighbor(face_no)->material_id()
== material_id) ||
->material_id() == material_id)
&&
(subdomain_id ==
- numbers::invalid_unsigned_int)))
+ types::invalid_subdomain_id)))
{
care_for_cell = true;
break;
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
// just pass on to the other function
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
// just pass on to the other function
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask,
const Function<1> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &/*component_mask_*/,
const Function<1> */*coefficient*/,
const unsigned int,
- const unsigned int /*subdomain_id*/,
+ const types::subdomain_id_t /*subdomain_id*/,
const unsigned int /*material_id*/)
{
Assert (false, ExcInternalError());
const std::vector<bool> &component_mask_,
const Function<1> *coefficient,
const unsigned int,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id_,
const unsigned int material_id)
{
+ if (dynamic_cast<const parallel::distributed::Triangulation<1,spacedim>*>
+ (&dof_handler.get_tria())
+ != 0)
+ Assert ((subdomain_id_ == types::invalid_subdomain_id)
+ ||
+ (subdomain_id_ ==
+ dynamic_cast<const parallel::distributed::Triangulation<1,spacedim>&>
+ (dof_handler.get_tria()).locally_owned_subdomain()),
+ ExcMessage ("For parallel distributed triangulations, the only "
+ "valid subdomain_id that can be passed here is the "
+ "one that corresponds to the locally owned subdomain id."));
+
+ const types::subdomain_id_t subdomain_id
+ = ((dynamic_cast<const parallel::distributed::Triangulation<1,spacedim>*>
+ (&dof_handler.get_tria())
+ != 0)
+ ?
+ dynamic_cast<const parallel::distributed::Triangulation<1,spacedim>&>
+ (dof_handler.get_tria()).locally_owned_subdomain()
+ :
+ subdomain_id_);
+
+
const unsigned int n_components = dof_handler.get_fe().n_components();
const unsigned int n_solution_vectors = solutions.size();
typename DH::active_cell_iterator cell = dof_handler.begin_active();
for (unsigned int cell_index=0; cell != dof_handler.end();
++cell, ++cell_index)
- if (((subdomain_id == numbers::invalid_unsigned_int)
+ if (((subdomain_id == types::invalid_subdomain_id)
||
(cell->subdomain_id() == subdomain_id))
&&
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
// just pass on to the other function
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
// just pass on to the other function
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask_,
const Function<dim> *coefficients,
const unsigned int ,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id_,
const unsigned int material_id)
{
+ if (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+ (&dof_handler.get_tria())
+ != 0)
+ Assert ((subdomain_id_ == types::invalid_subdomain_id)
+ ||
+ (subdomain_id_ ==
+ dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>&>
+ (dof_handler.get_tria()).locally_owned_subdomain()),
+ ExcMessage ("For parallel distributed triangulations, the only "
+ "valid subdomain_id that can be passed here is the "
+ "one that corresponds to the locally owned subdomain id."));
+
+ const types::subdomain_id_t subdomain_id
+ = ((dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+ (&dof_handler.get_tria())
+ != 0)
+ ?
+ dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>&>
+ (dof_handler.get_tria()).locally_owned_subdomain()
+ :
+ subdomain_id_);
+
+
const unsigned int n_components = dof_handler.get_fe().n_components();
// sanity checks
for (typename DH::active_cell_iterator cell=dof_handler.begin_active();
cell!=dof_handler.end();
++cell, ++present_cell)
- if ( ((subdomain_id == numbers::invalid_unsigned_int)
+ if ( ((subdomain_id == types::invalid_subdomain_id)
||
(cell->subdomain_id() == subdomain_id))
&&
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
// forward to the function with the QCollection
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
const std::vector<bool> &component_mask,
const Function<dim> *coefficients,
const unsigned int n_threads,
- const unsigned int subdomain_id,
+ const types::subdomain_id_t subdomain_id,
const unsigned int material_id)
{
Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping"));
INSTANTIATE(PETScWrappers::Vector,hp::DoFHandler<deal_II_dimension>);
INSTANTIATE(PETScWrappers::BlockVector,hp::DoFHandler<deal_II_dimension>);
+
+INSTANTIATE(PETScWrappers::MPI::Vector,DoFHandler<deal_II_dimension>);
+INSTANTIATE(PETScWrappers::MPI::BlockVector,DoFHandler<deal_II_dimension>);
+
#endif
#ifdef DEAL_II_USE_TRILINOS
// $Id$
// Version: $Name$
//
-// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
template<int dim, typename VECTOR, class DH>
-SolutionTransfer<dim, VECTOR, DH>::SolutionTransfer(const DH &dof):
+SolutionTransfer<dim, VECTOR, DH>::SolutionTransfer(const DH &dof)
+ :
dof_handler(&dof, typeid(*this).name()),
n_dofs_old(0),
prepared_for(none)
template<int dim, typename VECTOR, class DH>
void SolutionTransfer<dim, VECTOR, DH>::prepare_for_pure_refinement()
-{
+{
Assert(prepared_for!=pure_refinement, ExcAlreadyPrepForRef());
- Assert(prepared_for!=coarsening_and_refinement,
+ Assert(prepared_for!=coarsening_and_refinement,
ExcAlreadyPrepForCoarseAndRef());
clear();
typename DH::active_cell_iterator cell = dof_handler->begin_active(),
endc = dof_handler->end();
- for (unsigned int i=0; cell!=endc; ++cell, ++i)
+ for (unsigned int i=0; cell!=endc; ++cell, ++i)
{
indices_on_cell[i].resize(cell->get_fe().dofs_per_cell);
// on each cell store the indices of the
}
-template<int dim, typename VECTOR, class DH>
+template<int dim, typename VECTOR, class DH>
void
SolutionTransfer<dim, VECTOR, DH>::refine_interpolate(const VECTOR &in,
VECTOR &out) const
pointerstruct,
cell_map_end=cell_map.end();
- for (; cell!=endc; ++cell)
+ for (; cell!=endc; ++cell)
{
pointerstruct=cell_map.find(std::make_pair(cell->level(),cell->index()));
-
+
if (pointerstruct!=cell_map_end)
// this cell was refined or not
// touched at all, so we can get
prepare_for_coarsening_and_refinement(const std::vector<VECTOR> &all_in)
{
Assert(prepared_for!=pure_refinement, ExcAlreadyPrepForRef());
- Assert(!prepared_for!=coarsening_and_refinement,
+ Assert(!prepared_for!=coarsening_and_refinement,
ExcAlreadyPrepForCoarseAndRef());
-
+
const unsigned int in_size=all_in.size();
Assert(in_size!=0, ExcNoInVectorsGiven());
typename DH::active_cell_iterator
act_cell = dof_handler->begin_active(),
endc = dof_handler->end();
- for (; act_cell!=endc; ++act_cell)
+ for (; act_cell!=endc; ++act_cell)
{
if (act_cell->coarsen_flag_set())
++n_cells_to_coarsen;
for (; cell!=endc; ++cell)
if (!cell->active() && cell->child(0)->coarsen_flag_set())
++n_coarsen_fathers;
-
+
if (n_cells_to_coarsen)
Assert(n_cells_to_coarsen>=2*n_coarsen_fathers, ExcInternalError());
std::vector<std::vector<unsigned int> >
(n_cells_to_stay_or_refine)
.swap(indices_on_cell);
-
+
std::vector<std::vector<Vector<typename VECTOR::value_type> > >
(n_coarsen_fathers,
std::vector<Vector<typename VECTOR::value_type> > (in_size))
.swap(dof_values_on_cell);
-
+
// we need counters for
// the 'to_stay_or_refine' cells 'n_sr' and
// the 'coarsen_fathers' cells 'n_cf',
unsigned int n_sr=0, n_cf=0;
- cell = dof_handler->begin();
- for (; cell!=endc; ++cell)
+ cell = dof_handler->begin();
+ for (; cell!=endc; ++cell)
{
if (cell->active() && !cell->coarsen_flag_set())
{
std::vector<Vector<typename VECTOR::value_type> >(in_size,
Vector<typename VECTOR::value_type>(dofs_per_cell))
.swap(dof_values_on_cell[n_cf]);
-
+
for (unsigned int j=0; j<in_size; ++j)
{
// store the data of each of
typename DH::cell_iterator cell = dof_handler->begin(),
endc = dof_handler->end();
- for (; cell!=endc; ++cell)
+ for (; cell!=endc; ++cell)
{
pointerstruct=cell_map.find(std::make_pair(cell->level(),cell->index()));
-
+
if (pointerstruct!=cell_map_end)
{
const std::vector<unsigned int> * const indexptr
=pointerstruct->second.dof_values_ptr;
const unsigned int dofs_per_cell=cell->get_fe().dofs_per_cell;
-
+
// cell stayed or is
// refined
if (indexptr)
@echo '<li><code>INCLUDE=$(INCLUDE)</code>' >> $@
@echo '<li><code>CXXFLAGS.g=$(CXXFLAGS.g)</code>' >> $@
@echo '<li><code>CXXFLAGS.o=$(CXXFLAGS.o)</code>' >> $@
- @echo '<li><code>CFLAGS=$(CFLAGS)</code>' >> $@
+ @echo '<li><code>CFLAGS.g=$(CFLAGS.g)</code>' >> $@
+ @echo '<li><code>CFLAGS.o=$(CFLAGS.o)</code>' >> $@
@echo '<li><code>F77FLAGS.g=$(F77FLAGS.g)</code>' >> $@
@echo '<li><code>F77FLAGS.o=$(F77FLAGS.o)</code>' >> $@
@echo '<li><code>LDFLAGS=$(LDFLAGS)</code>' >> $@
</p>
</dd>
- <dt> <code>CCFLAGS</code> </dt>
+ <dt> <code>CCFLAGS.g</code> </dt>
<dd> <p>
- C compiler flags. Since we only compiler C code for
- third-party libraries for which we assume that they and our
- interfaces to them are bug free, these flags are always for
- optimized mode.
+ C compiler flags for debug mode.
+ </p>
+ </dd>
+
+
+ <dt> <code>CCFLAGS.o</code> </dt>
+ <dd> <p>
+ C compiler flags for optimized mode.
</p>
</dd>
--- /dev/null
+//-------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//-------------------------------------------------------------------------
+
+/**
+ * @defgroup distributed Parallel computing with multiple processors using distributed memory
+ * @ingroup Parallel
+ *
+ * @brief A module discussing the use of parallelism on distributed memory
+ * clusters.
+ *
+ * <h3>Overview</h3>
+ *
+ * deal.II can use multiple machines connected via MPI to parallelize
+ * computations, in addition to the parallelization within a shared
+ * memory machine discussed in the @ref threads module. There are
+ * essentially two ways to utilize multiple machines:
+ *
+ * - Each machine keeps the entire mesh and DoF handler locally, but
+ * only a share of the global matrix, sparsity pattern, and solution
+ * vector is stored on each machine.
+ * - The mesh and DoF handler are also distributed, i.e. each processor
+ * stores only a share of the cells and degrees of freedom. No
+ * processor has knowledge of the entire mesh, matrix, or solution,
+ * and in fact problems solved in this mode are usually so large
+ * (say, 100s of millions to billions of degrees of freedom) that no
+ * processor can or should store even a single solution vector.
+ *
+ * The first of these two options is relatively straightforward
+ * because most of the things one wants to do in a finite element
+ * program still work in essentially the same way, and handling
+ * distributed matrices, vectors, and linear solvers is something for
+ * which good external libraries such as @ref SoftwareTrilinos or @ref
+ * SoftwarePETSc exist that can make things look almost exactly the
+ * same as they would if everything was available locally. The use of
+ * this mode of parallelization is explained in the tutorial programs
+ * step-17 and step-18 and will not be discussed here in more detail.
+ *
+ * The use of truly distributed meshes is somewhat more complex because it
+ * changes or makes impossible some of the things that can otherwise be done
+ * with deal.II triangulations, DoF handlers, etc. This module documents these
+ * issues with a vantage point at 50,000 ft above ground without going into
+ * too many details. All the algorithms described below are implemented in
+ * classes and functions in namespace parallel::distributed.
+ *
+ *
+ * <h4>Other resources</h4>
+ *
+ * A complete discussion of the algorithms used in this namespace, as well as
+ * a thorough description of many of the terms used here, can be found in the
+ * @ref distributed_paper "Distributed Computing paper". In particular, the
+ * paper shows that the methods discussed in this module scale to thousands of
+ * processors and well over a billion degrees of freedom (they may scale to
+ * even bigger problems but at the time of writing this, we do not have
+ * solvers that are capable of more than $2^{31}$ degrees of freedom due to
+ * the use of <code>signed int</code> as index). The paper also gives a
+ * concise definition of many of the terms that are used here and in other
+ * places of the library related to distributed computing. The step-40
+ * tutorial program shows an application of the classes and methods of this
+ * namespace to the Laplace equation, while step-32 extends the step-31
+ * program to massively parallel computations and thereby explains the use of
+ * the topic discussed here to more complicated applications.
+ *
+ *
+ * <h4>Distributed triangulations</h4>
+ *
+ * In %parallel %distributed mode, objects of type
+ * parallel::distributed::Triangulation on each processor only store
+ * a subset of cells. In particular, the global mesh can be thought of
+ * as decomposed so that each MPI process "owns" a number of
+ * cells. The mesh each process then stores locally consists of
+ * exactly those cells that it owns, as well as one layer of @ref
+ * GlossGhostCell "ghost cells" around the ones it locally owns, and a
+ * number of cells we call @ref GlossArtificialCell "artificial". The
+ * latter are cells that ensure that each processor has a mesh that
+ * has all the coarse level cells and that respects the invariant that
+ * neighboring cells can not differ by more than one level of
+ * refinement. The following pictures show such a mesh, %distributed
+ * across four processors, and the collection of cells each of these
+ * processors stores locally:
+ *
+ * <table align="center">
+ * <tr>
+ * <td> @image html distributed_mesh_0.png </td>
+ * <td> @image html distributed_mesh_1.png </td>
+ * </tr>
+ * <tr>
+ * <td> @image html distributed_mesh_2.png </td>
+ * <td> @image html distributed_mesh_3.png </td>
+ * </tr>
+ * </table>
+ *
+ * The cells are colored based on the @ref GlossSubdomainId "subdomain id",
+ * which identifies which processor owns a cell: turquoise for
+ * processor 0, green for processor 1, yellow for processor 2, and red
+ * for processor 3. As can be seen, each process has one layer of
+ * ghost cells around its own cells, which are correctly colored by
+ * the subdomain id that identifies the processor that owns each of
+ * these cells. Note also how each processor stores a number of
+ * artificial cells, indicated in blue, that only exist to ensure that
+ * each processor knows about all coarse grid cells and that the
+ * meshes have the 2:1 refinement property; however, in the area
+ * occupied by these artificial cells, a processor has no knowledge
+ * how refined the mesh there really is, as these are areas that are
+ * owned by other processors. As a consequence, all algorithms we will
+ * develop can only run over the locally owned cells and if necessary
+ * the ghost cells; trying to access data on any of the artificial
+ * cells is most likely an error. Note that we can determine whether
+ * we own a cell by testing that <code>cell-@>subdomain_id() ==
+ * triangulation.locally_owned_subdomain()</code>.
+ *
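+ * As a small sketch (the names <code>triangulation</code> and the loop
+ * body are only for illustration), a loop restricted to the cells this
+ * process owns may look like this:
+ * @code
+ *   typename Triangulation<dim>::active_cell_iterator
+ *     cell = triangulation.begin_active(),
+ *     endc = triangulation.end();
+ *   for (; cell!=endc; ++cell)
+ *     if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
+ *       {
+ *         // this is a locally owned cell: it is safe to work on it
+ *       }
+ * @endcode
+ *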
+ * The "real" mesh one has to think of here is the one that would
+ * result from forming the union of cells each of the processes own,
+ * i.e. from the overlap of the turquoise, green, yellow and red
+ * areas, disregarding the blue areas.
+ *
+ *
+ * <h4>Distributed degree of freedom handler</h4>
+ *
+ * The DoFHandler class builds on the Triangulation class, but it can
+ * detect whenever we actually use an object of type
+ * parallel::distributed::Triangulation as triangulation. In that
+ * case, it assigns global %numbers for all degrees of freedom that
+ * exist, given a finite element, on the global mesh, but each
+ * processor will only know about those that are defined on locally
+ * relevant cells (i.e. cells either locally owned or that are ghost
+ * cells). Internally, the algorithm essentially works by just looping
+ * over all cells we own locally and assigning DoF indices to the
+ * degrees of freedom defined on them; degrees of freedom on the
+ * interface between subdomains owned by different processors only
+ * receive an index from the one processor that owns them, not from the
+ * neighboring processor. All
+ * processors then exchange how many degrees of freedom they locally
+ * own and shift their own indices in such a way that every degree of
+ * freedom on all subdomains is uniquely identified by an index
+ * between zero and DoFHandler::n_dofs() (this function returns the
+ * global number of degrees of freedom, accumulated over all
+ * processors). Note that after this step, the degrees of freedom
+ * owned by each process form a contiguous range that can, for
+ * example, be obtained by the contiguous index set returned by
+ * DoFHandler::locally_owned_dofs(). After
+ * assigning unique indices to all degrees of freedom, the
+ * DoFHandler::distribute_dofs() function then
+ * loops over all ghost cells and communicates with neighboring
+ * processors to ensure that the global indices of degrees of freedom
+ * on these ghost cells match the ones that the neighbor has assigned
+ * to them.
+ *
+ * Through this scheme, we can make sure that each cell we locally own
+ * as well as all the ghost cells can be asked to yield the globally
+ * correct indices for the degrees of freedom defined on
+ * them. However, asking for degrees of freedom on artificial cells is
+ * likely going to lead to nothing good, as no information is
+ * available for these cells (in fact, it isn't even known whether
+ * these cells are active on the global mesh, or are further refined).
+ *
+ * As usual, degrees of freedom can be renumbered after being enumerated,
+ * using the functions in namespace DoFRenumbering.
+ *
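+ * As a brief, illustrative sketch (assuming a finite element object
+ * <code>fe</code> and a DoFHandler <code>dof_handler</code> built on top
+ * of a parallel::distributed::Triangulation), the enumeration and the
+ * queries just described look like this:
+ * @code
+ *   dof_handler.distribute_dofs (fe);
+ *
+ *   // global number of DoFs, accumulated over all processors
+ *   const unsigned int n_global_dofs = dof_handler.n_dofs();
+ *
+ *   // the contiguous range of DoF indices this processor owns
+ *   const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ *
+ *   // global DoF indices may be queried on locally owned and ghost
+ *   // cells, but not on artificial ones
+ *   std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);
+ *   typename DoFHandler<dim>::active_cell_iterator
+ *     cell = dof_handler.begin_active(),
+ *     endc = dof_handler.end();
+ *   for (; cell!=endc; ++cell)
+ *     if (!cell->is_artificial())
+ *       cell->get_dof_indices (local_dof_indices);
+ * @endcode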
+ *
+ * <h4>Linear systems for %distributed computations</h4>
+ *
+ * One thing one learns very quickly when working with very large
+ * numbers of processors is that one can not store information about
+ * every degree of freedom on each processor, even if this information
+ * is "this degree of freedom doesn't live here". An example for this
+ * is that we can create an object for a (compressed) sparsity pattern
+ * that has DoFHandler::n_dofs() rows,
+ * but for which we fill only those rows that correspond to the
+ * DoFHandler::n_locally_owned_dofs() locally
+ * owned degrees of freedom. The reason is simple: for the sake of
+ * example, let's assume we have 1 billion degrees of freedom
+ * distributed across 100 processors; if we even only hold 16 bytes
+ * per line in this sparsity pattern (whether we own the corresponding
+ * DoF or not), we'll need 16 GB for this object even if every single
+ * line is empty. Of course, only 10 million lines will be non-empty,
+ * for which we need 160 MB plus whatever is necessary to store the
+ * actual column indices of nonzero entries. Let's say we have a
+ * moderately complex problem with 50 entries per row, for each of
+ * which we store the column index worth 4 bytes, then we'll need 216
+ * bytes for each of the 10 million lines that correspond to the
+ * degrees of freedom we own, for a total of 2.16 GB. And we'll need
+ * 16 bytes for each of the 990 million lines that we don't own, for a
+ * total of 15.840 GB. It is clear that this ratio doesn't become any
+ * better if we go to even higher %numbers of processors.
+ *
+ * The solution to this problem is to really only use any memory at
+ * all for those parts of the linear system that we own, or need for
+ * some other reason. For all other parts, we must know that they
+ * exist, but we can not set up any part of our data structure. To
+ * this end, there exists a class called IndexSet that denotes a set
+ * of indices which we care for, and for which we may have to allocate
+ * memory. The data structures for sparsity patterns, constraint
+ * matrices, matrices and vectors can be initialized with these
+ * IndexSet objects to really only care for those rows or entries that
+ * correspond to indices in the index set, and not care about all
+ * others. These objects will then ask how many indices exist in the
+ * set, allocate memory for each one of them (e.g. initialize the data
+ * structures for a line of a sparsity pattern), and when you want to
+ * access data for global degree of freedom <code>i</code> you will be
+ * redirected to the result of calling IndexSet::index_within_set()
+ * with index <code>i</code> instead. Accessing data for elements
+ * <code>i</code> for which IndexSet::is_element() is false will yield
+ * an error.
+ *
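+ * A small sketch of how such an IndexSet is typically queried (the
+ * global index <code>i</code> is assumed given):
+ * @code
+ *   const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ *
+ *   if (locally_owned_dofs.is_element (i))
+ *     {
+ *       // position of global index i within the set of locally owned
+ *       // indices; this is what data structures built on top of this
+ *       // IndexSet use internally
+ *       const unsigned int local_index
+ *         = locally_owned_dofs.index_within_set (i);
+ *     }
+ * @endcode
+ *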
+ * The remaining question is how to identify the set of indices that
+ * correspond to degrees of freedom we need to worry about on each
+ * processor. To this end, you can use the
+ * DoFTools::extract_locally_owned_dofs() function to get at all the
+ * indices a processor owns. Note that this is a subset of the degrees
+ * of freedom that are defined on the locally owned cells (since some
+ * of the degrees of freedom at the interface between two different
+ * subdomains may be owned by the neighbor). This set of degrees of
+ * freedom defined on cells we own can be obtained using the function
+ * DoFTools::extract_locally_active_dofs(). Finally, one
+ * sometimes needs the set of all degrees of freedom on the locally
+ * owned subdomain as well as the adjacent ghost cells. This
+ * information is provided by the
+ * DoFTools::extract_locally_relevant_dofs() function.
+ *
+ *
+ * <h5>Sparsity patterns</h5>
+ *
+ * At the time of writing this, the only class equipped to deal with the
+ * situation just explained is CompressedSimpleSparsityPattern. A version of
+ * the function CompressedSimpleSparsityPattern::reinit() exists that takes an
+ * IndexSet argument that indicates which lines of the sparsity pattern to
+ * allocate memory for. In other words, it is safe to create such an object
+ * that will report as its size 1 billion, but in fact stores only as
+ * many rows as the index set has elements. You can then use the usual
+ * function DoFTools::make_sparsity_pattern to build the sparsity pattern that
+ * results from assembling on the locally owned portion of the mesh. The
+ * resulting object can be used to initialize a PETSc or Trilinos matrix which
+ * support very large object sizes through completely distributed storage. The
+ * matrix can then be assembled by only looping over those cells owned by the
+ * current processor.
+ *
+ * The only thing to pay attention to is for which degrees of freedom the
+ * sparsity needs to store entries. These are, in essence, the ones we could
+ * possibly store values to in the matrix upon assembly. It is clear that
+ * these are certainly the locally active degrees of freedom (which live on
+ * the cells we locally own) but through constraints, it may also be possible
+ * to write to entries that are located on ghost cells. Consequently, you need
+ * to pass the index set that results from
+ * DoFTools::extract_locally_relevant_dofs() upon initializing the sparsity
+ * pattern.
+ *
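+ * A sketch of this sequence of steps may look as follows (the objects
+ * <code>dof_handler</code> and <code>constraints</code> are assumed to
+ * exist already; initializing the distributed matrix from the pattern is
+ * omitted):
+ * @code
+ *   IndexSet locally_relevant_dofs;
+ *   DoFTools::extract_locally_relevant_dofs (dof_handler,
+ *                                            locally_relevant_dofs);
+ *
+ *   // only allocate rows for the locally relevant degrees of freedom
+ *   CompressedSimpleSparsityPattern csp;
+ *   csp.reinit (dof_handler.n_dofs(),
+ *               dof_handler.n_dofs(),
+ *               locally_relevant_dofs);
+ *   DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false);
+ * @endcode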
+ *
+ * <h4>Constraints on degrees of freedom</h4>
+ *
+ * When creating the sparsity pattern as well as when assembling the linear
+ * system, we need to know about constraints on degrees of freedom, for
+ * example resulting from hanging nodes or boundary conditions. Like the
+ * CompressedSimpleSparsityPattern class, the ConstraintMatrix can also take
+ * an IndexSet upon construction that indicates for which of the possibly very
+ * large number of degrees of freedom it should actually store
+ * constraints. Unlike for the sparsity pattern, these are now only those
+ * degrees of freedom which we work on locally when assembling, namely those
+ * returned by DoFTools::extract_locally_active_dofs() (a superset of the
+ * locally owned ones).
+ *
+ * There are, however, situations where more complicated constraints appear in
+ * finite element programs. An example is in $hp$ adaptive computations where
+ * degrees of freedom can be constrained against other degrees of freedom that
+ * are themselves constrained. In a case like this, in order to fully resolve
+ * this chain of constraints, it may not be sufficient to only store
+ * constraints on locally active degrees of freedom but one may also need to
+ * have constraints available on locally relevant ones. In that case, the
+ * ConstraintMatrix object needs to be initialized with the IndexSet produced
+ * by DoFTools::extract_locally_relevant_dofs().
+ *
+ * In general, your program will continue to run if you happen not to
+ * store all necessary constraints on each processor: you will just generate
+ * wrong matrix entries, but the program will not abort. This is opposed to
+ * the situation of the sparsity pattern: there, if the IndexSet passed to the
+ * CompressedSimpleSparsityPattern indicates that it should store too few rows
+ * of the matrix, the program will either abort when you attempt to write into
+ * matrix entries that do not exist or the matrix class will silently allocate
+ * more memory to accommodate them. As a consequence, it is useful to err on
+ * the side of caution when indicating which constraints to store and use the
+ * result of DoFTools::extract_locally_relevant_dofs() rather than
+ * DoFTools::extract_locally_active_dofs(). This is also affordable since the
+ * set of locally relevant degrees of freedom is only marginally larger than
+ * the set of locally active degrees of freedom. We choose this strategy in
+ * both step-32 and step-40.
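+ *
+ * A sketch of this strategy, again using the hypothetical
+ * <code>dof_handler</code> and <code>locally_relevant_dofs</code> objects
+ * introduced above:
+ * @code
+ *   ConstraintMatrix constraints;
+ *   constraints.reinit (locally_relevant_dofs);
+ *   DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ *   constraints.close ();
+ * @endcode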
+ *
+ *
+ * <h4>Postprocessing</h4>
+ *
+ * Like everything else, you can only do postprocessing on the cells the
+ * local processor owns. The DataOut and KellyErrorEstimator classes
+ * do this automatically: they only operate on locally owned cells
+ * without the need to do anything in particular. At least for large
+ * computations, there is also no way to merge the results of all
+ * these local computations on a single machine, i.e. each processor
+ * has to be self-sufficient. For example, each processor has to
+ * generate its own output files that have to be visualized
+ * by a program that can deal with multiple input files, rather than
+ * merging the results of calling DataOut on a single processor before
+ * generating a single output file. The former approach can be
+ * implemented, for example, using the DataOutBase::write_vtu() and
+ * DataOutBase::write_pvtu_record() functions.
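+ *
+ * As an illustration of this approach (all names are hypothetical:
+ * <code>data_out</code> is a DataOut object on which build_patches() has
+ * already been called, <code>myid</code> and <code>n_processes</code>
+ * denote the rank of the current process and the total number of
+ * processes), one file per processor plus a master record could be
+ * written like this:
+ * @code
+ *   std::ofstream output (("solution-" +
+ *                          Utilities::int_to_string (myid, 4) +
+ *                          ".vtu").c_str());
+ *   data_out.write_vtu (output);
+ *
+ *   if (myid == 0)
+ *     {
+ *       std::vector<std::string> filenames;
+ *       for (unsigned int i=0; i<n_processes; ++i)
+ *         filenames.push_back ("solution-" +
+ *                              Utilities::int_to_string (i, 4) +
+ *                              ".vtu");
+ *       std::ofstream master_output ("solution.pvtu");
+ *       data_out.write_pvtu_record (master_output, filenames);
+ *     }
+ * @endcode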
+ *
+ * These same considerations hold for all other postprocessing actions
+ * as well: while it is, for example, possible to compute a global
+ * energy dissipation rate by doing the computations locally and
+ * accumulating the resulting single number from each processor into a
+ * single number for the entire computation, it is in general not possible
+ * to do the same if the volume of data produced by every processor is
+ * significant.
+ *
+ * There is one particular consideration for postprocessing, however: whatever
+ * you do on each cell a processor owns, you need access to at least all those
+ * values of the solution vector that are active on these cells (i.e. to the
+ * set of all <i>locally active degrees of freedom</i>, in the language of the
+ * @ref distributed_paper "Distributed Computing paper"), which is a superset
+ * of the degrees of freedom this processor actually owns (because it may not
+ * own all degrees of freedom on the interface between its own cells and those
+ * cells owned by other processors). Sometimes, however, you need even more
+ * information: for example, to compute the KellyErrorEstimator results, one
+ * needs to evaluate the solution gradient at the interface on the current
+ * cell as well as on its neighbor; the latter may be owned by another processor, so we need
+ * those degrees of freedom as well. In general, therefore, one needs access
+ * to the solution values for all degrees of freedom that are <i>locally
+ * relevant</i>. On the other hand, both of the packages we can use for
+ * parallel linear algebra (PETSc and Trilinos) subdivide vectors into chunks
+ * each processor owns and chunks stored on other processors. Postprocessing
+ * therefore means that we have to tell PETSc or Trilinos that it should
+ * also import <i>ghost elements</i>, i.e. additional elements of the
+ * solution vector other than the ones we store locally. Both the
+ * PETScWrappers::MPI::Vector and TrilinosWrappers::MPI::Vector classes support
+ * specifying this information (see step-40 and step-32, respectively) through
+ * the PETScWrappers::MPI::Vector::update_ghost_values() function or, in the
+ * case of Trilinos, by constructing a vector with the index set of locally
+ * relevant degrees of freedom.
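+ *
+ * A minimal sketch for the Trilinos case (hypothetical names:
+ * <code>locally_relevant_dofs</code> is the index set from above, and
+ * <code>distributed_solution</code> a completely distributed vector that
+ * holds the solution; the second reinit() call imports the locally owned
+ * values as well as the ghost entries):
+ * @code
+ *   TrilinosWrappers::MPI::Vector ghosted_solution;
+ *   ghosted_solution.reinit (locally_relevant_dofs, MPI_COMM_WORLD);
+ *   ghosted_solution.reinit (distributed_solution, false, true);
+ * @endcode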
+ */
+
+
+
+namespace parallel
+{
+ /**
+ * A namespace for classes and
+ * functions that support %parallel
+ * computing on %distributed memory
+ * machines. See the @ref
+ * distributed module for an
+ * overview of the facilities this
+ * namespace offers.
+ *
+ * @ingroup distributed
+ */
+ namespace distributed
+ {
+ }
+}
* <dd>The "distributed computing paper" is a paper by W. Bangerth,
* C. Burstedde, T. Heister and M. Kronbichler titled "Algorithms and Data
* Structures for Massively Parallel Generic Finite Element Codes" that
- * described the implementation of parallel distributed computing in deal.II,
+ * describes the implementation of parallel distributed computing in deal.II,
* i.e. computations where not only the linear system is split onto different
* machines as in, for example, step-18, but also the Triangulation and
* DoFHandler objects. In essence, it is a guide to the parallel::distributed
* namespace.
*
- * The paper is currently in preparation.
+ * The paper is submitted to the ACM Transactions on Mathematical Software
+ * (ACM TOMS).
* </dd>
*
*
* polynomials. Let us give some examples:
*
* <table><tr>
- * <th>Element</th>
- * <th>Function space</th>
- * <th>Node values</th></tr>
- * <tr><th>FE_Q, FE_DGQ</th>
- * <td><i>Q<sub>k</sub></i></td>
- * <td>values in support points</td></tr>
- * <tr><th>FE_DGP</th>
- * <td><i>P<sub>k</sub></i></td>
- * <td>moments with respect to Legendre polynomials</td></tr>
- * <tr><th>FE_RaviartThomas (2d)</th>
- * <td><i>Q<sub>k+1,k</sub> x Q<sub>k,k+1</sub></i></td>
- * <td>moments on edges and in the interior</td></tr>
- * <tr><th>FE_RaviartThomasNodal</th>
- * <td><i>Q<sub>k+1,k</sub> x Q<sub>k,k+1</sub></i></td>
- * <td>Gauss points on edges(faces) and anisotropic Gauss points in the interior</td></tr>
+ * <th>Element</th>
+ * <th>Function space</th>
+ * <th>Node values</th></tr>
+ * <tr><th>FE_Q, FE_DGQ</th>
+ * <td><i>Q<sub>k</sub></i></td>
+ * <td>values in support points</td></tr>
+ * <tr><th>FE_DGP</th>
+ * <td><i>P<sub>k</sub></i></td>
+ * <td>moments with respect to Legendre polynomials</td></tr>
+ * <tr><th>FE_RaviartThomas (2d)</th>
+ * <td><i>Q<sub>k+1,k</sub> x Q<sub>k,k+1</sub></i></td>
+ * <td>moments on edges and in the interior</td></tr>
+ * <tr><th>FE_RaviartThomasNodal</th>
+ * <td><i>Q<sub>k+1,k</sub> x Q<sub>k,k+1</sub></i></td>
+ * <td>Gauss points on edges(faces) and anisotropic Gauss points in the interior</td></tr>
* </table>
*
* <dt class="glossary">@anchor GlossPrimitive <b>Primitive finite
* a vector-valued element has exactly one nonzero component if an element is
* primitive. This includes, in particular, all scalar elements as well as
* vector-valued elements assembled via the FESystem class from other
- * primitive (for example scalar) elements as shown in step-8,
+ * primitive (for example scalar) elements as shown in step-8,
* step-29, step-22 and several others. On the other hand,
* the FE_RaviartThomas class used in step-20 and step-21, or the FE_Nedelec
* class provide non-primitive finite elements because there, each
* cell is associated with.
*
* For programs that are parallelized based on MPI but where each processor
- * stores the entire triangulation (as in, for example, step-18
- * or step-32, subdomain ids are assigned to cells by
+ * stores the entire triangulation (as in, for example, step-18, but not in
+ * step-32), subdomain ids are assigned to cells by
* partitioning a mesh, and each MPI process then only works on those cells it
- * "owns", i.e. that belong to a subdomain that it is associated with
+ * "owns", i.e. that belong to a subdomain that the processor is associated with
* (traditionally, this is the case for the subdomain id whose numerical value
* coincides with the rank of the MPI process within the MPI
* communicator). Partitioning is typically done using the
 * GridTools::partition_triangulation() function, but any other method can also be used to
- * do this though most simple ideas will likely lead to less well balanced
- * numbers of degrees of freedom on the various subdomains.
+ * do this.
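+ *
+ * As a small illustration (assuming a variable
+ * <code>n_mpi_processes</code> that holds the number of MPI processes),
+ * such a program might contain a call like
+ * @code
+ *   GridTools::partition_triangulation (n_mpi_processes, triangulation);
+ * @endcode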
*
* On the other hand, for programs that are parallelized using MPI but
* where meshes are held distributed across several processors using
// $Id$
// Version: $Name$
//
-// Copyright (C) 2009, 2010 by the deal.II authors
+// Copyright (C) 2009 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
 * @brief A module discussing the use of multiple processors.
*
* This module contains information on %parallel computing. It is
- * subdivided into parts on @ref threads and on distributed
- * computing. The latter part will become available in a future
- * public release.
+ * subdivided into parts on @ref threads and on @ref distributed.
*/
* A namespace in which we define classes and algorithms that deal
* with running in %parallel on shared memory machines when deal.II is
* configured to use multiple threads (see @ref threads), as well as
- * running things in %parallel on %distributed memory machines.
+ * running things in %parallel on %distributed memory machines (see @ref
+ * distributed).
*
* @ingroup threads
* @author Wolfgang Bangerth, 2008, 2009
#
# make steps.png
#
-# by hand. This way, we avoid testing for neato during
+# by hand. This way, we avoid testing for dot during
# configuration. The map only needs to be generated after changes in
# the structure of the tutorials.
toc.html: steps.cmapx toc.html.in
@perl make_toc.pl > toc.html
# generate dot file
-steps.dot: steps.pl $D/examples/*/doc/tooltip
+steps.dot: steps.pl $D/examples/*/doc/tooltip $D/examples/*/doc/builds-on
@echo ================== Making $@
@perl $< > $@
steps.png: steps.dot
@echo ================== Making $@
- @neato -Tpng -Tcmapx -O $<
+ @dot -Tpng -Tcmapx -O $<
@mv steps.dot.png steps.png
@mv steps.dot.cmapx steps.cmapx
<map id="StepsMap" name="StepsMap">
-<area shape="poly" href="../deal.II/step_1.html" title="Creating a grid. Refining it. Writing it to a file" alt="" coords="73,150 73,122 54,103 26,103 7,122 7,150 26,169 54,169"/>
-<area shape="poly" href="../deal.II/step_2.html" title="Assigning degrees of freedom to a grid." alt="" coords="155,212 155,185 135,165 108,165 88,185 88,212 108,232 135,232"/>
-<area shape="poly" href="../deal.II/step_3.html" title="Solving Poisson's equation." alt="" coords="223,271 223,244 203,224 176,224 156,244 156,271 176,291 203,291"/>
-<area shape="poly" href="../deal.II/step_4.html" title="Dimension independent programming. Boundary conditions." alt="" coords="360,351 360,324 340,304 313,304 293,324 293,351 313,371 340,371"/>
-<area shape="poly" href="../deal.II/step_5.html" title="Reading a grid from disk. Computations on successively refined grids." alt="" coords="327,474 327,446 307,427 280,427 260,446 260,474 280,493 307,493"/>
-<area shape="rect" href="../deal.II/step_10.html" title="Higher order mappings." alt="" coords="435,280,472,307"/>
-<area shape="rect" href="../deal.II/step_15.html" title="1d problems. A nonlinear problem." alt="" coords="276,231,313,257"/>
-<area shape="rect" href="../deal.II/step_20.html" title="Mixed finite elements for the mixed Laplacian. Block solvers." alt="" coords="197,355,235,381"/>
-<area shape="rect" href="../deal.II/step_23.html" title="Time dependent problems. The wave equation." alt="" coords="341,203,379,229"/>
-<area shape="rect" href="../deal.II/step_29.html" title="A complex-valued Helmholtz equation. Sparse direct solvers." alt="" coords="405,327,443,353"/>
-<area shape="rect" href="../deal.II/step_34.html" title="Boundary element methods for potential flow." alt="" coords="368,261,405,288"/>
-<area shape="rect" href="../deal.II/step_36.html" title="Finding eigenvalues of the Schrödinger equation." alt="" coords="237,291,275,317"/>
-<area shape="rect" href="../deal.II/step_45.html" title="Periodic boundary conditions" alt="" coords="361,433,399,460"/>
-<area shape="poly" href="../deal.II/step_6.html" title="Adaptive local refinement. Higher order elements" alt="" coords="371,587 371,560 351,540 324,540 304,560 304,587 324,607 351,607"/>
-<area shape="rect" href="../deal.II/step_7.html" title="Helmholtz equation. Computing errors. Boundary integrals." alt="" coords="289,681,327,708"/>
-<area shape="rect" href="../deal.II/step_8.html" title="Systems of PDE. Elasticity." alt="" coords="417,651,455,677"/>
-<area shape="rect" href="../deal.II/step_9.html" title="Advection equation. Multithreading. Refinement criteria." alt="" coords="249,581,287,608"/>
-<area shape="rect" href="../deal.II/step_13.html" title="Modularity. Software design." alt="" coords="452,559,489,585"/>
-<area shape="rect" href="../deal.II/step_16.html" title="Multigrid on adaptive meshes." alt="" coords="393,589,431,616"/>
-<area shape="rect" href="../deal.II/step_22.html" title="The Stokes equation on adaptive meshes." alt="" coords="188,552,225,579"/>
-<area shape="rect" href="../deal.II/step_27.html" title="hp-adaptive finite element methods." alt="" coords="400,524,437,551"/>
-<area shape="rect" href="../deal.II/step_28.html" title="Handling multiple meshes at the same time. Neutron transport." alt="" coords="345,643,383,669"/>
-<area shape="rect" href="../deal.II/step_39.html" title="Interior Penalty for the Laplace equation. Adaptive refinement. Multigrid." alt="" coords="248,661,285,688"/>
-<area shape="rect" href="../deal.II/step_12.html" title="Discontinuous Galerkin for linear advection." alt="" coords="227,772,264,799"/>
-<area shape="rect" href="../deal.II/step_17.html" title="Parallel computing using MPI. Using PETSc." alt="" coords="504,721,541,748"/>
-<area shape="rect" href="../deal.II/step_11.html" title="Higher order mappings. Dealing with constraints." alt="" coords="536,232,573,259"/>
-<area shape="rect" href="../deal.II/step_30.html" title="Anisotropic refinement for DG methods." alt="" coords="133,832,171,859"/>
-<area shape="rect" href="../deal.II/step_33.html" title="Hyperbolic conservation laws: the Euler equations of gas dynamics." alt="" coords="251,879,288,905"/>
-<area shape="rect" href="../deal.II/step_14.html" title="Duality based error estimates. Adaptivity." alt="" coords="565,572,603,599"/>
-<area shape="rect" href="../deal.II/step_18.html" title="Quasistatic elasticity. More parallel computing." alt="" coords="585,785,623,812"/>
-<area shape="rect" href="../deal.II/step_19.html" title="Handling input parameter files. Converting output formats." alt="" coords="417,468,455,495"/>
-<area shape="rect" href="../deal.II/step_21.html" title="Two-phase flow in porous media." alt="" coords="149,449,187,476"/>
-<area shape="rect" href="../deal.II/step_31.html" title="Boussinesq flow for thermal convection." alt="" coords="97,627,135,653"/>
-<area shape="rect" href="../deal.II/step_35.html" title="A projection solver for the Navier-Stokes equations." alt="" coords="73,559,111,585"/>
-<area shape="rect" href="../deal.II/step_24.html" title="The wave equation with absorbing boundary conditions. Extracting point values." alt="" coords="377,100,415,127"/>
-<area shape="rect" href="../deal.II/step_25.html" title="The nonlinear sine-Gordon soliton equation" alt="" coords="417,7,455,33"/>
+<area shape="poly" id="node1" href="../deal.II/step_1.html" title="Creating a grid. Refining it. Writing it to a file" alt="" coords="655,60 655,28 632,5 600,5 577,28 577,60 600,83 632,83"/>
+<area shape="poly" id="node2" href="../deal.II/step_2.html" title="Assigning degrees of freedom to a grid." alt="" coords="655,185 655,153 632,131 600,131 577,153 577,185 600,208 632,208"/>
+<area shape="poly" id="node3" href="../deal.II/step_3.html" title="Solving Poisson's equation." alt="" coords="655,311 655,279 632,256 600,256 577,279 577,311 600,333 632,333"/>
+<area shape="poly" id="node4" href="../deal.II/step_4.html" title="Dimension independent programming. Boundary conditions." alt="" coords="655,436 655,404 632,381 600,381 577,404 577,436 600,459 632,459"/>
+<area shape="poly" id="node5" href="../deal.II/step_5.html" title="Reading a grid from disk. Computations on successively refined grids." alt="" coords="441,561 441,529 419,507 387,507 364,529 364,561 387,584 419,584"/>
+<area shape="rect" id="node10" href="../deal.II/step_10.html" title="Higher order mappings." alt="" coords="540,529,575,562"/>
+<area shape="rect" id="node15" href="../deal.II/step_15.html" title="1d problems. A nonlinear problem." alt="" coords="599,529,633,562"/>
+<area shape="rect" id="node20" href="../deal.II/step_20.html" title="Mixed finite elements for the mixed Laplacian. Block solvers." alt="" coords="256,531,291,559"/>
+<area shape="rect" id="node23" href="../deal.II/step_23.html" title="Time dependent problems. The wave equation." alt="" coords="657,531,692,559"/>
+<area shape="rect" id="node28" href="../deal.II/step_29.html" title="A complex-valued Helmholtz equation. Sparse direct solvers." alt="" coords="716,529,751,562"/>
+<area shape="rect" id="node33" href="../deal.II/step_34.html" title="Boundary element methods for potential flow." alt="" coords="775,531,809,559"/>
+<area shape="rect" id="node35" href="../deal.II/step_36.html" title="Finding eigenvalues of the Schrödinger equation." alt="" coords="833,529,868,562"/>
+<area shape="rect" id="node38" href="../deal.II/step_45.html" title="Periodic boundary conditions" alt="" coords="623,758,657,791"/>
+<area shape="poly" id="node6" href="../deal.II/step_6.html" title="Adaptive local refinement. Higher order elements" alt="" coords="419,687 419,655 396,632 364,632 341,655 341,687 364,709 396,709"/>
+<area shape="rect" id="node7" href="../deal.II/step_7.html" title="Helmholtz equation. Computing errors. Boundary integrals." alt="" coords="516,758,548,791"/>
+<area shape="rect" id="node8" href="../deal.II/step_8.html" title="Systems of PDE. Elasticity." alt="" coords="169,758,201,791"/>
+<area shape="rect" id="node9" href="../deal.II/step_9.html" title="Advection equation. Multithreading. Refinement criteria." alt="" coords="225,758,257,791"/>
+<area shape="rect" id="node13" href="../deal.II/step_13.html" title="Modularity. Software design." alt="" coords="457,758,492,791"/>
+<area shape="rect" id="node16" href="../deal.II/step_16.html" title="Multigrid on adaptive meshes." alt="" coords="281,758,316,791"/>
+<area shape="rect" id="node22" href="../deal.II/step_22.html" title="The Stokes equation on adaptive meshes." alt="" coords="60,761,95,789"/>
+<area shape="rect" id="node26" href="../deal.II/step_27.html" title="hp-adaptive finite element methods." alt="" coords="340,758,375,791"/>
+<area shape="rect" id="node27" href="../deal.II/step_28.html" title="Handling multiple meshes at the same time. Neutron transport." alt="" coords="399,758,433,791"/>
+<area shape="rect" id="node36" href="../deal.II/step_39.html" title="Interior Penalty for the Laplace equation. Adaptive refinement. Multigrid." alt="" coords="568,923,603,957"/>
+<area shape="rect" id="node37" href="../deal.II/step_40.html" title="Solving the Laplace equation on adaptive meshes on thousands of processors." alt="" coords="116,923,151,957"/>
+<area shape="rect" id="node12" href="../deal.II/step_12.html" title="Discontinuous Galerkin for linear advection." alt="" coords="513,841,548,874"/>
+<area shape="rect" id="node17" href="../deal.II/step_17.html" title="Parallel computing using MPI. Using PETSc." alt="" coords="169,843,204,871"/>
+<area shape="rect" id="node11" href="../deal.II/step_11.html" title="Higher order mappings. Dealing with constraints." alt="" coords="540,654,575,687"/>
+<area shape="rect" id="node29" href="../deal.II/step_30.html" title="Anisotropic refinement for DG methods." alt="" coords="451,923,485,957"/>
+<area shape="rect" id="node32" href="../deal.II/step_33.html" title="Hyperbolic conservation laws: the Euler equations of gas dynamics." alt="" coords="509,926,544,954"/>
+<area shape="rect" id="node14" href="../deal.II/step_14.html" title="Duality based error estimates. Adaptivity." alt="" coords="455,841,489,874"/>
+<area shape="rect" id="node18" href="../deal.II/step_18.html" title="Quasistatic elasticity. More parallel computing." alt="" coords="227,926,261,954"/>
+<area shape="rect" id="node31" href="../deal.II/step_32.html" title="A parallel Boussinesq flow for thermal convection." alt="" coords="116,1006,151,1034"/>
+<area shape="rect" id="node19" href="../deal.II/step_19.html" title="Handling input parameter files. Converting output formats." alt="" coords="228,841,263,874"/>
+<area shape="rect" id="node21" href="../deal.II/step_21.html" title="Two-phase flow in porous media." alt="" coords="183,657,217,685"/>
+<area shape="rect" id="node30" href="../deal.II/step_31.html" title="Boussinesq flow for thermal convection." alt="" coords="57,926,92,954"/>
+<area shape="rect" id="node34" href="../deal.II/step_35.html" title="A projection solver for the Navier-Stokes equations." alt="" coords="5,843,40,871"/>
+<area shape="rect" id="node24" href="../deal.II/step_24.html" title="The wave equation with absorbing boundary conditions. Extracting point values." alt="" coords="669,657,704,685"/>
+<area shape="rect" id="node25" href="../deal.II/step_25.html" title="The nonlinear sine-Gordon soliton equation" alt="" coords="681,761,716,789"/>
</map>
my @steps = (1,2,3,4,5,6,7,8,9,
10,11,12,13,14,15,16,17,18,19,
20,21,22,23,24,25, 27,28,29,
- 30,31, 33,34,35,36, 39,
- 45);
+ 30,31,32,33,34,35,36, 39,
+ 40,45);
;
# List of additional node attributes to highlight purpose and state of the example
my %style = (
- "basic" => ',height=.7,width=.7,shape="octagon",fillcolor="green"',
- "techniques" => ',fillcolor="orange"',
- "fluids" => ',fillcolor="yellow"',
- "solids" => ',fillcolor="lightblue"',
- "time dependent" => ',fillcolor="blue"',
- "unfinished" => ',style="dashed"'
+ "basic" => ',height=.8,width=.8,shape="octagon",fillcolor="green"',
+ "techniques" => ',height=.35,width=.35,fillcolor="orange"',
+ "fluids" => ',height=.25,width=.25,fillcolor="yellow"',
+ "solids" => ',height=.25,width=.25,fillcolor="lightblue"',
+ "time dependent" => ',height=.25,width=.25,fillcolor="blue"',
+ "unfinished" => ',height=.25,width=.25,style="dashed"'
);
--- /dev/null
+#! /bin/bash
+
+# This program comes with ABSOLUTELY NO WARRANTY.
+
+# unpack under current directory
+UNPACK=`pwd`
+# choose names for fast and debug compilation directories
+BUILD_FAST="$UNPACK/p4est-build/FAST"
+BUILD_DEBUG="$UNPACK/p4est-build/DEBUG"
+
+function busage() {
+ echo "Usage: `basename $0` <p4est_tar.gz_file> <p4est_install_directory>"
+}
+function bdie () {
+ echo "Error: $@"
+ exit 1
+}
+
+if test -z "$CFLAGS" -a -z "$P4EST_CFLAGS_FAST" ; then
+ export CFLAGS_FAST="-O2"
+else
+ export CFLAGS_FAST="$CFLAGS $P4EST_CFLAGS_FAST"
+fi
+echo "CFLAGS_FAST: $CFLAGS_FAST"
+if test -z "$CFLAGS" -a -z "$P4EST_CFLAGS_DEBUG" ; then
+ export CFLAGS_DEBUG="-O0 -g"
+else
+ export CFLAGS_DEBUG="$CFLAGS $P4EST_CFLAGS_DEBUG"
+fi
+echo "CFLAGS_DEBUG: $CFLAGS_DEBUG"
+
+TGZ="$1"; shift
+if test ! -f "$TGZ" ; then
+ busage
+ bdie "File not found"
+fi
+if ! (echo "$TGZ" | grep -q 'p4est.*.tar.gz') ; then
+ busage
+ bdie "File name mismatch"
+fi
+
+# choose names for fast and debug installation directories
+INSTALL_DIR=$1
+shift
+if test -z "$INSTALL_DIR" ; then
+ INSTALL_FAST="$UNPACK/p4est-install/FAST"
+ INSTALL_DEBUG="$UNPACK/p4est-install/DEBUG"
+else
+ INSTALL_FAST="$INSTALL_DIR/FAST"
+ INSTALL_DEBUG="$INSTALL_DIR/DEBUG"
+fi
+
+echo
+echo "This script tries to unpack, configure and build the p4est library."
+echo "Build FAST: $BUILD_FAST"
+echo "Build DEBUG: $BUILD_DEBUG"
+echo "Install FAST: $INSTALL_FAST"
+echo "Install DEBUG: $INSTALL_DEBUG"
+echo "Checking environment: CFLAGS P4EST_CFLAGS_FAST P4EST_CFLAGS_DEBUG"
+
+
+if test -d $UNPACK/p4est-build ; then
+ rm -rf $UNPACK/p4est-build
+fi
+
+DIR=`echo "$TGZ" | sed 's/\(p4est.*\).tar.gz/\1/'`
+DIR=`basename $DIR`
+echo "Unpack directory: $UNPACK/$DIR"
+if test -d "$UNPACK/$DIR" ; then
+ echo \
+ "Directory found (remove it and also the build directories" \
+ "to start over)"
+else
+ echo -n "Unpacking... "
+ tar -xvz -f "$TGZ" -C "$UNPACK" >/dev/null
+ echo "done"
+fi
+test -f "$UNPACK/$DIR/src/p4est.h" || bdie "Main header file missing"
+test -f "$UNPACK/$DIR/configure" || bdie "Configure script missing"
+
+echo
+echo "See output in files .../config.output and .../make.output"
+echo "Build FAST version in $BUILD_FAST"
+mkdir -p "$BUILD_FAST"
+cd "$BUILD_FAST"
+("$UNPACK/$DIR/configure" --enable-mpi --enable-shared --disable-vtk-binary --without-blas \
+ --prefix="$INSTALL_FAST" CFLAGS="$CFLAGS_FAST" \
+ "$@" || bdie "Error in configure" ) | tee config.output
+(make -C sc -j 8 || bdie "Error in make sc") | tee make.output
+(make src/libp4est.la -j 8 \
+ || bdie "Error in make p4est") | tee -a make.output
+(make install || bdie "Error in make install") | tee -a make.output
+echo "FAST version installed in $INSTALL_FAST"
+
+echo
+echo "Build DEBUG version in $BUILD_DEBUG"
+mkdir -p "$BUILD_DEBUG"
+cd "$BUILD_DEBUG"
+if test -z "$CFLAGS" ; then
+ export CFLAGS="-g -O0"
+fi
+("$UNPACK/$DIR/configure" --enable-mpi --enable-shared --disable-vtk-binary --without-blas --enable-debug \
+ --prefix="$INSTALL_DEBUG" CFLAGS="$CFLAGS_DEBUG" \
+ "$@" || bdie "Error in configure") | tee config.output
+(make -C sc -j 8 || bdie "Error in make sc") | tee make.output
+(make src/libp4est.la -j 8 \
+ || bdie "Error in make p4est") | tee -a make.output
+(make install || bdie "Error in make install") | tee -a make.output
+echo "DEBUG version installed in $INSTALL_DEBUG"
+echo
--- /dev/null
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Frameset//EN"
+ "http://www.w3.org/TR/REC-html40/frameset.dtd">
+<html>
+ <head>
+ <link href="../screen.css" rel="StyleSheet" media="screen">
+ <title>The deal.II Readme</title>
+ <meta name="author" content="the deal.II authors <authors@dealii.org>">
+ <meta name="keywords" content="deal.II">
+ </head>
+
+ <body>
+
+
+    <h1>Instructions for using and installing the p4est library</h1>
+
+ <p>
+ <a href="http://www.p4est.org/" target="_top">p4est</a> is a
+ library that manages meshes that are distributed across multiple
+ processors. It forms the basis of deal.II's implementation of
+ finite element solvers that can use meshes that are too large to
+ be held on each processor individually.
+ </p>
+
+ <p>
+ You need to install p4est before deal.II. To do so, you can
+ download it
+ from <a href="http://users.ices.utexas.edu/~carsten/tmp/031/p4est-0.3.1.55-67fe1.tar.gz"
+ target="_top">here</a>, copy it to a fresh directory
+ into which you should also copy
+ the <a href="p4est-setup.sh">p4est-setup.sh script</a>. Then
+ call the script as follows:
+ <code>
+ <pre>
+ ./p4est-setup.sh p4est-x-y-z.tar.gz /path/to/installation
+ </pre>
+ </code>
+ where <code>p4est-x-y-z.tar.gz</code> is the name of the p4est
+ distribution file, and <code>/path/to/installation</code> is a
+ directory into which you want to install p4est.
+ </p>
+
+ <p>
+ After this, you need to configure and build deal.II using a line
+ like
+ <code>
+ <pre>
+ ./configure --with-p4est=/path/to/installation --enable-mpi
+ make all
+ </pre>
+ </code>
+ Obviously, you can also add additional flags
+ to <code>./configure</code> as described in the
+ general <a href="../readme.html">ReadMe file</a>.
+ </p>
+
+ <hr>
+
+ <address>
+ <a href="mail.html">The deal.II mailing list</a>
+ </address>
+ </body>
+</html>
processor cores. If this is not desired,
use <code>--disable-threads</code>.
+ <li>
+ <p>
+ <code>--enable-mpi</code>: If given this
+ flag, <code>./configure</code> chooses <code>mpiCC</code>
+ and <code>mpicc</code> as the C++ and C compilers, respectively,
+ unless the <code>$CXX</code> and <code>$CC</code> environment
+ variables have specifically been set to something else. If these
+ compilers exist and indeed support MPI, then this also switches
+ on support for MPI in the library.
+ </p>
+
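+       <p>
+       A hypothetical invocation that only switches on MPI support would,
+       for example, look like this:
+       <code>
+       <pre>
+
+       ./configure --enable-mpi
+
+       </pre>
+       </code>
+       </p>
+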
<li>
<p>
<code>--with-cpu=...</code>: Enable specific optimization
flags for a particular processor. Programs compiled with these options
- might not execute on an other system, but will be faster by up to
- 30 percent on the CPU selected.
+        might not execute on another system, but may be faster on the
+ particular CPU selected.
</p>
<p>
For a complete list of supported values of this switch, you may
take a look at the file <code>aclocal.m4</code> in the top-level
directory. However, the most commonly used value
- is <code>--with-cpu=native</code> indicating that the compiler
+ is <code>--with-cpu=native</code>, indicating that the compiler
should determine the CPU we are running on by itself and
optimize for it.
</p>
- <p>
- Optimizations currently only work in conjunction with gcc. Without the
- option <code>--with-cpu=xxx</code>, no cpu specific optimization is applied.
- Furthermore, CPU specific optimization is switched on only for the compilation
- of the optimized library versions.
- </p>
-
<li>
<p>
<code>--with-doxygen=...</code>: Select the specified executable
</p>
</dd>
+ <a name="p4est"></a>
+ <dt>p4est</dt>
+ <dd>
+ <p>
+ p4est is a library that <acronym>deal.II</acronym> uses to
+ distribute very large meshes across multiple processors (think
+ meshes with a billion cells on 10,000 processors). Using and
+ installing p4est is discussed <a href="external-libs/p4est.html"
+ target="body">here</a>.
+ To configure <acronym>deal.II</acronym> with p4est, you will
+        need to use the <code>--with-p4est=/path/to/p4est</code>
+ switch to the <code>./configure</code> script.
+ </p>
+ </dd>
+
<a name="blas"></a>
<dt>BLAS, LAPACK</dt>
<a name="mumps"></a>
<dt>MUltifrontal Massively Parallel sparse direct Solver (MUMPS)</dt>
<dd>
- It is possible to make use some subroutines that make use of the
+      It is possible to make use of some subroutines that make use of the
<a href="http://mumps.enseeiht.fr"
- target="_top">MUltifrontal Massively Parallel sparse direct Solver
- (MUMPS)</a>. For a detailed description of how to compile MUMPS (and
+ target="_top">MUltifrontal Massively Parallel sparse direct Solver
+ (MUMPS)</a>. For a detailed description of how to compile MUMPS (and
some dependencies) and linking with deal.II, see
<a href="external-libs/mumps.html" target="body">this page</a>.
</dd>
<a name="ARPACK"></a>
<dt>ARPACK</dt>
<dd>
- There is a wrapper for the
+ There is a wrapper for the
<a href="http://www.caam.rice.edu/software/ARPACK/">ARPACK</a>
library, which has its own license; if you want to use it with deal.II,
please read it and make sure that you agree with it.
-step-17
+step-17 step-19
# shall be deleted when calling `make clean'. Object and backup files,
# executables and the like are removed anyway. Here, we give a list of
# files in the various output formats that deal.II supports.
-clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk
+clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk *d2
-step-31 step-17
+step-31 step-17 step-40
//TODO: - adjust stopping criteria for solvers
// - better refinement at the start?
// - check solver stability
+// - Q2 Mapping useful?
/* $Id$ */
#include <lac/full_matrix.h>
#include <lac/solver_bicgstab.h>
#include <lac/solver_cg.h>
+#include <lac/solver_gmres.h>
#include <lac/constraint_matrix.h>
#include <lac/block_sparsity_pattern.h>
#include <lac/trilinos_block_vector.h>
#include <lac/trilinos_sparse_matrix.h>
#include <lac/trilinos_block_sparse_matrix.h>
#include <lac/trilinos_precondition.h>
+#include <lac/trilinos_solver.h>
#include <grid/tria.h>
#include <grid/grid_generator.h>
// matrices.
#include <base/index_set.h>
+#include <distributed/dof_handler.h>
+#include <distributed/tria.h>
+#include <distributed/solution_transfer.h>
+#include <distributed/grid_refinement.h>
+
+#include "timeblock.h"
+
+
+
+ #include <iostream>
+ #include <sstream>
+ #include <string>
+ #include <stdexcept>
+
+class BadConversion : public std::runtime_error {
+ public:
+ BadConversion(std::string const& s)
+ : std::runtime_error(s)
+ { }
+};
+
+inline std::string stringify(double x)
+{
+ std::ostringstream o;
+ if (!(o << x))
+ throw BadConversion("stringify(double)");
+ return o.str();
+}
+
+
+
+#define CONF_LOG_SOLVER
+//#define CONF_COMPARE_SOLVER
+//#define CONF_BENCHMARK
- // Next, we import all deal.II
- // names into global namespace:
using namespace dealii;
+void print_it(Utilities::System::MinMaxAvg & result)
+{
+ std::cout// << "sum: " << result.sum
+ << " avg: " << (long)result.avg/1024
+ << " min: " << (long)result.min/1024 << " @" << result.min_index
+ << " max: " << (long)result.max/1024 << " @" << result.max_index
+ << std::endl;
+}
+
+void print_memory_stats()
+{
+ int myid = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ Utilities::System::MemoryStats stats;
+ Utilities::System::get_memory_stats(stats);
+ Utilities::System::MinMaxAvg r;
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmPeak, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmPeak: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmSize, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmSize: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmHWM, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmHWM: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmRSS, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmRSS: ";
+ print_it(r);
+ }
+}
+
+
+static int out_index=0;
// @sect3{Equation data}
// In the following namespace, we define the
const double h = R1-R0;
const double s = (r-R0)/h;
+// see http://www.wolframalpha.com/input/?i=plot+(sqrt(x^2%2By^2)*0.95%2B0.05*sin(6*atan2(x,y))),+x%3D-1+to+1,+y%3D-1+to+1
- return T1+(T0-T1)*((1-s)*(1-s));
+ double s_mod = s*0.95 + 0.05*sin(6.0*atan2(p(0),p(1)));
+//alternative: http://www.wolframalpha.com/input/?i=plot+atan((sqrt(x^2%2By^2)*0.95%2B0.05*sin(6*atan2(x,y))-0.5)*10)/pi%2B0.5,+x%3D-1+to+1,+y%3D-1+to+1
+// s_mod = atan((s_mod-0.5)*10.0)/dealii::numbers::PI+0.5;
+
+ return T1+(T0-T1)*(1.0-s_mod);
+
+ //old:
+// return T1+(T0-T1)*((1-s)*(1-s));
+// return T1+(T0-T1)*((1-s));
}
// name.
namespace LinearSolvers
{
+ template <class PreconditionerA, class PreconditionerMp>
+ class RightPrecond : public Subscriptor
+ {
+ public:
+ RightPrecond (
+ const TrilinosWrappers::BlockSparseMatrix &S,
+ const TrilinosWrappers::BlockSparseMatrix &Spre,
+ const PreconditionerMp &Mppreconditioner,
+ const PreconditionerA &Apreconditioner)
+ :
+ stokes_matrix (&S),
+ stokes_preconditioner_matrix (&Spre),
+ mp_preconditioner (Mppreconditioner),
+ a_preconditioner (Apreconditioner)
+ {}
+
+ void solve_S(TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ SolverControl cn(5000, 1e-5);//src.l2_norm()*1e-5);
+
+ TrilinosWrappers::SolverBicgstab solver(cn);
+
+ solver.solve(stokes_preconditioner_matrix->block(1,1),
+ dst, src,
+ mp_preconditioner);
+
+#ifdef CONF_LOG_SOLVER
+ TrilinosWrappers::MPI::Vector res(src);
+ double resid =
+ stokes_preconditioner_matrix->block(1,1).residual(res,dst,src);
+
+ double srcl2 = src.l2_norm();
+#endif
+ dst*=-1.0;
+
+#ifdef CONF_LOG_SOLVER
+ int myid=Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ if (myid==0)
+ {
+ std::cout << " solve_S it=" << cn.last_step()
+ << " startres=" << stringify(srcl2)
+ << " res=" << stringify(resid)
+ << " reduction=" << stringify(resid / srcl2)
+ << std::endl;
+
+ }
+#endif
+
+ }
+
+ void solve_A(TrilinosWrappers::MPI::Vector &dst,
+ const TrilinosWrappers::MPI::Vector &src) const
+ {
+ SolverControl cn(5000, src.l2_norm()*1e-4);
+ TrilinosWrappers::SolverBicgstab solver(cn);
+ solver.solve(stokes_matrix->block(0,0), dst, src, a_preconditioner);
+
+#ifdef CONF_LOG_SOLVER
+ double srcl2 = src.l2_norm();
+ TrilinosWrappers::MPI::Vector res(src);
+ double resid = stokes_matrix->block(0,0).residual(res,dst,src);
+
+ int myid=Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ if (myid==0)
+ {
+ std::cout << " solve_A it=" << cn.last_step()
+ << " startres=" << stringify(srcl2)
+ << " res=" << stringify(resid)
+ << " reduction=" << stringify(resid / srcl2)
+ << std::endl;
+
+ }
+#endif
+ }
+
+ void vmult (TrilinosWrappers::MPI::BlockVector &dst,
+ const TrilinosWrappers::MPI::BlockVector &src) const
+ {
+ TrilinosWrappers::MPI::Vector utmp(src.block(0));
+
+ solve_S(dst.block(1), src.block(1));
+
+ stokes_matrix->block(0,1).vmult(utmp, dst.block(1)); //B^T
+ utmp*=-1.0;
+ utmp.add(src.block(0));
+
+ solve_A(dst.block(0), utmp);
+ }
+
+ private:
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_matrix;
+ const SmartPointer<const TrilinosWrappers::BlockSparseMatrix> stokes_preconditioner_matrix;
+ const PreconditionerMp &mp_preconditioner;
+ const PreconditionerA &a_preconditioner;
+ };
+
template <class PreconditionerA, class PreconditionerMp>
class BlockSchurPreconditioner : public Subscriptor
{
{
public:
BoussinesqFlowProblem ();
- void run ();
+ void run (unsigned int ref);
private:
void setup_dofs ();
ConditionalOStream pcout;
- Triangulation<dim> triangulation;
+ parallel::distributed::Triangulation<dim> triangulation;
double global_Omega_diameter;
const unsigned int stokes_degree;
FESystem<dim> stokes_fe;
- DoFHandler<dim> stokes_dof_handler;
+ DoFHandler<dim> stokes_dof_handler;
ConstraintMatrix stokes_constraints;
TrilinosWrappers::BlockSparseMatrix stokes_matrix;
TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
- TrilinosWrappers::BlockVector stokes_solution;
- TrilinosWrappers::BlockVector old_stokes_solution;
+ TrilinosWrappers::MPI::BlockVector stokes_solution;
+ TrilinosWrappers::MPI::BlockVector old_stokes_solution;
TrilinosWrappers::MPI::BlockVector stokes_rhs;
const unsigned int temperature_degree;
FE_Q<dim> temperature_fe;
- DoFHandler<dim> temperature_dof_handler;
+ DoFHandler<dim> temperature_dof_handler;
ConstraintMatrix temperature_constraints;
TrilinosWrappers::SparseMatrix temperature_mass_matrix;
TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
TrilinosWrappers::SparseMatrix temperature_matrix;
- TrilinosWrappers::Vector temperature_solution;
- TrilinosWrappers::Vector old_temperature_solution;
- TrilinosWrappers::Vector old_old_temperature_solution;
+ TrilinosWrappers::MPI::Vector temperature_solution;
+ TrilinosWrappers::MPI::Vector old_temperature_solution;
+ TrilinosWrappers::MPI::Vector old_old_temperature_solution;
TrilinosWrappers::MPI::Vector temperature_rhs;
get_this_mpi_process(MPI_COMM_WORLD)
== 0)),
- triangulation (Triangulation<dim>::maximum_smoothing),
+ triangulation (MPI_COMM_WORLD,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement | Triangulation<dim>::smoothing_on_coarsening
+ )
+ ),
stokes_degree (1),
stokes_fe (FE_Q<dim>(stokes_degree+1), dim,
cg.solve (temperature_mass_matrix, solution, rhs, preconditioner_mass);
- old_temperature_solution = solution;
- temperature_constraints.distribute (old_temperature_solution);
+ temperature_constraints.distribute (solution);
+// old_temperature_solution = solution;
+ old_temperature_solution.reinit(solution, false, true);
}
else
coupling[c][d] = DoFTools::none;
- DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
+ DoFTools::make_sparsity_pattern (static_cast<const DoFHandler<dim>&>(stokes_dof_handler),
+ coupling, sp,
stokes_constraints, false,
Utilities::System::
get_this_mpi_process(MPI_COMM_WORLD));
else
coupling[c][d] = DoFTools::none;
- DoFTools::make_sparsity_pattern (stokes_dof_handler, coupling, sp,
+ DoFTools::make_sparsity_pattern (static_cast<const DoFHandler<dim>&>(stokes_dof_handler),
+ coupling, sp,
stokes_constraints, false,
Utilities::System::
get_this_mpi_process(MPI_COMM_WORLD));
TrilinosWrappers::SparsityPattern sp (temperature_partitioner,
MPI_COMM_WORLD);
- DoFTools::make_sparsity_pattern (temperature_dof_handler, sp,
+ DoFTools::make_sparsity_pattern (static_cast<const DoFHandler<dim>&>(temperature_dof_handler), sp,
temperature_constraints, false,
Utilities::System::
get_this_mpi_process(MPI_COMM_WORLD));
void BoussinesqFlowProblem<dim>::setup_dofs ()
{
computing_timer.enter_section("Setup dof systems");
+ TimeBlock<ConditionalOStream> t(pcout, "**dof_setup", false);
+
std::vector<unsigned int> stokes_sub_blocks (dim+1,0);
stokes_sub_blocks[dim] = 1;
-
- GridTools::partition_triangulation (Utilities::System::
- get_n_mpi_processes(MPI_COMM_WORLD),
- triangulation);
-
{
+ TimeBlock<ConditionalOStream> t(pcout, "***dof_distribute");
+
stokes_dof_handler.distribute_dofs (stokes_fe);
- DoFRenumbering::subdomain_wise (stokes_dof_handler);
DoFRenumbering::component_wise (stokes_dof_handler, stokes_sub_blocks);
- stokes_constraints.clear ();
- DoFTools::make_hanging_node_constraints (stokes_dof_handler,
- stokes_constraints);
-
- std::vector<bool> velocity_mask (dim+1, true);
- velocity_mask[dim] = false;
- VectorTools::interpolate_boundary_values (stokes_dof_handler,
- 0,
- ZeroFunction<dim>(dim+1),
- stokes_constraints,
- velocity_mask);
-
- std::set<unsigned char> no_normal_flux_boundaries;
- no_normal_flux_boundaries.insert (1);
- VectorTools::compute_no_normal_flux_constraints (stokes_dof_handler, 0,
- no_normal_flux_boundaries,
- stokes_constraints);
- stokes_constraints.close ();
- }
- {
temperature_dof_handler.distribute_dofs (temperature_fe);
- DoFRenumbering::subdomain_wise (temperature_dof_handler);
-
- temperature_constraints.clear ();
- VectorTools::interpolate_boundary_values (temperature_dof_handler,
- 0,
- EquationData::TemperatureInitialValues<dim>(),
- temperature_constraints);
- VectorTools::interpolate_boundary_values (temperature_dof_handler,
- 1,
- EquationData::TemperatureInitialValues<dim>(),
- temperature_constraints);
- DoFTools::make_hanging_node_constraints (temperature_dof_handler,
- temperature_constraints);
- temperature_constraints.close ();
}
+
std::vector<unsigned int> stokes_dofs_per_block (2);
DoFTools::count_dofs_per_block (stokes_dof_handler, stokes_dofs_per_block,
stokes_sub_blocks);
n_T = temperature_dof_handler.n_dofs();
pcout << "Number of active cells: "
- << triangulation.n_active_cells()
+ << triangulation.n_global_active_cells()
<< " (on "
<< triangulation.n_levels()
<< " levels)"
<< std::endl
<< std::endl;
- std::vector<IndexSet> stokes_partitioning;
- IndexSet temperature_partitioning (n_T);
+
+
+ std::vector<IndexSet> stokes_partitioning, stokes_relevant_partitioning;
+ IndexSet temperature_partitioning (n_T), temperature_relevant_partitioning (n_T);
+ IndexSet stokes_relevant_set;
{
+ TimeBlock<ConditionalOStream> t(pcout, "***index_sets");
const unsigned int my_id =
Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
- IndexSet stokes_index_set =
- DoFTools::dof_indices_with_subdomain_association(stokes_dof_handler,
- my_id);
+ IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
stokes_partitioning.push_back(stokes_index_set.get_view(0,n_u));
stokes_partitioning.push_back(stokes_index_set.get_view(n_u,n_u+n_p));
- temperature_partitioning =
- DoFTools::dof_indices_with_subdomain_association(temperature_dof_handler,
- my_id);
+ DoFTools::extract_locally_relevant_dofs (stokes_dof_handler,
+ stokes_relevant_set);
+ stokes_relevant_partitioning.push_back(stokes_relevant_set.get_view(0,n_u));
+ stokes_relevant_partitioning.push_back(stokes_relevant_set.get_view(n_u,n_u+n_p));
+
+ temperature_partitioning = temperature_dof_handler.locally_owned_dofs();
+ DoFTools::extract_locally_relevant_dofs (temperature_dof_handler,
+ temperature_relevant_partitioning);
+ }
+
+ {
+
+ stokes_constraints.clear ();
+// IndexSet stokes_la;
+// DoFTools::extract_locally_active_dofs (stokes_dof_handler,
+// stokes_la);
+ stokes_constraints.reinit(stokes_relevant_set);
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "***make_hanging_nodes_vel");
+
+ DoFTools::make_hanging_node_constraints (static_cast<const DoFHandler<dim>&>(stokes_dof_handler),
+ stokes_constraints);
+ }
+
+ TimeBlock<ConditionalOStream> t(pcout, "***boundary_values_vel");
+
+ std::vector<bool> velocity_mask (dim+1, true);
+ velocity_mask[dim] = false;
+ VectorTools::interpolate_boundary_values (static_cast<const DoFHandler<dim>&>(stokes_dof_handler),
+ 0,
+ ZeroFunction<dim>(dim+1),
+ stokes_constraints,
+ velocity_mask);
+
+ std::set<unsigned char> no_normal_flux_boundaries;
+ no_normal_flux_boundaries.insert (1);
+ VectorTools::compute_no_normal_flux_constraints (static_cast<const DoFHandler<dim>&>(stokes_dof_handler), 0,
+ no_normal_flux_boundaries,
+ stokes_constraints);
+ stokes_constraints.close ();
+ }
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "***hanging_nodes_and_bv_temperature");
+ temperature_constraints.clear ();
+// IndexSet temp_locally_active;
+// DoFTools::extract_locally_active_dofs (temperature_dof_handler,
+// temp_locally_active);
+ temperature_constraints.reinit(temperature_relevant_partitioning);//temp_locally_active);
+
+ VectorTools::interpolate_boundary_values (static_cast<const DoFHandler<dim>&>(temperature_dof_handler),
+ 0,
+ EquationData::TemperatureInitialValues<dim>(),
+ temperature_constraints);
+ VectorTools::interpolate_boundary_values (static_cast<const DoFHandler<dim>&>(temperature_dof_handler),
+ 1,
+ EquationData::TemperatureInitialValues<dim>(),
+ temperature_constraints);
+ DoFTools::make_hanging_node_constraints (static_cast<const DoFHandler<dim>&>(temperature_dof_handler),
+ temperature_constraints);
+ temperature_constraints.close ();
}
if (Utilities::System::job_supports_mpi() == false)
}
else
{
+ TimeBlock<ConditionalOStream> t(pcout, "***setup_stokes_matrix");
setup_stokes_matrix (stokes_partitioning);
+ t.close();
+
+ TimeBlock<ConditionalOStream> t2(pcout, "***setup_stokes_preconditioner");
setup_stokes_preconditioner (stokes_partitioning);
+ t2.close();
+
+ TimeBlock<ConditionalOStream> t3(pcout, "***setup_temperature_matrix");
setup_temperature_matrices (temperature_partitioning);
}
+ TimeBlock<ConditionalOStream> t2(pcout, "***init_vectors");
+
stokes_rhs.reinit (stokes_partitioning, MPI_COMM_WORLD);
- stokes_solution.reinit (stokes_rhs);
+ stokes_solution.reinit (stokes_relevant_partitioning, MPI_COMM_WORLD);
old_stokes_solution.reinit (stokes_solution);
temperature_rhs.reinit (temperature_partitioning, MPI_COMM_WORLD);
- temperature_solution.reinit (temperature_rhs);
+ temperature_solution.reinit (temperature_relevant_partitioning, MPI_COMM_WORLD);
old_temperature_solution.reinit (temperature_solution);
old_old_temperature_solution.reinit (temperature_solution);
(1./EquationData::eta) *
EquationData::pressure_scaling *
EquationData::pressure_scaling *
- scratch.phi_p[i] * scratch.phi_p[j])
+ (scratch.phi_p[i] * scratch.phi_p[j]))
* scratch.stokes_fe_values.JxW(q);
}
}
void
BoussinesqFlowProblem<dim>::build_stokes_preconditioner ()
{
+ TimeBlock<ConditionalOStream> t(pcout, "*build_stokes_preconditioner");
+
if (rebuild_stokes_preconditioner == false)
return;
- computing_timer.enter_section (" Build Stokes preconditioner");
- pcout << " Rebuilding Stokes preconditioner..." << std::flush;
+// computing_timer.enter_section (" Build Stokes preconditioner");
+// pcout << " Rebuilding Stokes preconditioner..." << std::flush;
assemble_stokes_preconditioner ();
Amg_data.elliptic = true;
Amg_data.higher_order_elements = true;
Amg_data.smoother_sweeps = 2;
- Amg_data.aggregation_threshold = 0.02;
+// Amg_data.aggregation_threshold = 0.02;
Mp_preconditioner->initialize (stokes_preconditioner_matrix.block(1,1));
Amg_preconditioner->initialize (stokes_preconditioner_matrix.block(0,0),
rebuild_stokes_preconditioner = false;
- pcout << std::endl;
- computing_timer.exit_section();
+// pcout << std::endl;
+// computing_timer.exit_section();
}
// @sect5{Stokes system assembly}
template <int dim>
void BoussinesqFlowProblem<dim>::assemble_stokes_system ()
{
- pcout << " Assembling..." << std::flush;
+ TimeBlock<ConditionalOStream> t(pcout, "*assemble_stokes");
- computing_timer.enter_section (" Assemble Stokes system");
+// computing_timer.enter_section (" Assemble Stokes system");
if (rebuild_stokes_matrix == true)
stokes_matrix=0;
rebuild_stokes_matrix = false;
- pcout << std::endl;
- computing_timer.exit_section();
+// pcout << std::endl;
+// computing_timer.exit_section();
}
template <int dim>
void BoussinesqFlowProblem<dim>::assemble_temperature_matrix ()
{
+ TimeBlock<ConditionalOStream> t(pcout, "*assemble_temp_matrix");
if (rebuild_temperature_matrices == false)
return;
- computing_timer.enter_section (" Assemble temperature matrices");
+// computing_timer.enter_section (" Assemble temperature matrices");
temperature_mass_matrix = 0;
temperature_stiffness_matrix = 0;
rebuild_temperature_matrices = false;
rebuild_temperature_preconditioner = true;
- computing_timer.exit_section();
+// computing_timer.exit_section();
}
template <int dim>
void BoussinesqFlowProblem<dim>::solve ()
{
- computing_timer.enter_section (" Solve Stokes system");
- pcout << " Solving..." << std::endl;
+// computing_timer.enter_section (" Solve Stokes system");
{
+ TimeBlock<ConditionalOStream> t(pcout, "*solve_stokes", false);
+
const LinearSolvers::BlockSchurPreconditioner<TrilinosWrappers::PreconditionAMG,
TrilinosWrappers::PreconditionILU>
preconditioner (stokes_matrix, *Mp_preconditioner, *Amg_preconditioner);
+ const LinearSolvers::RightPrecond<TrilinosWrappers::PreconditionAMG,
+ TrilinosWrappers::PreconditionILU>
+ preconditioner_right (stokes_matrix, stokes_preconditioner_matrix,
+ *Mp_preconditioner, *Amg_preconditioner);
+
TrilinosWrappers::MPI::BlockVector
distributed_stokes_solution (stokes_rhs);
- distributed_stokes_solution = stokes_solution;
+// distributed_stokes_solution = stokes_solution;
+ distributed_stokes_solution.block(0).reinit(stokes_solution.block(0),false,true);
+ distributed_stokes_solution.block(1).reinit(stokes_solution.block(1),false,true);
+
const unsigned int
start = (distributed_stokes_solution.block(0).size() +
if (stokes_constraints.is_constrained (i))
distributed_stokes_solution(i) = 0;
- SolverControl solver_control (stokes_matrix.m(), 1e-22*stokes_rhs.l2_norm());
- SolverBicgstab<TrilinosWrappers::MPI::BlockVector>
- bicgstab (solver_control, false);
+#ifdef CONF_COMPARE_SOLVER
+ {
+ TrilinosWrappers::MPI::BlockVector x(distributed_stokes_solution);
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "*TMP: solve bicg", false);
+
+ PrimitiveVectorMemory< TrilinosWrappers::MPI::BlockVector > mem;
+ SolverControl solver_control (stokes_matrix.m(), 1e-24*stokes_rhs.l2_norm());
+ SolverBicgstab<TrilinosWrappers::MPI::BlockVector>
+ solver (solver_control, mem, false);
+ solver.solve(stokes_matrix, x, stokes_rhs,
+ preconditioner);
+
+ pcout << " xx BICG: "
+ << solver_control.last_step()
+ << " iterations,"
+ << " reduced res by " << stringify(solver_control.last_value()/solver_control.initial_value())
+ << std::endl;
+ }
+ {
+ TrilinosWrappers::MPI::BlockVector
+ res (stokes_rhs);
+ double residual = stokes_matrix.residual(res, x, stokes_rhs);
- bicgstab.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs,
- preconditioner);
+ pcout << " x BICG: real reduced res by " << stringify(residual
+ /stokes_rhs.l2_norm())
+ << std::endl;
+ }
+ }
+#endif
+
+ SolverControl solver_control (stokes_matrix.m(), 1e-7*stokes_rhs.l2_norm());
+ {
+#ifdef CONF_COMPARE_SOLVER
+ TimeBlock<ConditionalOStream> t(pcout, "*TMP: solve fgmres", false);
+#endif
+ PrimitiveVectorMemory< TrilinosWrappers::MPI::BlockVector > mem;
+ SolverFGMRES<TrilinosWrappers::MPI::BlockVector>
+ solver(solver_control, mem,
+ SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(50, true));
+ solver.solve(stokes_matrix, distributed_stokes_solution, stokes_rhs,
+ preconditioner_right);
+
+ pcout << " xx FGMRES: "
+ << solver_control.last_step()
+ << " iterations,"
+ << " reduced res by " << stringify(solver_control.last_value()/solver_control.initial_value())
+ << std::endl;
+ }
+ {
+ TrilinosWrappers::MPI::BlockVector
+ res (stokes_rhs);
+ double residual = stokes_matrix.residual(res, distributed_stokes_solution, stokes_rhs);
+
+ pcout << " x FGMRES: real reduced res by " << stringify(residual
+ /stokes_rhs.l2_norm())
+ << std::endl;
+ }
+
+ stokes_constraints.distribute (distributed_stokes_solution);
+ //stokes_solution = distributed_stokes_solution;
+ stokes_solution.block(0).reinit(distributed_stokes_solution.block(0), false, true);
+ stokes_solution.block(1).reinit(distributed_stokes_solution.block(1), false, true);
- stokes_solution = distributed_stokes_solution;
pcout << " "
<< solver_control.last_step()
- << " BiCGStab iterations for Stokes subsystem."
+ << " iterations for Stokes subsystem."
+ << " reduced res by " << stringify(solver_control.last_value()/solver_control.initial_value())
<< std::endl;
- stokes_constraints.distribute (stokes_solution);
+
}
- computing_timer.exit_section();
+// computing_timer.exit_section();
- computing_timer.enter_section (" Assemble temperature rhs");
+// computing_timer.enter_section (" Assemble temperature rhs");
old_time_step = time_step;
const double maximal_velocity = get_maximal_velocity();
- if (maximal_velocity > 1e-10)
- time_step = 1./(1.6*dim*std::sqrt(1.*dim)) /
- temperature_degree *
- GridTools::minimal_cell_diameter(triangulation) /
- maximal_velocity;
- else
- time_step = 1./(1.6*dim*std::sqrt(1.*dim)) /
- temperature_degree *
- GridTools::minimal_cell_diameter(triangulation) /
- 1e-10;
+ double local_time_step = 1./(1.6*dim*std::sqrt(1.*dim)) /
+ temperature_degree *
+ GridTools::minimal_cell_diameter(triangulation) /
+ std::max(1e-10,maximal_velocity);
+
+ // calculate the minimum allowed time step
+ // size
+ MPI_Allreduce (&local_time_step, &time_step, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
pcout << " Maximal velocity: "
<< maximal_velocity * EquationData::year_in_seconds * 100
<< std::endl;
temperature_solution = old_temperature_solution;
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "*assemble_temp");
- assemble_temperature_system (maximal_velocity);
+ assemble_temperature_system (maximal_velocity);
+ }
- computing_timer.exit_section ();
- computing_timer.enter_section (" Solve temperature system");
+// computing_timer.exit_section ();
+
+// computing_timer.enter_section (" Solve temperature system");
{
+ TimeBlock<ConditionalOStream> t(pcout, "*solve_temp", false);
SolverControl solver_control (temperature_matrix.m(),
1e-12*temperature_rhs.l2_norm());
SolverCG<TrilinosWrappers::MPI::Vector> cg (solver_control);
TrilinosWrappers::MPI::Vector
distributed_temperature_solution (temperature_rhs);
- distributed_temperature_solution = temperature_solution;
+// distributed_temperature_solution = temperature_solution;
+ distributed_temperature_solution.reinit(temperature_solution, false, true);
cg.solve (temperature_matrix, distributed_temperature_solution,
temperature_rhs, *T_preconditioner);
- temperature_solution = distributed_temperature_solution;
- temperature_constraints.distribute (temperature_solution);
+ temperature_constraints.distribute (distributed_temperature_solution);
+// temperature_solution = distributed_temperature_solution;
+ temperature_solution.reinit(distributed_temperature_solution, false, true);
pcout << " "
<< solver_control.last_step()
<< " CG iterations for temperature" << std::endl;
- computing_timer.exit_section();
+// computing_timer.exit_section();
- double min_temperature = temperature_solution(0),
- max_temperature = temperature_solution(0);
- for (unsigned int i=1; i<temperature_solution.size(); ++i)
+ // extract temperature range
+ std::vector<double> temperature (2), global_temperature (2);
+ temperature[0] = distributed_temperature_solution.trilinos_vector()[0][0],
+ temperature[1] = temperature[0];
+ for (unsigned int i=1; i<distributed_temperature_solution.local_size(); ++i)
{
- min_temperature = std::min<double> (min_temperature,
- temperature_solution(i));
- max_temperature = std::max<double> (max_temperature,
- temperature_solution(i));
+ temperature[0] = std::min<double> (temperature[0],
+ distributed_temperature_solution.trilinos_vector()[0][i]);
+ temperature[1] = std::max<double> (temperature[1],
+ distributed_temperature_solution.trilinos_vector()[0][i]);
}
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
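+      // Get the global minimum and maximum in a single reduction: negate the
+      // local minimum, take the element-wise MPI_MAX over both entries, and
+      // negate the first entry of the result again to recover the minimum.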
+ temperature[0] *= -1.0;
+ MPI_Allreduce (&temperature[0], &global_temperature[0],
+ 2, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
+ global_temperature[0] *= -1.0;
+#else
+      global_temperature = temperature;
+#endif
pcout << " Temperature range: "
- << min_temperature << ' ' << max_temperature
+ << global_temperature[0] << ' ' << global_temperature[1]
<< std::endl;
}
}
+
// @sect4{BoussinesqFlowProblem::output_results}
// This function does mostly what the
template <int dim>
void BoussinesqFlowProblem<dim>::output_results ()
{
- if (timestep_number % 25 != 0)
- return;
+ pcout << "outputting " << out_index << std::endl;
computing_timer.enter_section ("Postprocessing");
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ KellyErrorEstimator<dim>::estimate (static_cast<const DoFHandler<dim>&>(temperature_dof_handler),
+ QGauss<dim-1>(temperature_degree+1),
+ typename FunctionMap<dim>::type(),
+ temperature_solution,
+ estimated_error_per_cell);
+
+ const FESystem<dim> joint_fe (stokes_fe, 1,
+ temperature_fe, 1,
+ FE_DGQ<dim>(0), 1,
+ FE_DGQ<dim>(0), 1,
+ temperature_fe, 1);
+
+ DoFHandler<dim> joint_dof_handler (triangulation);
+ joint_dof_handler.distribute_dofs (joint_fe);
+ Assert (joint_dof_handler.n_global_dofs() ==
+ stokes_dof_handler.n_global_dofs() +
+ temperature_dof_handler.n_global_dofs() +
+ 2*triangulation.n_global_active_cells() +
+ temperature_dof_handler.n_global_dofs(),
+ ExcInternalError());
+
+ TrilinosWrappers::MPI::Vector joint_solution;
+ IndexSet locally_relevant_dofs(joint_dof_handler.n_dofs());
+ DoFTools::extract_locally_relevant_dofs (joint_dof_handler, locally_relevant_dofs);
+ joint_solution.reinit (locally_relevant_dofs, MPI_COMM_WORLD);
- if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
- {
-
- const FESystem<dim> joint_fe (stokes_fe, 1,
- temperature_fe, 1,
- FE_DGQ<dim>(0), 1);
- DoFHandler<dim> joint_dof_handler (triangulation);
- joint_dof_handler.distribute_dofs (joint_fe);
- Assert (joint_dof_handler.n_dofs() ==
- stokes_dof_handler.n_dofs() +
- temperature_dof_handler.n_dofs() +
- triangulation.n_active_cells(),
- ExcInternalError());
-
- Vector<double> joint_solution (joint_dof_handler.n_dofs());
-
- {
- double minimal_pressure = stokes_solution.block(1)(0);
- for (unsigned int i=0; i<stokes_solution.block(1).size(); ++i)
- minimal_pressure = std::min<double> (stokes_solution.block(1)(i),
- minimal_pressure);
-
- std::vector<unsigned int> local_joint_dof_indices (joint_fe.dofs_per_cell);
- std::vector<unsigned int> local_stokes_dof_indices (stokes_fe.dofs_per_cell);
- std::vector<unsigned int> local_temperature_dof_indices (temperature_fe.dofs_per_cell);
-
- typename DoFHandler<dim>::active_cell_iterator
- joint_cell = joint_dof_handler.begin_active(),
- joint_endc = joint_dof_handler.end(),
- stokes_cell = stokes_dof_handler.begin_active(),
- temperature_cell = temperature_dof_handler.begin_active();
- for (; joint_cell!=joint_endc; ++joint_cell, ++stokes_cell, ++temperature_cell)
- {
- joint_cell->get_dof_indices (local_joint_dof_indices);
- stokes_cell->get_dof_indices (local_stokes_dof_indices);
- temperature_cell->get_dof_indices (local_temperature_dof_indices);
+ {
+ //double minimal_pressure = stokes_solution.block(1)(0);
+ //for (unsigned int i=0; i<stokes_solution.block(1).size(); ++i)
+ // minimal_pressure = std::min<double> (stokes_solution.block(1)(i),
+ // minimal_pressure);
+
+ std::vector<unsigned int> local_joint_dof_indices (joint_fe.dofs_per_cell);
+ std::vector<unsigned int> local_stokes_dof_indices (stokes_fe.dofs_per_cell);
+ std::vector<unsigned int> local_temperature_dof_indices (temperature_fe.dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ joint_cell = joint_dof_handler.begin_active(),
+ joint_endc = joint_dof_handler.end(),
+ stokes_cell = stokes_dof_handler.begin_active(),
+ temperature_cell = temperature_dof_handler.begin_active();
+ for (unsigned int cell_index=0; joint_cell!=joint_endc; ++joint_cell, ++stokes_cell, ++temperature_cell, ++cell_index)
+ if (!joint_cell->is_artificial() && !joint_cell->is_ghost())
+ {
+ joint_cell->get_dof_indices (local_joint_dof_indices);
+ stokes_cell->get_dof_indices (local_stokes_dof_indices);
+ temperature_cell->get_dof_indices (local_temperature_dof_indices);
- for (unsigned int i=0; i<joint_fe.dofs_per_cell; ++i)
+ for (unsigned int i=0; i<joint_fe.dofs_per_cell; ++i)
+ {
if (joint_fe.system_to_base_index(i).first.first == 0)
{
Assert (joint_fe.system_to_base_index(i).second
joint_solution(local_joint_dof_indices[i])
= ((stokes_solution(local_stokes_dof_indices
[joint_fe.system_to_base_index(i).second])
- -
- minimal_pressure)
+ )
*
EquationData::pressure_scaling);
}
}
- else if (joint_fe.system_to_base_index(i).first.first == 1)
- {
- Assert (joint_fe.system_to_base_index(i).second
- <
- local_temperature_dof_indices.size(),
- ExcInternalError());
- joint_solution(local_joint_dof_indices[i])
- = temperature_solution(local_temperature_dof_indices
- [joint_fe.system_to_base_index(i).second]);
- }
+ else if (joint_fe.system_to_base_index(i).first.first == 1)
+ {
+ Assert (joint_fe.system_to_base_index(i).second
+ <
+ local_temperature_dof_indices.size(),
+ ExcInternalError());
+ joint_solution(local_joint_dof_indices[i])
+ = temperature_solution(local_temperature_dof_indices
+ [joint_fe.system_to_base_index(i).second]);
+ }
+ else if (joint_fe.system_to_base_index(i).first.first == 2)
+ {
+ Assert (joint_fe.system_to_base_index(i).second
+ == 0,
+ ExcInternalError());
+ joint_solution(local_joint_dof_indices[i])
+ = joint_cell->subdomain_id();
+ }
+ else if (joint_fe.system_to_base_index(i).first.first == 3)
+ {
+ Assert (joint_fe.system_to_base_index(i).first.first == 3,
+ ExcInternalError());
+ Assert (joint_fe.system_to_base_index(i).second
+ == 0,
+ ExcInternalError());
+ joint_solution(local_joint_dof_indices[i])
+ = estimated_error_per_cell (cell_index);
+ }
else
{
- Assert (joint_fe.system_to_base_index(i).first.first == 2,
- ExcInternalError());
- Assert (joint_fe.system_to_base_index(i).second
- == 0,
- ExcInternalError());
- joint_solution(local_joint_dof_indices[i])
- = joint_cell->subdomain_id();
- }
- }
- }
+ Assert (joint_fe.system_to_base_index(i).first.first == 4,
+ ExcInternalError());
- std::vector<std::string> joint_solution_names (dim, "velocity");
- joint_solution_names.push_back ("p");
- joint_solution_names.push_back ("T");
- joint_solution_names.push_back ("partition");
+ joint_solution(local_joint_dof_indices[i])
+ = local_temperature_dof_indices
+ [joint_fe.system_to_base_index(i).second];
- DataOut<dim> data_out;
-
- data_out.attach_dof_handler (joint_dof_handler);
-
- std::vector<DataComponentInterpretation::DataComponentInterpretation>
- data_component_interpretation
- (dim+3, DataComponentInterpretation::component_is_scalar);
- for (unsigned int i=0; i<dim; ++i)
- data_component_interpretation[i]
- = DataComponentInterpretation::component_is_part_of_vector;
+ }
- data_out.add_data_vector (joint_solution, joint_solution_names,
- DataOut<dim>::type_dof_data,
- data_component_interpretation);
- data_out.build_patches (std::min(stokes_degree, temperature_degree));
+ }
+ }
+ }
- std::ostringstream filename;
- filename << "solution-" << Utilities::int_to_string(timestep_number, 5)
- << ".vtk";
+ std::vector<std::string> joint_solution_names (dim, "velocity");
+ joint_solution_names.push_back ("p");
+ joint_solution_names.push_back ("T");
+ joint_solution_names.push_back ("partition");
+ joint_solution_names.push_back ("error");
+ joint_solution_names.push_back ("TDofIndex");
+
+ DataOut<dim> data_out;
+
+ data_out.attach_dof_handler (joint_dof_handler);
+
+ std::vector<DataComponentInterpretation::DataComponentInterpretation>
+ data_component_interpretation
+ (dim+5, DataComponentInterpretation::component_is_scalar);
+ for (unsigned int i=0; i<dim; ++i)
+ data_component_interpretation[i]
+ = DataComponentInterpretation::component_is_part_of_vector;
+
+ data_out.add_data_vector (joint_solution, joint_solution_names,
+ DataOut<dim>::type_dof_data,
+ data_component_interpretation);
+ data_out.build_patches (1+std::min(stokes_degree, temperature_degree));
+
+ const std::string filename = ("out/solution-" +
+ Utilities::int_to_string (out_index, 5) +
+ "." +
+ Utilities::int_to_string
+ (triangulation.locally_owned_subdomain(), 4) +
+ ".vtu");
+ std::ofstream output (filename.c_str());
+ data_out.write_vtu (output);
- std::ofstream output (filename.str().c_str());
- data_out.write_vtk (output);
+ if (Utilities::System::get_this_mpi_process(MPI_COMM_WORLD) == 0)
+ {
+ std::vector<std::string> filenames;
+ for (unsigned int i=0; i<Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD); ++i)
+ filenames.push_back (std::string("solution-") +
+ Utilities::int_to_string (out_index, 5) +
+ "." +
+ Utilities::int_to_string(i, 4) +
+ ".vtu");
+ const std::string
+ master_filename = ("out/solution-" +
+ Utilities::int_to_string (out_index, 5) +
+ ".pvtu");
+ std::ofstream master (master_filename.c_str());
+ data_out.write_pvtu_record (master, filenames);
}
computing_timer.exit_section ();
+ out_index++;
}
template <int dim>
void BoussinesqFlowProblem<dim>::refine_mesh (const unsigned int max_grid_level)
{
+ TimeBlock<ConditionalOStream> t(pcout, "*refine_mesh", false);
+
computing_timer.enter_section ("Refine mesh structure, part 1");
- Vector<float> local_estimated_error_per_cell (triangulation.n_active_cells());
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "**kelly");
- KellyErrorEstimator<dim>::estimate (temperature_dof_handler,
+ KellyErrorEstimator<dim>::estimate (static_cast<const DoFHandler<dim>&>(temperature_dof_handler),
QGauss<dim-1>(temperature_degree+1),
typename FunctionMap<dim>::type(),
temperature_solution,
- local_estimated_error_per_cell,
+ estimated_error_per_cell,
std::vector<bool>(),
0,
0,
- Utilities::System::get_this_mpi_process(MPI_COMM_WORLD));
+ triangulation.locally_owned_subdomain());
+
+ }
+
+
+#ifdef CONF_BENCHMARK
+  /* Pure refinement to double the number of cells in benchmark mode
+     if this is a pre-refinement step; keep the number of cells
+     constant later on. */
+ if (timestep_number==0)
+ {
+ pcout << "refine_mesh: prerefine, factor 2." << std::endl;
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3333, 0.0);
+ }
+ else
+ {
+ pcout << "refine_mesh: keeping mesh constant." << std::endl;
+ TimeBlock<ConditionalOStream> t(pcout, "**refine&coarsen_fixed_nb");
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3333, 0.0);//0.1, 0.4 ?
+ }
- Vector<double> x_local_estimated_error_per_cell (triangulation.n_active_cells());
- x_local_estimated_error_per_cell = local_estimated_error_per_cell;
- Vector<double> estimated_error_per_cell (triangulation.n_active_cells());
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
- MPI_Allreduce (&x_local_estimated_error_per_cell(0),
- &estimated_error_per_cell(0),
- triangulation.n_active_cells(), MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
#else
- estimated_error_per_cell = x_local_estimated_error_per_cell;
-#endif
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_fraction (triangulation,
+ estimated_error_per_cell,
+ 0.6, 0.2);
- GridRefinement::refine_and_coarsen_fixed_fraction (triangulation,
- estimated_error_per_cell,
- 0.6, 0.2);
+ // limit maximum refinement level
if (triangulation.n_levels() > max_grid_level)
for (typename Triangulation<dim>::active_cell_iterator
cell = triangulation.begin_active(max_grid_level);
cell != triangulation.end(); ++cell)
cell->clear_refine_flag ();
+#endif
- std::vector<TrilinosWrappers::Vector> x_temperature (2);
- x_temperature[0] = temperature_solution;
- x_temperature[1] = old_temperature_solution;
- TrilinosWrappers::BlockVector x_stokes = stokes_solution;
+ TimeBlock<ConditionalOStream> t_a(pcout, "**prepare_solution_transfer", false);
+ std::vector<const TrilinosWrappers::MPI::Vector*> x_temperature (2);
+ x_temperature[0] = &temperature_solution;
+ x_temperature[1] = &old_temperature_solution;
+ TrilinosWrappers::MPI::BlockVector x_stokes = stokes_solution;
- SolutionTransfer<dim,TrilinosWrappers::Vector>
+ parallel::distributed::SolutionTransfer<dim,TrilinosWrappers::MPI::Vector>
temperature_trans(temperature_dof_handler);
- SolutionTransfer<dim,TrilinosWrappers::BlockVector>
+ parallel::distributed::SolutionTransfer<dim,TrilinosWrappers::MPI::BlockVector>
stokes_trans(stokes_dof_handler);
triangulation.prepare_coarsening_and_refinement();
temperature_trans.prepare_for_coarsening_and_refinement(x_temperature);
stokes_trans.prepare_for_coarsening_and_refinement(x_stokes);
+ t_a.close();
+ TimeBlock<ConditionalOStream> t_c(pcout, "**execute_c&r", false);
+
triangulation.execute_coarsening_and_refinement ();
computing_timer.exit_section();
+ t_c.close();
+
setup_dofs ();
+ TimeBlock<ConditionalOStream> t_b(pcout, "**do_solution_transfer");
computing_timer.enter_section ("Refine mesh structure, part 2");
- std::vector<TrilinosWrappers::Vector> tmp (2);
- tmp[0].reinit (temperature_solution);
- tmp[1].reinit (temperature_solution);
- temperature_trans.interpolate(x_temperature, tmp);
+ TrilinosWrappers::MPI::Vector
+ distributed_temp1 (temperature_rhs);
+ TrilinosWrappers::MPI::Vector
+ distributed_temp2 (temperature_rhs);
+
+ std::vector<TrilinosWrappers::MPI::Vector*> tmp (2);
+ tmp[0] = &(distributed_temp1);
+ tmp[1] = &(distributed_temp2);
+ temperature_trans.interpolate(tmp);
+
+// temperature_solution = distributed_temp1;
+ temperature_solution.reinit(distributed_temp1, false, true);
+// old_temperature_solution = distributed_temp2;
+ old_temperature_solution.reinit(distributed_temp2, false, true);
+
+ TrilinosWrappers::MPI::BlockVector
+ distributed_stokes (stokes_rhs);
- temperature_solution = tmp[0];
- old_temperature_solution = tmp[1];
- temperature_constraints.distribute(temperature_solution);
- temperature_constraints.distribute(old_temperature_solution);
+ stokes_trans.interpolate (distributed_stokes);
+// stokes_solution = distributed_stokes;
+ stokes_solution.block(0).reinit(distributed_stokes.block(0), false, true);
+ stokes_solution.block(1).reinit(distributed_stokes.block(1), false, true);
- TrilinosWrappers::BlockVector x_stokes_new = stokes_solution;
- stokes_trans.interpolate (x_stokes, x_stokes_new);
- stokes_solution = x_stokes_new;
rebuild_stokes_matrix = true;
rebuild_stokes_preconditioner = true;
// <code>VectorTools::project</code>, the
// rest is as before.
template <int dim>
-void BoussinesqFlowProblem<dim>::run ()
+void BoussinesqFlowProblem<dim>::run (unsigned int ref)
{
- const unsigned int initial_refinement = (dim == 2 ? 5 : 2);
- const unsigned int n_pre_refinement_steps = (dim == 2 ? 2 : 2);
+ pcout << "this is step-32. ref=" << ref << std::endl;
+ #ifdef CONF_BENCHMARK
+ // create approximately 12*4^(ref/2) cells:
+ const unsigned int initial_refinement = (ref-1)/2;
+ const unsigned int n_pre_refinement_steps = 2+(ref-1)%2;
+#else
+ const unsigned int initial_refinement = ref;//(dim == 2 ? 5 : 2);
+ const unsigned int n_pre_refinement_steps = 2;//(dim == 2 ? 4 : 2);
+#endif
GridGenerator::hyper_shell (triangulation,
Point<dim>(),
EquationData::R1,
12,
true);
-
static HyperShellBoundary<dim> boundary;
triangulation.set_boundary (0, boundary);
triangulation.set_boundary (1, boundary);
+ //GridGenerator::hyper_cube (triangulation, EquationData::R0, EquationData::R1);
+
global_Omega_diameter = GridTools::diameter (triangulation);
triangulation.refine_global (initial_refinement);
pcout << std::endl;
+#ifdef CONF_BENCHMARK
+ static int bench_step=0;
+ if (time>=1e5*EquationData::year_in_seconds)
+ {
+ ++bench_step;
+ if (bench_step>1)
+ break;
+ pcout << "***BEGIN BENCHSTEP" << std::endl;
+ refine_mesh (initial_refinement + n_pre_refinement_steps);
+ }
+ else
+#endif
if ((timestep_number == 0) &&
(pre_refinement_step < n_pre_refinement_steps))
{
++pre_refinement_step;
goto start_time_iteration;
}
+#ifndef CONF_BENCHMARK
else
if ((timestep_number > 0) && (timestep_number % 10 == 0))
refine_mesh (initial_refinement + n_pre_refinement_steps);
+#endif
- output_results ();
+#ifdef CONF_BENCHMARK
+ if (false)//bench_step)
+#else
+ if (timestep_number % 10 == 0 &&
+ Utilities::System::get_n_mpi_processes(MPI_COMM_WORLD) <= 10)
+#endif
+ output_results ();
time += time_step;
++timestep_number;
Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
BoussinesqFlowProblem<2> flow_problem;
- flow_problem.run ();
+
+ unsigned int ref=5;
+ if (argc>=2)
+ {
+ ref = (unsigned int)Utilities::string_to_int(argv[1]);
+ }
+
+ flow_problem.run (ref);
+ print_memory_stats();
}
catch (std::exception &exc)
{
--- /dev/null
+/**
+ * copyright:
+ * Research Group on Numerical Methods for PDEs
+ * University of Göttingen
+ *
+ * lube@math.uni-goettingen.de
+ */
+
+#ifndef TIMEBLOCK_H
+#define TIMEBLOCK_H
+
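+// Small RAII helper: on construction it synchronizes all processes with an
+// MPI_Barrier, prints the given block name to the stream, and starts a timer;
+// when it goes out of scope (or close() is called) it stops the timer and
+// prints the measured time. Typical usage, as elsewhere in this patch:
+//
+//   {
+//     TimeBlock<ConditionalOStream> t(pcout, "assemble");
+//     assemble_system ();
+//   }   // timer stops and the time is printed here
+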
+template<typename STREAM>
+class TimeBlock
+{
+ public:
+ TimeBlock(STREAM & stream_, const char* blockname, bool singleline=true)
+ : m_blockname(blockname),
+ m_singleline(singleline),
+ m_running(true),
+ m_timer(MPI_COMM_WORLD, true),
+ stream(stream_)
+ {
+ MPI_Barrier(MPI_COMM_WORLD);
+ stream << blockname << " ... ";
+ if (singleline)
+ stream << std::flush;
+ else
+ stream << std::endl;
+ m_timer.start();
+ }
+
+ ~TimeBlock()
+ {
+ close();
+ }
+
+ void close()
+ {
+ if (!m_running)
+ return;
+ m_running = false;
+
+ m_timer.stop();
+
+ if (!m_singleline)
+ stream << m_blockname << " took ";
+
+ stream << std::fixed << std::setprecision(4);
+
+ m_timer.print_data(stream);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+
+ private:
+ const char* m_blockname;
+ bool m_singleline;
+ bool m_running;
+ dealii::Timer m_timer;
+ STREAM & stream;
+};
+
+#endif
--- /dev/null
+# $Id$
+
+
+# For the small projects Makefile, you basically need to fill in only
+# four fields.
+#
+# The first is the name of the application. It is assumed that the
+# application name is the same as the base file name of the single C++
+# file from which the application is generated.
+target = $(basename $(shell echo step-*.cc))
+
+# The second field determines whether you want to run your program in
+# debug or optimized mode. The latter is significantly faster, but no
+# run-time checking of parameters and internal states is performed, so
+# you should set this value to `on' while you develop your program,
+# and to `off' when running production computations.
+debug-mode = on
+
+
+# As third field, we need to give the path to the top-level deal.II
+# directory. You need to adjust this to your needs. Since this path is
+# probably the most often needed one in the Makefile internals, it is
+# designated by a single-character variable, since that can be
+# referenced using $D only, i.e. without the parentheses that are
+# required for most other parameters, as e.g. in $(target).
+D = ../../
+
+
+# The last field specifies the names of data and other files that
+# shall be deleted when calling `make clean'. Object and backup files,
+# executables and the like are removed anyway. Here, we give a list of
+# files in the various output formats that deal.II supports.
+clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk *d2
+
+
+
+
+#
+#
+# Usually, you will not need to change anything beyond this point.
+#
+#
+# The next statement tells the `make' program where to find the
+# deal.II top level directory and to include the file with the global
+# settings
+include $D/common/Make.global_options
+
+################################################################
+# This example program will only work if PETSc is installed. If this
+# is not the case, then simply redefine the main targets to do nothing
+#
+# The same holds true if we use threads: PETSc and threads don't
+# seem to work well together...
+ifneq ($(USE_CONTRIB_PETSC)$(enable-threads),yesno)
+ ifneq ($(USE_CONTRIB_PETSC),yes)
+default run clean:
+ @echo
+ @echo "==========================================================="
+ @echo "= This program cannot be compiled without PETSc. Make ="
+ @echo "= sure you have PETSc installed and detected during ="
+ @echo "= configuration of deal.II ="
+ @echo "==========================================================="
+ @echo
+ else
+default run clean:
+ @echo
+ @echo "==========================================================="
+ @echo "= PETSc can not be used when running programs on ="
+ @echo "= multiple threads. Make sure you have specified the ="
+ @echo "= --disable-threads flag upon configuration of deal.II ="
+ @echo "==========================================================="
+ @echo
+ endif
+else
+#
+################################################################
+
+
+
+
+# Since the whole project consists of only one file, we need not
+# consider difficult dependencies. We only have to declare the
+# libraries which we want to link to the object file, and there need
+# to be two sets of libraries: one for the debug mode version of the
+# application and one for the optimized mode. Here we have selected
+# the versions for 2d. Note that the order in which the libraries are
+# given here is important and that your applications won't link
+# properly if they are given in another order.
+#
+# You may need to augment the lists of libraries when compiling your
+# program for other dimensions, or when using third party libraries
+libs.g = $(lib-deal2-2d.g) \
+ $(lib-deal2-3d.g) \
+ $(lib-lac.g) \
+ $(lib-base.g)
+libs.o = $(lib-deal2-2d.o) \
+ $(lib-deal2-3d.o) \
+ $(lib-lac.o) \
+ $(lib-base.o)
+
+
+# We now use the variable defined above which switches between debug and
+# optimized mode to select the set of libraries to link with. Included
+# in the list of libraries is the name of the object file which we
+# will produce from the single C++ file. Note that by default we use
+# the extension .g.o for object files compiled in debug mode and .o for
+# object files in optimized mode (or whatever the local default on your
+# system is instead of .o).
+ifeq ($(debug-mode),on)
+ libraries = $(target).g.$(OBJEXT) $(libs.g)
+else
+ libraries = $(target).$(OBJEXT) $(libs.o)
+endif
+
+
+# Now comes the first production rule: how to link the single object
+# file produced from the single C++ file into the executable. Since
+# this is the first rule in the Makefile, it is the one `make' selects
+# if you call it without arguments.
+$(target) : $(libraries)
+ @echo ============================ Linking $@
+ @$(CXX) -o $@$(EXEEXT) $^ $(LIBS) $(LDFLAGS)
+
+
+# To make running the application somewhat independent of the actual
+# program name, we usually declare a rule `run' which simply runs the
+# program. You can then run it by typing `make run'. This is also
+# useful if you want to call the executable with arguments which do
+# not change frequently. You may then want to add them to the
+# following rule:
+run: $(target)
+ @echo ============================ Running $<
+ @./$(target)$(EXEEXT)
+
+
+
+# As a last rule to the `make' program, we define what to do when
+# cleaning up a directory. This usually involves deleting object files
+# and other automatically created files such as the executable itself,
+# backup files, and data files. Since the latter are usually quite
+# diverse, you need to declare them at the top of this file.
+clean:
+ -rm -f *.$(OBJEXT) *~ Makefile.dep $(target)$(EXEEXT) $(clean-up-files)
+
+
+# Since we have not yet stated how to make an object file from a C++
+# file, we should do so now. Since the many flags passed to the
+# compiler are usually not of much interest, we suppress the actual
+# command line using the `at' sign in the first column of the rules
+# and write the string indicating what we do instead.
+./%.g.$(OBJEXT) :
+ @echo ==============debug========= $(<F)
+ @$(CXX) $(CXXFLAGS.g) -c $< -o $@
+./%.$(OBJEXT) :
+ @echo ==============optimized===== $(<F)
+ @$(CXX) $(CXXFLAGS.o) -c $< -o $@
+
+
+
+# The following statement tells make that the rules `run' and `clean'
+# are not expected to produce files of the same name as Makefile rules
+# usually do.
+.PHONY: run clean
+
+
+# Finally there is a rule which you normally need not care much about:
+# since the executable depends on some include files from the library,
+# besides the C++ application file of course, it is necessary to
+# re-generate the executable when one of the files it depends on has
+# changed. The following rule creates a dependency file
+# `Makefile.dep', which `make' uses to determine when to regenerate
+# the executable. This file is automagically remade whenever needed,
+# i.e. whenever one of the cc-/h-files changed. Make detects whether
+# to remake this file upon inclusion at the bottom of this file.
+#
+# If the creation of Makefile.dep fails, blow it away and fail
+Makefile.dep: $(target).cc Makefile \
+ $(shell echo $D/*/include/*/*.h)
+ @echo ============================ Remaking $@
+ @$D/common/scripts/make_dependencies $(INCLUDE) -B. $(target).cc \
+ > $@ \
+ || (rm -f $@ ; false)
+	@if test -s $@ ; then : ; else rm $@ ; fi
+
+
+# To make the dependencies known to `make', we finally have to include
+# them:
+include Makefile.dep
+
+
+endif # USE_CONTRIB_PETSC
--- /dev/null
+step-6 step-17
--- /dev/null
+<br>
+
+<i>This program was contributed by Timo Heister, Martin Kronbichler and Wolfgang
+Bangerth.
+<br>
+This material is based upon work partly supported by the National
+Science Foundation under Award No. EAR-0426271 and The California Institute of
+Technology. Any opinions, findings, and conclusions or recommendations
+expressed in this publication are those of the author and do not
+necessarily reflect the views of the National Science Foundation or of The
+California Institute of Technology.
+</i>
+
+
+<a name="Intro"></a>
+<h1>Introduction</h1>
+
+Given today's computers, most finite element computations can be done on
+a single machine. The majority of previous tutorial programs therefore
+shows only this, possibly splitting up work among a number of
+processors that, however, can all access the same, shared memory
+space. That said, there are problems that are simply too big for a
+single machine and in that case the problem has to be split up in a
+suitable way among multiple machines each of which contributes its
+part to the whole. A simple way to do that was shown in step-17 and
+step-18, where we show how a program can use <a
+href="http://www.mpi-forum.org/" target="_top">MPI</a> to parallelize
+assembling the linear system, storing it, solving it, and computing
+error estimators. All of these operations scale relatively trivially,
+but there was one significant drawback: for this to be moderately
+simple to implement, each MPI processor had to keep its own copy of
+the entire Triangulation and DoFHandler objects. Consequently, while
+we can suspect (with good reasons) that the operations listed above
+can scale to thousands of computers and problem sizes of billions of
+cells and billions of degrees of freedom, building the one big mesh for the
+entire problem these thousands of computers are solving on every last
+processor is clearly not going to scale: it is going to take forever,
+and maybe more importantly no single machine will have enough memory
+to store a mesh that has a billion cells (at least not at the time of
+writing this). In reality, programs like step-17 and step-18 can
+therefore not be run on more than maybe 100 or 200 processors and even
+there storing the Triangulation and DoFHandler objects consumes the
+vast majority of memory on each machine.
+
+Consequently, we need to approach the problem differently: to scale to
+very large problems each processor can only store its own little piece
+of the Triangulation and DoFHandler objects. deal.II implements such a
+scheme in the parallel::distributed namespace and the classes
+therein. It builds on an external library, <a
+href="http://www.p4est.org/">p4est</a> (a play on the expression
+<i>parallel forest</i> that describes the parallel storage of a
+hierarchically constructed mesh as a forest of quad- or
+oct-trees). You need to <a
+href="../../external-libs/p4est.html">install and configure p4est</a>
+but apart from that all of its workings are hidden under the surface
+of deal.II.
+
+In essence, what the parallel::distributed::Triangulation class and
+associated parallel::distributed::DoFHandler objects do is to split
+the global mesh so that every processor only stores a small bit it
+"owns" along with one layer of "ghost" cells that surround the ones it
+owns. What happens in the rest of the domain on which we want to solve
+the partial differential equation is unknown to each processor and can
+only be inferred through communication with other machines if such
+information is needed. This implies that we also have to think about
+problems in a different way than we did in, for example, step-17 and
+step-18: no processor can have the entire solution vector for
+postprocessing, for example, and every part of a program has to be
+parallelized because no processor has all the information necessary
+for sequential operations.
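+
+To give an idea of what this looks like in code, here is a minimal sketch
+(the names are the same ones the complete program below uses; the mesh and
+the polynomial degree are chosen arbitrarily for illustration): every
+processor creates the same triangulation object, but only ever stores the
+cells it owns plus a layer of ghost cells, and it can ask which degrees of
+freedom it owns and which ones it additionally needs to read:
+@code
+const unsigned int dim = 2;
+
+parallel::distributed::Triangulation<dim> triangulation (MPI_COMM_WORLD);
+GridGenerator::hyper_cube (triangulation);
+triangulation.refine_global (5);
+
+FE_Q<dim>       fe (2);
+DoFHandler<dim> dof_handler (triangulation);
+dof_handler.distribute_dofs (fe);
+
+// the DoFs this processor owns, and the ones it needs to read when
+// evaluating the solution on its own cells (owned plus ghost entries)
+IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs ();
+IndexSet locally_relevant_dofs;
+DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);
+@endcode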
+
+A general overview of how this parallelization happens is described in
+the @ref distributed documentation module. You should read it for a
+top-level overview before reading through the source code of this
+program. A concise discussion of many terms we will use in the program
+is also provided in the @ref distributed_paper "Distributed Computing paper".
+It is probably worthwhile reading it for background information on how
+things work internally in this program.
+
+
+<h3>The testcase</h3>
+
+This program essentially re-solves what we already do in
+step-6, i.e. it solves the Laplace equation
+@f{align*}
+ -\Delta u &= f \qquad &&\text{in}\ \Omega=[0,1]^2, \\
+ u &= 0 \qquad &&\text{on}\ \partial\Omega.
+@f}
+The difference of course is now that we want to do so on a mesh that
+may have a billion cells, with a billion or so degrees of
+freedom. There is no doubt that doing so is completely silly for such
+a simple problem, but the point of a tutorial program is, after all,
+not to do something useful but to show how useful programs can be
+implemented using deal.II. Be that as it may, to make things at least
+a tiny bit interesting, we choose the right hand side as a
+discontinuous function,
+@f{align*}
+ f(x,y)
+ =
+ \left\{
+ \begin{array}{ll}
+    1 & \text{if}\ y > \frac 12 + \frac 14 \sin(4\pi x), \\
+ -1 & \text{otherwise},
+ \end{array}
+ \right.
+@f}
+so that the solution has a singularity along the sinusoidal line
+snaking its way through the domain. As a consequence, mesh refinement
+will be concentrated along this line. You can see this in the mesh
+picture shown below in the results section.
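+
+In the program, this right hand side is simply evaluated at every quadrature
+point while assembling the linear system. As a small sketch, using the same
+comparison as the assembly loop further down and writing <code>p</code> for
+the quadrature point:
+@code
+// f(p) = 1 above the sinusoidal line, -1 below it
+const double f_value = (p[1] > 0.5 + 0.25 * std::sin (4 * numbers::PI * p[0])
+                        ? 1.
+                        : -1.);
+@endcode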
+
+Rather than continuing here and giving a long introduction, let us go
+straight to the program code. If you have read through step-6 and the
+@ref distributed documentation module, most of the things that are going
+to happen should be familiar to you already.
--- /dev/null
+techniques
--- /dev/null
+<h1>Results</h1>
+
+DOCUMENT HOW TO RUN THE PROGRAM
+
+When run on a sufficiently large number of machines (say a few
+thousand), this program can relatively easily solve problems with well
+over one billion unknowns in less than a minute. On the other hand,
+such big problems can no longer be visualized, so we also ran the
+program on only 16 processors. Here are a mesh, along with its
+partitioning onto the 16 processors, and the corresponding solution:
+
+<TABLE WIDTH="100%">
+<tr>
+<td>
+ @image html step-40.mesh.png
+</td>
+<td>
+ @image html step-40.solution.png
+</td>
+</tr>
+</table>
+
+The mesh on the left has a mere 7,069 cells. This is of course a
+problem we would easily have been able to solve already on a single
+processor using step-6, but the point of the program was to show how
+to write a program that scales to many more machines. For example,
+here are two graphs that show how the run time of a large number of parts
+of the program scales on problems with around 52 and 375 million degrees of
+freedom if we take more and more processors (these and the next couple of
+graphs are taken from the @ref distributed_paper "Distributed Computing paper"):
+
+<TABLE WIDTH="100%">
+<tr>
+<td>
+ @image html step-40.strong2.png
+</td>
+<td>
+ @image html step-40.strong.png
+</td>
+</tr>
+</table>
+
+As can clearly be seen, the program scales nicely to very large
+numbers of processors. The curves, in particular the linear solver, become a
+bit wobbly at the right end of the graphs since each processor has too little
+to do to offset the cost of communication (the part of the whole problem each
+processor has to solve in the above two examples is only 13,000 and 90,000
+degrees of freedom when 4,096 processors are used).
+
+While the strong scaling graphs above show that we can solve a problem of
+fixed size faster and faster if we take more and more processors, the more
+interesting question may be how big problems can become so that they can still
+be solved within a reasonable time on a machine of a particular size. We show
+this in the following two graphs for 256 and 4096 processors:
+
+<TABLE WIDTH="100%">
+<tr>
+<td>
+ @image html step-40.256.png
+</td>
+<td>
+ @image html step-40.4096.png
+</td>
+</tr>
+</table>
+
+What these graphs show is that all parts of the program scale linearly with
+the number of degrees of freedom. This time, lines are wobbly at the left as
+the size of local problems is too small. For more discussions of these results
+we refer to the @ref distributed_paper "Distributed Computing paper".
+
+So how large are the largest problems one can solve? At the time of writing
+this, the
+limiting factor is that the program uses the BoomerAMG algebraic
+multigrid method from the <a
+href="http://acts.nersc.gov/hypre/" target="_top">hypre package</a> as
+a preconditioner, which unfortunately uses signed 32-bit integers to
+index the elements of a %distributed matrix. This limits the size of
+problems to $2^{31}-1=2,147,483,647$ degrees of freedom. From the graphs
+above it is obvious that the scalability would extend beyond this
+number, and one could expect that using more than the 4,096 machines
+shown above would also further reduce the compute time. That said, one
+can certainly expect that this limit will eventually be lifted by the
+hypre developers.
+
+
+
+<a name="extensions"></a>
+<h3>Possibilities for extensions</h3>
+
+In a sense, this program is the ultimate solver for the Laplace
+equation: it can essentially solve the equation to whatever accuracy
+you want, if only you have enough processors available. Since the
+Laplace equation by itself is not terribly interesting at this level
+of accuracy, the more interesting possibilities for extension
+therefore concern not so much this program but what comes beyond
+it. For example, several of the other programs in this tutorial have
+significant run times, especially in 3d. It would therefore be
+interesting to use the techniques explained here to extend other
+programs to support parallel distributed computations. We have done
+this for step-31 in the step-32 tutorial program, but the same would
+apply to, for example, step-23 and step-25 for hyperbolic time
+dependent problems, step-33 for gas dynamics, or step-35 for the
+Navier-Stokes equations.
+
+Maybe equally interesting is the problem of postprocessing. As
+mentioned above, we only show pictures of the solution and the mesh
+for 16 processors because 4,096 processors solving 1 billion unknowns
+would produce graphical output on the order of several tens of
+gigabytes. Currently, no program is able to visualize this amount of
+data in any reasonable way unless it also runs on at least several
+hundred processors. There are, however, approaches where visualization
+programs directly communicate with the solvers on each processor, with each
+visualization process rendering the part of the scene computed by the
+solver on that processor. Implementing such an interface would allow one
+to quickly visualize things that are otherwise not amenable to
+graphical display.
--- /dev/null
+Solving the Laplace equation on adaptive meshes on thousands of processors.
--- /dev/null
+/* $Id$ */
+/* Author: Wolfgang Bangerth, Texas A&M University, 2009, 2010 */
+/* Timo Heister, University of Goettingen, 2009, 2010 */
+
+/* $Id$ */
+/* */
+/* Copyright (C) 2009, 2010 by Timo Heister and the deal.II authors */
+/* */
+/* This file is subject to QPL and may not be distributed */
+/* without copyright and license information. Please refer */
+/* to the file deal.II/doc/license.html for the text and */
+/* further information on this license. */
+
+#include <base/quadrature_lib.h>
+#include <base/function.h>
+#include <base/utilities.h>
+#include <base/conditional_ostream.h>
+#include <base/index_set.h>
+#include <base/timer.h>
+#include <lac/vector.h>
+#include <lac/full_matrix.h>
+#include <lac/solver_cg.h>
+#include <lac/constraint_matrix.h>
+#include <lac/compressed_simple_sparsity_pattern.h>
+#include <lac/sparsity_tools.h>
+
+#include <lac/petsc_parallel_sparse_matrix.h>
+#include <lac/petsc_parallel_vector.h>
+#include <lac/petsc_solver.h>
+#include <lac/petsc_precondition.h>
+
+#include <grid/grid_generator.h>
+#include <grid/tria_accessor.h>
+#include <grid/tria_iterator.h>
+#include <dofs/dof_accessor.h>
+#include <dofs/dof_tools.h>
+#include <fe/fe_values.h>
+#include <fe/fe_q.h>
+#include <numerics/vectors.h>
+#include <numerics/data_out.h>
+#include <numerics/error_estimator.h>
+
+#include <distributed/tria.h>
+#include <distributed/grid_refinement.h>
+
+#include <fstream>
+#include <iostream>
+
+using namespace dealii;
+
+
+template <int dim>
+class LaplaceProblem
+{
+ public:
+ LaplaceProblem ();
+ ~LaplaceProblem ();
+
+ void run (const unsigned int initial_global_refine);
+
+ private:
+ void setup_system ();
+ void assemble_system ();
+ void solve ();
+ void refine_grid ();
+ void output_results (const unsigned int cycle) const;
+
+ MPI_Comm mpi_communicator;
+
+ parallel::distributed::Triangulation<dim> triangulation;
+
+ DoFHandler<dim> dof_handler;
+ FE_Q<dim> fe;
+
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+
+ ConstraintMatrix constraints;
+
+ PETScWrappers::MPI::SparseMatrix system_matrix;
+ PETScWrappers::MPI::Vector locally_relevant_solution;
+ PETScWrappers::MPI::Vector system_rhs;
+
+ ConditionalOStream pcout;
+};
+
+
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem ()
+ :
+ mpi_communicator (MPI_COMM_WORLD),
+ triangulation (mpi_communicator,
+ typename Triangulation<dim>::MeshSmoothing
+ (Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening)),
+ dof_handler (triangulation),
+ fe (2),
+ pcout (std::cout,
+ (Utilities::System::
+ get_this_mpi_process(mpi_communicator)
+ == 0))
+{}
+
+
+
+template <int dim>
+LaplaceProblem<dim>::~LaplaceProblem ()
+{
+ dof_handler.clear ();
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::setup_system ()
+{
+ locally_relevant_solution.reinit (mpi_communicator,
+ locally_owned_dofs,
+ locally_relevant_dofs);
+ system_rhs.reinit (mpi_communicator,
+ dof_handler.n_dofs(),
+ dof_handler.n_locally_owned_dofs());
+
+ locally_relevant_solution = 0;
+ system_rhs = 0;
+
+ constraints.clear ();
+ constraints.reinit (locally_relevant_dofs);
+
+ DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ VectorTools::interpolate_boundary_values (dof_handler,
+ 0,
+ ZeroFunction<dim>(),
+ constraints);
+ constraints.close ();
+
+
+ CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+ dof_handler.n_dofs(),
+ locally_relevant_dofs);
+ DoFTools::make_sparsity_pattern (dof_handler,
+ csp,
+ constraints, false);
+ SparsityTools::distribute_sparsity_pattern (csp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ mpi_communicator,
+ locally_relevant_dofs);
+ system_matrix.reinit (mpi_communicator,
+ csp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ Utilities::System::get_this_mpi_process(mpi_communicator));
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_system ()
+{
+ const QGauss<dim> quadrature_formula(3);
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points |
+ update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
+ {
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ fe_values.reinit (cell);
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+ fe_values.shape_grad(j,q_point) *
+ fe_values.JxW(q_point));
+
+ cell_rhs(i) += (fe_values.shape_value(i,q_point) *
+ (fe_values.quadrature_point(q_point)[1]
+ >
+ 0.5+0.25*sin(4.0*numbers::PI*fe_values.quadrature_point(q_point)[0])
+ ? 1 : -1) *
+ fe_values.JxW(q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
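+          // copy the local contributions into the global matrix and right
+          // hand side; distribute_local_to_global also resolves hanging node
+          // and boundary constraints on the fly, so no processor ever needs
+          // to see the whole system to eliminate them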
+ constraints.distribute_local_to_global (cell_matrix,
+ cell_rhs,
+ local_dof_indices,
+ system_matrix,
+ system_rhs);
+ }
+
+ system_matrix.compress ();
+ system_rhs.compress ();
+}
+
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::solve ()
+{
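+  // the Krylov solver needs a vector it can read and write without ghost
+  // entries, so we solve into a completely distributed vector and copy the
+  // result into the ghosted locally_relevant_solution afterwards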
+ PETScWrappers::MPI::Vector
+ completely_distributed_solution (mpi_communicator,
+ dof_handler.n_dofs(),
+ dof_handler.n_locally_owned_dofs());
+
+ SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
+
+ PETScWrappers::SolverCG solver(solver_control, mpi_communicator);
+ PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
+
+ solver.solve (system_matrix, completely_distributed_solution, system_rhs,
+ preconditioner);
+
+ pcout << " Solved in " << solver_control.last_step()
+ << " iterations." << std::endl;
+
+ constraints.distribute (completely_distributed_solution);
+
+ locally_relevant_solution = completely_distributed_solution;
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::refine_grid ()
+{
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+ KellyErrorEstimator<dim>::estimate (dof_handler,
+ QGauss<dim-1>(3),
+ typename FunctionMap<dim>::type(),
+ locally_relevant_solution,
+ estimated_error_per_cell);
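+  // flag cells for refinement and coarsening; the variant of the function
+  // from namespace parallel::distributed::GridRefinement determines the
+  // thresholds from the indicators of all processors combined, so that
+  // globally roughly 30 per cent of the cells are refined and 3 per cent
+  // are coarsened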
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.03);
+ triangulation.execute_coarsening_and_refinement ();
+}
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::output_results (const unsigned int cycle) const
+{
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+ data_out.add_data_vector (locally_relevant_solution, "u");
+
+ Vector<float> subdomain (triangulation.n_active_cells());
+ // could just fill entire vector with subdomain_id()
+ {
+ unsigned int index = 0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ subdomain(index) = (cell->is_ghost() || cell->is_artificial()
+ ?
+ -1
+ :
+ cell->subdomain_id());
+ }
+ data_out.add_data_vector (subdomain, "subdomain");
+ data_out.build_patches ();
+
+ const std::string filename = ("solution-" +
+ Utilities::int_to_string (cycle, 2) +
+ "." +
+ Utilities::int_to_string
+ (triangulation.locally_owned_subdomain(), 4));
+
+ std::ofstream output ((filename + ".vtu").c_str());
+ data_out.write_vtu (output);
+
+ if (Utilities::System::get_this_mpi_process(mpi_communicator) == 0)
+ {
+ std::vector<std::string> filenames;
+ for (unsigned int i=0;
+ i<Utilities::System::get_n_mpi_processes(mpi_communicator);
+ ++i)
+ filenames.push_back ("solution-" +
+ Utilities::int_to_string (cycle, 2) +
+ "." +
+ Utilities::int_to_string (i, 4) +
+ ".vtu");
+
+ std::ofstream master_output ((filename + ".pvtu").c_str());
+ data_out.write_pvtu_record (master_output, filenames);
+ }
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::run (const unsigned int initial_global_refine)
+{
+ const unsigned int n_cycles = 12;
+ for (unsigned int cycle=0; cycle<n_cycles; ++cycle)
+ {
+ pcout << "Cycle " << cycle << ':' << std::endl;
+
+ if (cycle == 0)
+ {
+ GridGenerator::hyper_cube (triangulation);
+ triangulation.refine_global (initial_global_refine);
+ }
+ else
+ refine_grid ();
+
+ pcout << " Number of active cells: "
+ << triangulation.n_global_active_cells()
+ << std::endl;
+
+ dof_handler.distribute_dofs (fe);
+
+ // eliminate
+ locally_owned_dofs = dof_handler.locally_owned_dofs ();
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+
+ pcout << " Number of degrees of freedom: "
+ << dof_handler.n_dofs()
+ << std::endl;
+
+ setup_system ();
+
+ assemble_system ();
+ solve ();
+
+ if (Utilities::System::get_n_mpi_processes(mpi_communicator) <= 100)
+ output_results (cycle);
+
+ pcout << std::endl;
+ }
+}
+
+
+
+int main(int argc, char *argv[])
+{
+ try
+ {
+      PetscInitialize(&argc, &argv, 0, 0);
+ deallog.depth_console (0);
+
+ int refine=5;
+ if (argc>1)
+ {
+ refine = (unsigned int)Utilities::string_to_int(argv[1]);
+ }
+
+ {
+ LaplaceProblem<2> laplace_problem_2d;
+ laplace_problem_2d.run (refine);
+ }
+
+ PetscFinalize();
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+/* $Id$ */
+/* Author: Wolfgang Bangerth, Texas A&M University, 2009 */
+/* Timo Heister, University of Goettingen, 2009 */
+
+/* $Id$ */
+/* */
+/* Copyright (C) 2009, 2010 by Timo Heister and the deal.II authors */
+/* */
+/* This file is subject to QPL and may not be distributed */
+/* without copyright and license information. Please refer */
+/* to the file deal.II/doc/license.html for the text and */
+/* further information on this license. */
+
+// comment to use PETSc:
+//#define USE_TRILINOS
+
+
+#include <base/quadrature_lib.h>
+#include <base/function.h>
+#include <base/logstream.h>
+#include <base/utilities.h>
+#include <base/conditional_ostream.h>
+#include <base/index_set.h>
+#include <base/timer.h>
+#include <lac/vector.h>
+#include <lac/full_matrix.h>
+#include <lac/sparse_matrix.h>
+#include <lac/solver_cg.h>
+#include <lac/precondition.h>
+#include <lac/constraint_matrix.h>
+#include <lac/compressed_simple_sparsity_pattern.h>
+#include <lac/sparsity_tools.h>
+
+#ifdef USE_TRILINOS
+#include <lac/trilinos_precondition.h>
+#include <lac/trilinos_sparse_matrix.h>
+#include <lac/trilinos_sparsity_pattern.h>
+#include <lac/trilinos_vector.h>
+#else
+#include <lac/petsc_parallel_sparse_matrix.h>
+#include <lac/petsc_parallel_vector.h>
+#include <lac/petsc_solver.h>
+#include <lac/petsc_precondition.h>
+#endif
+
+#include <grid/grid_generator.h>
+#include <grid/tria_accessor.h>
+#include <grid/tria_iterator.h>
+#include <grid/tria_boundary_lib.h>
+#include <grid/grid_out.h>
+#include <grid/filtered_iterator.h>
+#include <dofs/dof_accessor.h>
+#include <dofs/dof_tools.h>
+#include <fe/fe_values.h>
+#include <fe/fe_q.h>
+#include <numerics/vectors.h>
+#include <numerics/matrices.h>
+#include <numerics/data_out.h>
+#include <numerics/error_estimator.h>
+
+#include <distributed/tria.h>
+#include <distributed/grid_refinement.h>
+#include <distributed/solution_transfer.h>
+
+#include <fstream>
+#include <iostream>
+
+#include "timeblock.h"
+
+using namespace dealii;
+
+void print_it(Utilities::System::MinMaxAvg & result)
+{
+ std::cout// << "sum: " << result.sum
+ << " avg: " << (long)result.avg/1024
+ << " min: " << (long)result.min/1024 << " @" << result.min_index
+ << " max: " << (long)result.max/1024 << " @" << result.max_index
+ << std::endl;
+}
+
+void print_memory_stats()
+{
+ int myid = Utilities::System::get_this_mpi_process(MPI_COMM_WORLD);
+ Utilities::System::MemoryStats stats;
+ Utilities::System::get_memory_stats(stats);
+ Utilities::System::MinMaxAvg r;
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmPeak, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmPeak: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmSize, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmSize: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmHWM, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmHWM: ";
+ print_it(r);
+ }
+ Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD, stats.VmRSS, r);
+ if (myid==0)
+ {
+ std::cout << "MEM: VmRSS: ";
+ print_it(r);
+ }
+}
+
+
+template <int dim>
+class LaplaceProblem
+{
+ public:
+ LaplaceProblem ();
+ ~LaplaceProblem ();
+
+ void run (unsigned int refine);
+
+ private:
+ void setup_system ();
+ void assemble_system ();
+ void solve ();
+ void refine_grid ();
+ void output_results (const unsigned int cycle) const;
+
+ parallel::distributed::Triangulation<dim> triangulation;
+
+ DoFHandler<dim> dof_handler;
+ FE_Q<dim> fe;
+
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+
+ ConstraintMatrix constraints;
+
+#ifdef USE_TRILINOS
+ TrilinosWrappers::SparseMatrix system_matrix;
+ TrilinosWrappers::MPI::Vector solution;
+ TrilinosWrappers::MPI::Vector system_rhs;
+ parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector> soltrans;
+#else
+ PETScWrappers::MPI::SparseMatrix system_matrix;
+ PETScWrappers::MPI::Vector solution;
+ PETScWrappers::MPI::Vector system_rhs;
+ parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector> soltrans;
+#endif
+
+ ConditionalOStream pcout;
+
+
+};
+
+
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem ()
+ :
+ triangulation (MPI_COMM_WORLD
+ ,typename Triangulation<dim>::MeshSmoothing (Triangulation<dim>::smoothing_on_refinement | Triangulation<dim>::smoothing_on_coarsening)
+ ),
+ dof_handler (triangulation),
+ fe (2),
+ soltrans(dof_handler),
+ pcout (std::cout,
+ (Utilities::System::
+ get_this_mpi_process(MPI_COMM_WORLD)
+ == 0))
+{}
+
+
+
+template <int dim>
+LaplaceProblem<dim>::~LaplaceProblem ()
+{
+ dof_handler.clear ();
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::setup_system ()
+{
+ TimeBlock<ConditionalOStream> t(pcout, "setup_system", false);
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "_init_vectors");
+#ifdef USE_TRILINOS
+ solution.reinit (locally_relevant_dofs, MPI_COMM_WORLD);
+ system_rhs.reinit (locally_owned_dofs, MPI_COMM_WORLD);
+#else
+ solution.reinit ( MPI_COMM_WORLD, locally_owned_dofs, locally_relevant_dofs);
+ system_rhs.reinit (MPI_COMM_WORLD, dof_handler.n_dofs(),
+ dof_handler.n_locally_owned_dofs());
+
+ solution = 0;
+ system_rhs = 0;
+#endif
+ }
+
+ constraints.clear ();
+ constraints.reinit (locally_relevant_dofs);
+
+ {
+
+ TimeBlock<ConditionalOStream> t(pcout, "_make_hn");
+
+ DoFTools::make_hanging_node_constraints (static_cast<const DoFHandler<dim>&>(dof_handler),
+ constraints);
+ }
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "_interpol_bv");
+ VectorTools::interpolate_boundary_values (static_cast<const DoFHandler<dim>&>(dof_handler),
+ 0,
+ ZeroFunction<dim>(),
+ constraints);
+ constraints.close ();
+ }
+
+
+ CompressedSimpleSparsityPattern csp (dof_handler.n_dofs(),
+ dof_handler.n_dofs(),
+ locally_relevant_dofs);
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "_make_sp", false);
+
+ DoFTools::make_sparsity_pattern (static_cast<const DoFHandler<dim>&>(dof_handler),
+ csp,
+ constraints, false);
+ }
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "_init_matrix", false);
+#ifdef USE_TRILINOS
+ system_matrix.reinit (locally_owned_dofs, csp, MPI_COMM_WORLD, true);
+#else
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "__prep_csp", false);
+ SparsityTools::distribute_sparsity_pattern<>(csp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ MPI_COMM_WORLD,
+ locally_relevant_dofs);
+ }
+
+ system_matrix.reinit (MPI_COMM_WORLD,
+ csp,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ Utilities::System::get_this_mpi_process(MPI_COMM_WORLD));
+#if (PETSC_VERSION_MAJOR <= 2)
+// MatSetOption (system_matrix, MAT_YES_NEW_NONZERO_LOCATIONS);
+#else
+// MatSetOption (system_matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE);
+// MatSetOption (system_matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_TRUE);
+#endif
+
+#endif
+ }
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_system ()
+{
+ TimeBlock<ConditionalOStream> t(pcout, "assemble");
+
+ const QGauss<dim> quadrature_formula(3);
+
+ FEValues<dim> fe_values (fe, quadrature_formula,
+ update_values | update_gradients |
+ update_quadrature_points |
+ update_JxW_values);
+
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = quadrature_formula.size();
+
+ FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
+ Vector<double> cell_rhs (dofs_per_cell);
+
+ std::vector<unsigned int> local_dof_indices (dofs_per_cell);
+
+ typename DoFHandler<dim>::active_cell_iterator
+ cell = dof_handler.begin_active(),
+ endc = dof_handler.end();
+ for (; cell!=endc; ++cell)
+ if (cell->subdomain_id() == triangulation.locally_owned_subdomain())
+ {
+ cell_matrix = 0;
+ cell_rhs = 0;
+
+ fe_values.reinit (cell);
+
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+ fe_values.shape_grad(j,q_point) *
+ fe_values.JxW(q_point));
+
+ double v = fe_values.quadrature_point(q_point)[0];
+ v = 0.5+0.25*sin(4.0*numbers::PI*v);
+
+ cell_rhs(i) += (fe_values.shape_value(i,q_point) *
+ (v < fe_values.quadrature_point(q_point)[1]
+ ? 1 : -1) *
+ fe_values.JxW(q_point));
+ }
+
+ cell->get_dof_indices (local_dof_indices);
+
+ constraints.distribute_local_to_global (cell_matrix,
+ cell_rhs,
+ local_dof_indices,
+ system_matrix,
+ system_rhs);
+ }
+
+#ifdef USE_TRILINOS
+ system_rhs.compress (Add);
+#else
+ system_rhs.compress ();
+#endif
+ system_matrix.compress ();
+}
+
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::solve ()
+{
+ // we do not want to get spammed with solver info.
+ deallog.depth_console (0);
+
+ TimeBlock<ConditionalOStream> t(pcout, "solve", false);
+
+#ifdef USE_TRILINOS
+ TrilinosWrappers::MPI::Vector distributed_solution (locally_owned_dofs,
+ MPI_COMM_WORLD);
+
+ distributed_solution.reinit(solution, false, true);
+#else
+
+ PETScWrappers::MPI::Vector distributed_solution(MPI_COMM_WORLD,
+ dof_handler.n_dofs(),
+ dof_handler.n_locally_owned_dofs());
+
+ distributed_solution = solution;
+#endif
+
+ print_memory_stats();
+
+ SolverControl solver_control (solution.size(), 1e-12);
+#ifdef USE_TRILINOS
+ SolverCG<TrilinosWrappers::MPI::Vector> solver (solver_control);
+
+ TrilinosWrappers::PreconditionAMG preconditioner;
+ preconditioner.initialize(system_matrix);
+#else
+ PETScWrappers::SolverCG solver(solver_control, MPI_COMM_WORLD);
+ PETScWrappers::PreconditionBlockJacobi preconditioner(system_matrix);
+
+#endif
+
+ { // memory consumption
+ pcout << "Mem: Tria (p4est) DofH Constraints Mat X Rhs I_lo I_lr"
+ << std::endl;
+// MPI_Barrier(MPI_COMM_WORLD);
+
+ pcout << "MEM: "
+ << " " << triangulation.memory_consumption()
+ << " " << triangulation.memory_consumption_p4est()
+ << " " << dof_handler.memory_consumption()
+ << " " << constraints.memory_consumption()
+ << " " << system_matrix.memory_consumption()
+ << " " << solution.memory_consumption()
+ << " " << system_rhs.memory_consumption()
+ << " " << locally_owned_dofs.memory_consumption()
+ << " " << locally_relevant_dofs.memory_consumption()
+// << " " << preconditioner.memory_consumption()
+ << " ~sum=" << (triangulation.memory_consumption() + dof_handler.memory_consumption()
+ + constraints.memory_consumption() + system_matrix.memory_consumption()
+ + solution.memory_consumption() + system_rhs.memory_consumption()
+ + locally_owned_dofs.memory_consumption()
+ + locally_relevant_dofs.memory_consumption() )/1024
+ << " kB"
+ << std::endl;
+
+// MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+
+ solver.solve (system_matrix, distributed_solution, system_rhs,
+ preconditioner);
+
+ print_memory_stats();
+
+ pcout << " Solved in " << solver_control.last_step()
+ << " iterations." << std::endl;
+
+ constraints.distribute (distributed_solution);
+ solution = distributed_solution;
+
+#ifndef USE_TRILINOS
+ solution.update_ghost_values();
+#endif
+
+ deallog.depth_console (1);
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::refine_grid ()
+{
+ TimerOutput computing_timer(pcout, TimerOutput::summary,
+ TimerOutput::wall_times);
+
+ computing_timer.enter_section ("Estimating");
+
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+
+ {
+
+ TimeBlock<ConditionalOStream> t(pcout, "kelly");
+
+ KellyErrorEstimator<dim>::estimate (static_cast<const DoFHandler<dim>&>(dof_handler),
+ QGauss<dim-1>(3),
+ typename FunctionMap<dim>::type(),
+ solution,
+ estimated_error_per_cell);
+ }
+
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("Marking");
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "marking");
+
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_number (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.03);
+ }
+
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("Refining");
+
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "prep_c&r");
+
+ triangulation.prepare_coarsening_and_refinement();
+ }
+
+ if (0)
+ {
+ //output refinement information for
+ //debugging
+
+ const std::string filename = ("ref." +
+ Utilities::int_to_string
+ (triangulation.locally_owned_subdomain(), 4) +
+ ".vtk");
+ std::ofstream output (filename.c_str());
+
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+
+ unsigned int n_coarse=0;
+ Vector<float> subdomain (triangulation.n_active_cells());
+ {
+ unsigned int index = 0;
+
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ {
+ subdomain(index)=0;
+
+ if (cell->is_ghost() || cell->is_artificial())
+ subdomain(index)=-4;
+
+ if (cell->refine_flag_set())
+ subdomain(index)+=1;
+ if (cell->coarsen_flag_set())
+ {
+ subdomain(index)+=2;
+ ++n_coarse;
+ }
+ }
+ }
+ std::cout << "id=" << triangulation.locally_owned_subdomain() << " n_coarsen=" << n_coarse << std::endl;
+
+ data_out.add_data_vector (subdomain, "info");
+ data_out.build_patches ();
+ data_out.write_vtk (output);
+ }
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "soltrans.prep");
+
+ soltrans.prepare_for_coarsening_and_refinement(solution);
+ }
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "c&r", false);
+
+ triangulation.execute_coarsening_and_refinement ();
+ }
+
+ computing_timer.exit_section();
+}
+
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::output_results (const unsigned int cycle) const
+{
+ const std::string filename = ("solution-" +
+ Utilities::int_to_string (cycle, 2) +
+ "." +
+ Utilities::int_to_string
+ (triangulation.locally_owned_subdomain(), 4) +
+ ".d2");
+ std::ofstream output (filename.c_str());
+
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+ data_out.add_data_vector (solution, "u");
+
+ Vector<float> subdomain (triangulation.n_active_cells());
+ {
+ unsigned int index = 0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell, ++index)
+ subdomain(index) = (cell->is_ghost() || cell->is_artificial()
+ ?
+ -1
+ :
+ cell->subdomain_id());
+ }
+ data_out.add_data_vector (subdomain, "subdomain");
+
+ data_out.build_patches ();
+
+ data_out.write_deal_II_intermediate (output);
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::run (unsigned int refine)
+{
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "empty");
+ }
+
+ unsigned int cycles = 12;
+
+ pcout << "running " << cycles << " cycles with refine= " << refine <<
+#ifdef USE_TRILINOS
+ " with Trilinos"
+#else
+ " with PETSc"
+#endif
+ << std::endl;
+
+ for (unsigned int cycle=0; cycle<cycles; ++cycle)
+ {
+ TimerOutput computing_timer(pcout, TimerOutput::summary,
+ TimerOutput::wall_times);
+
+ pcout << "Cycle " << cycle << ':' << std::endl;
+
+ computing_timer.enter_section ("Mesh handling");
+ if (cycle == 0)
+ {
+ GridGenerator::hyper_cube (triangulation);
+ triangulation.refine_global (refine);
+ }
+ else
+ refine_grid ();
+
+
+
+ pcout << " Number of active cells: "
+ << triangulation.n_global_active_cells()
+ << std::endl;
+/* pcout << " (";
+ for (unsigned int i=0; i<triangulation.n_active_cells().size(); ++i)
+ pcout << triangulation.n_active_cells()[i]
+ << (i != triangulation.n_active_cells().size()-1 ? "+" : "");
+ pcout << ")" << std::endl;*/
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("DoFs handling");
+
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "dist_dofs");
+ dof_handler.distribute_dofs (fe);
+ }
+
+
+ locally_owned_dofs = dof_handler.locally_owned_dofs ();
+ DoFTools::extract_locally_relevant_dofs (dof_handler,
+ locally_relevant_dofs);
+
+ pcout << " Number of degrees of freedom: "
+ << dof_handler.n_dofs()
+ << std::endl;
+/* pcout << " (";
+ for (unsigned int i=0; i<dof_handler.n_dofs().size(); ++i)
+ pcout << dof_handler.n_dofs()[i]
+ << (i != dof_handler.n_dofs().size()-1 ? "+" : "");
+ pcout << ")" << std::endl;*/
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("Setting up system");
+ setup_system ();
+
+ if (cycle>0)
+ {
+ TimeBlock<ConditionalOStream> t(pcout, "soltrans.interp");
+
+ soltrans.interpolate(solution);
+ }
+
+
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("Assembling system");
+ assemble_system ();
+ computing_timer.exit_section();
+
+ computing_timer.enter_section ("Solving system");
+ solve ();
+ computing_timer.exit_section();
+
+// output_results (cycle);
+
+ pcout << std::endl;
+ }
+}
+
+
+
+int main(int argc, char *argv[])
+{
+ try
+ {
+#ifdef USE_TRILINOS
+ Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+#else
+ PetscInitialize(&argc, &argv);
+#endif
+
+ deallog.depth_console (1);
+
+      unsigned int refine = 5;
+      if (argc>1)
+	{
+	  refine = (unsigned int)Utilities::string_to_int(argv[1]);
+	}
+
+ {
+ LaplaceProblem<2> laplace_problem_2d;
+ laplace_problem_2d.run (refine);
+ }
+
+ print_memory_stats();
+
+#ifndef USE_TRILINOS
+ PetscLogPrintSummary(MPI_COMM_WORLD, "petsc.log" );
+ PetscFinalize();
+#endif
+
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+/**
+ * copyright:
+ * Research Group on Numerical Methods for PDEs
+ * University of Göttingen
+ *
+ * lube@math.uni-goettingen.de
+ */
+
+#ifndef TIMEBLOCK_H
+#define TIMEBLOCK_H
+
+#include <iomanip>       // std::setprecision
+
+#include <base/timer.h>  // dealii::Timer
+
+template<typename STREAM>
+class TimeBlock
+{
+ public:
+ TimeBlock(STREAM & stream_, const char* blockname, bool singleline=true)
+ : m_blockname(blockname),
+ m_singleline(singleline),
+ m_timer(MPI_COMM_WORLD, true),
+ stream(stream_)
+ {
+ MPI_Barrier(MPI_COMM_WORLD);
+ stream << blockname << " ... ";
+ if (singleline)
+ stream << std::flush;
+ else
+ stream << std::endl;
+ m_timer.start();
+ }
+
+ ~TimeBlock()
+ {
+ m_timer.stop();
+
+ if (!m_singleline)
+ stream << m_blockname << " took ";
+
+ stream << std::fixed << std::setprecision(4);
+
+ m_timer.print_data(stream);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+
+ private:
+ const char* m_blockname;
+ bool m_singleline;
+ bool m_issection;
+ dealii::Timer m_timer;
+ STREAM & stream;
+};
+
+#endif
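
A minimal usage sketch of the TimeBlock helper above (editor's illustration, not part of the patch); it is the same RAII pattern used in the example program's assemble_system() and solve():

    {
      TimeBlock<ConditionalOStream> t (pcout, "assemble");   // prints "assemble ... " and starts the timer
      // ... the work to be timed ...
    }                                                         // destructor stops the timer and prints the elapsed wall time
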
#include <base/config.h>
#include <base/table.h>
#include <base/smartpointer.h>
+#include <base/memory_consumption.h>
#include <lac/block_indices.h>
#include <lac/exceptions.h>
#include <lac/full_matrix.h>
*/
const BlockIndices & get_column_indices () const;
+ /**
+ * Determine an estimate for the memory
+ * consumption (in bytes) of this
+ * object. Note that only the memory
+ * reserved on the current processor is
+ * returned in case this is called in
+ * an MPI-based program.
+ */
+ unsigned int memory_consumption () const;
+
/** @addtogroup Exceptions
* @{ */
}
+template <class MatrixType>
+unsigned int
+BlockMatrixBase<MatrixType>::memory_consumption () const
+{
+ unsigned int mem =
+ MemoryConsumption::memory_consumption(row_block_indices)+
+ MemoryConsumption::memory_consumption(column_block_indices)+
+ MemoryConsumption::memory_consumption(sub_objects)+
+ MemoryConsumption::memory_consumption(counter_within_block)+
+ MemoryConsumption::memory_consumption(column_indices)+
+ MemoryConsumption::memory_consumption(column_values);
+
+ for (unsigned int r=0; r<n_block_rows(); ++r)
+ for (unsigned int c=0; c<n_block_cols(); ++c)
+ {
+ MatrixType *p = this->sub_objects[r][c];
+ mem += MemoryConsumption::memory_consumption(*p);
+ }
+
+ return mem;
+}
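
A sketch of how the new memory_consumption() member might be queried in an MPI run (editor's illustration; summing over processes with MPI_Reduce is only an example and not part of this patch):

    // bytes reserved by this processor's share of the block matrix
    unsigned int local_bytes = system_matrix.memory_consumption();
    unsigned int global_bytes = 0;
    MPI_Reduce (&local_bytes, &global_bytes, 1, MPI_UNSIGNED, MPI_SUM,
                0, MPI_COMM_WORLD);
    // rank 0 now holds the total over all processors
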
+
+
template <class MatrixType>
inline
*/
BlockCompressedSimpleSparsityPattern (const std::vector<unsigned int>& row_block_sizes,
const std::vector<unsigned int>& col_block_sizes);
-
+
/**
- * Resize the matrix to a tensor
- * product of matrices with
- * dimensions defined by the
- * arguments.
+ * Initialize the pattern with symmetric
+ * blocks. The number of IndexSets in the
+				      * vector determines the number of rows
+ * and columns of blocks. The size of
+ * each block is determined by the size()
+ * of the respective IndexSet. Each block
+ * only stores the rows given by the
+ * values in the IndexSet, which is
+ * useful for distributed memory parallel
+ * computations and usually corresponds
+ * to the locally owned DoFs.
+ */
+ BlockCompressedSimpleSparsityPattern (const std::vector<IndexSet> & partitioning);
+
+ /**
+ * Resize the pattern to a tensor product
+ * of matrices with dimensions defined by
+ * the arguments.
*
* The matrix will have as many
* block rows and columns as
void reinit (const std::vector< unsigned int > &row_block_sizes,
const std::vector< unsigned int > &col_block_sizes);
+ /**
+ * Resize the pattern with symmetric
+ * blocks determined by the size() of
+ * each IndexSet. See the constructor
+ * taking a vector of IndexSets for
+ * details.
+ */
+ void reinit(const std::vector<IndexSet> & partitioning);
+
/**
* Allow the use of the reinit
* functions of the base class as
/**
* Initialize the pattern with an array
- * of index sets that specifies both
- * rows and columns of the matrix (so
- * the final matrix will be a square
- * matrix), where the IndexSet
- * specifies the parallel distribution
- * of the degrees of freedom on the
- * individual block. This function is
- * equivalent to calling the second
- * constructor with the length of the
- * mapping vector and then entering the
- * index values.
+ * of index sets that specifies both rows
+ * and columns of the matrix (so the
+ * final matrix will be a square matrix),
+ * where the size() of the IndexSets
+ * specifies the size of the blocks and
+	 * the values in each IndexSet denote
+ * the rows that are going to be saved in
+ * each block.
*/
BlockSparsityPattern (const std::vector<IndexSet>& parallel_partitioning,
const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Resize the matrix to a square tensor
- * product of matrices with parallel
- * distribution according to the
- * specifications in the array of
- * Epetra_Maps.
+ * product of matrices. See the
+ * constructor that takes a vector of
+ * IndexSets for details.
*/
void reinit (const std::vector<IndexSet>& parallel_partitioning,
const MPI_Comm & communicator = MPI_COMM_WORLD);
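
A short sketch of the IndexSet-based initialization described above (editor's illustration; the two index sets are placeholders for the locally owned rows of each block):

    std::vector<IndexSet> partitioning (2);
    partitioning[0] = locally_owned_dofs_block_0;   // assumed IndexSet
    partitioning[1] = locally_owned_dofs_block_1;   // assumed IndexSet
    BlockSparsityPattern sparsity (partitioning, MPI_COMM_WORLD);
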
*/
/**
- * Add a new line to the
- * matrix. If the line already
- * exists, then the function
- * simply returns without doing
- * anything.
+ * Add a new line to the matrix. If the
+ * line already exists, then the function
+ * simply returns without doing anything.
*/
void add_line (const unsigned int line);
* or Trilinos vector wrapper class, or
* any other type having the same
* interface.
+ *
+	 * Note that if called with a @p
+	 * TrilinosWrappers::MPI::Vector, the
+	 * vector must not contain ghost
+	 * elements.
*/
template <class VectorType>
void distribute (VectorType &vec) const;
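
The calling pattern implied by this restriction (editor's illustration, mirroring the solve() function of the example program earlier in this patch):

    TrilinosWrappers::MPI::Vector distributed_solution (locally_owned_dofs,
                                                        MPI_COMM_WORLD);
    distributed_solution.reinit (solution, false, true);  // import values, no ghost entries
    constraints.distribute (distributed_solution);        // allowed: the vector has no ghosts
    solution = distributed_solution;                      // copy back into the ghosted vector
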
explicit Vector (const MPI_Comm &communicator,
const unsigned int n,
const unsigned int local_size);
-
+
/**
* Copy-constructor from deal.II
const unsigned int local_size);
- /**
+ /**
* Constructs a new parallel PETSc
* vector from an Indexset. Note that
* @p local must be contiguous and
void reinit (const Vector &v,
const bool fast = false);
- /**
+ /**
* Reinit as a ghosted vector. See
* constructor with same signature
* for more details.
void ratio (const VectorBase &a,
const VectorBase &b);
- /**
+ /**
* Updates the ghost values of this
* vector. This is necessary after any
* modification before reading ghost
*/
bool ghosted;
- /**
+ /**
* This vector contains the global
* indices of the ghost values. The
* location in this vector denotes the
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
/**
     * Communicate rows in a compressed
- * sparsity pattern over MPI. The parameter
- * @p csp is modified inline. All entries
- * in rows that belong to a different
- * processor are send to them and added
- * there. The ownership is determined by
- * the parameter @p rows_per_cpu. The
- * IndexSet @p myrange should be the one
- * used in the constructor of the
+ * sparsity pattern over MPI.
+ *
+ * @param csp is the sparsity
+ * pattern that has been built
+ * locally and for which we need to
+ * exchange entries with other
+ * processors to make sure that
+ * each processor knows all the
+ * elements of the rows of a matrix
+ * it stores and that may
+ * eventually be written to. This
+				    * sparsity pattern will be changed
+ * as a result of this function:
+ * All entries in rows that belong
+ * to a different processor are
+ * sent to them and added there.
+ *
+ * @param rows_per_cpu determines ownership of rows.
+ *
+ * @param mpi_comm is the MPI
+ * communicator that is shared
+ * between the processors that all
+ * participate in this operation.
+ *
+ * @param myrange indicates the
+ * range of elements stored locally
+ * and should be the one used in
+ * the constructor of the
* CompressedSimpleSparsityPattern. Only
- * the rows in @p csp contained in @p
- * myrange are checked for transfer. This
- * function needs to be used with
- * PETScWrappers::MPI::SparseMatrix for it
- * to work correctly in a parallel
- * calculation.
+ * rows contained in myrange are
+ * checked in csp for transfer.
+ * This function needs to be used
+ * with
+ * PETScWrappers::MPI::SparseMatrix
+ * for it to work correctly in a
+ * parallel computation.
*/
template <class CSP_t>
void distribute_sparsity_pattern(CSP_t & csp,
const IndexSet & myrange);
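
A usage sketch based on the parameter documentation above (editor's illustration; the names csp and locally_relevant_dofs, and the helper used to obtain the per-processor row counts, are assumptions):

    // csp holds the locally built rows, restricted to locally_relevant_dofs
    distribute_sparsity_pattern (csp,
                                 dof_handler.n_locally_owned_dofs_per_processor(),
                                 MPI_COMM_WORLD,
                                 locally_relevant_dofs);
    // afterwards each processor knows all entries of the rows it owns,
    // as required by PETScWrappers::MPI::SparseMatrix
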
#endif
-
+
/**
* Exception
*/
// the content -- this is what we need
// since we're going to overwrite that
// anyway in the vmult operation.
- temp_vector.reinit(dst, true);
+ // TODO[TH]: workaround, because fast is
+ // not reliable with MPI right now.
+ temp_vector.reinit(dst, false);
vmult (temp_vector, src);
dst += temp_vector;
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008, 2009 by the deal.II authors
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* to avoid using the wrong data, you need to call Vector::compress()
* before you actually use the vectors.
*
- * <h3>Parallel communication model</h3>
+ * <h3>Parallel communication model</h3>
*
* The parallel functionality of Trilinos is built on top of the Message
* Passing Interface (MPI). MPI's communication model is built on
#ifdef DEAL_II_USE_TRILINOS
+#include <base/utilities.h>
# include <base/std_cxx1x/shared_ptr.h>
# include <base/subscriptor.h>
# include <lac/exceptions.h>
/**
* Estimate for the memory
- * consumption (not implemented
- * for this class).
+ * consumption in bytes.
*/
unsigned int memory_consumption () const;
//@}
void
VectorBase::compress (const Epetra_CombineMode given_last_action)
{
+ Epetra_CombineMode mode = (last_action != Zero) ?
+ last_action : given_last_action;
+ #ifdef DEBUG
+ #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ // check that every process has decided
+ // to use the same mode. This will
+ // otherwise result in undefined
+ // behaviour in the call to
+ // GlobalAssemble().
+ double double_mode = mode;
+ struct Utilities::System::MinMaxAvg result;
+ Utilities::System::calculate_collective_mpi_min_max_avg(
+ dynamic_cast<const Epetra_MpiComm*>(&vector_partitioner().Comm())->GetMpiComm(),
+ double_mode,
+ result);
+ Assert(result.max-result.min<1e-5, ExcTrilinosError(0));
+
+ #endif
+ #endif
+
// Now pass over the information about
// what we did last to the vector.
- const int ierr = vector->GlobalAssemble(last_action != Zero ?
- last_action :
- given_last_action);
+ const int ierr = vector->GlobalAssemble(mode);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
last_action = Zero;
const unsigned int *indices,
const TrilinosScalar *values)
{
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
+ Assert (vector->Map().UniqueGIDs()==true,
+ ExcGhostsPresent());
+
if (last_action == Add)
vector->GlobalAssemble(Add);
const unsigned int *indices,
const TrilinosScalar *values)
{
+ // if we have ghost values, do not allow
+ // writing to this vector at all.
+ Assert (vector->Map().UniqueGIDs()==true,
+ ExcGhostsPresent());
+
if (last_action != Add)
{
if (last_action == Insert)
}
+template <>
+void
+BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::print(std::ostream& out) const
+{
+ unsigned int k=0;
+ for (unsigned int ib=0;ib<n_block_rows();++ib)
+ {
+ for (unsigned int i=0;i<block(ib,0).n_rows();++i)
+ {
+ out << '[' << i+k;
+ unsigned int l=0;
+ for (unsigned int jb=0;jb<n_block_cols();++jb)
+ {
+ const CompressedSimpleSparsityPattern & b = block(ib,jb);
+ if (b.row_index_set().size()==0 || b.row_index_set().is_element(i))
+ for (unsigned int j=0;j<b.n_cols();++j)
+ if (b.exists(i,j))
+ out << ',' << l+j;
+ l += b.n_cols();
+ }
+ out << ']' << std::endl;
+ }
+ k += block(ib,0).n_rows();
+ }
+}
+
+
template <class SparsityPatternBase>
void
BlockSparsityPatternBase<SparsityPatternBase>::print_gnuplot(std::ostream &out) const
}
+BlockCompressedSimpleSparsityPattern::
+BlockCompressedSimpleSparsityPattern (const std::vector<IndexSet> & partitioning)
+ :
+ BlockSparsityPatternBase<CompressedSimpleSparsityPattern>(partitioning.size(),
+ partitioning.size())
+{
+ for (unsigned int i=0;i<partitioning.size();++i)
+ for (unsigned int j=0;j<partitioning.size();++j)
+ this->block(i,j).reinit(partitioning[i].size(),
+ partitioning[j].size(),
+ partitioning[i]);
+ this->collect_sizes();
+}
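
A brief sketch of how the new constructor might be used (editor's illustration; the index sets are placeholders for the locally owned rows of each block):

    std::vector<IndexSet> partitioning (2);
    partitioning[0] = owned_dofs_block_0;   // assumed IndexSet
    partitioning[1] = owned_dofs_block_1;   // assumed IndexSet

    // each block is sized by IndexSet::size() and stores only the rows
    // contained in the corresponding IndexSet
    BlockCompressedSimpleSparsityPattern csp (partitioning);
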
+
+
void
BlockCompressedSimpleSparsityPattern::reinit (
this->collect_sizes();
}
+void
+BlockCompressedSimpleSparsityPattern::reinit (
+ const std::vector< IndexSet > & partitioning)
+{
+ BlockSparsityPatternBase<CompressedSimpleSparsityPattern>::
+ reinit(partitioning.size(), partitioning.size());
+ for (unsigned int i=0;i<partitioning.size();++i)
+ for (unsigned int j=0;j<partitioning.size();++j)
+ this->block(i,j).reinit(partitioning[i].size(),
+ partitioning[j].size(),
+ partitioning[i]);
+ this->collect_sizes();
+}
#ifdef DEAL_II_USE_TRILINOS
cols = n;
rowset=rowset_;
+ Assert(rowset.size()==0 || rowset.size() == m, ExcInvalidConstructorCall());
+
std::vector<Line> new_lines (rowset.size()==0 ? rows : rowset.n_elements());
lines.swap (new_lines);
}
vec(it->line) = new_value;
}
- vec.compress ();
+ // some processes might not apply
+ // constraints, so we need to explicitly
+	  // state that the others are doing an
+ // insert here:
+ vec.compress (Insert);
}
}
}
+  // force every processor to write at least one value so
+  // that the following compress() is entered with the same
+  // (insert) mode on all processors
+ unsigned int idx = vec.block(0).local_range().first;
+ vec(idx) = vec(idx);
vec.compress ();
}
//see below)
const unsigned int
- local_col_end = local_col_start + local_columns_per_process[this_process];
-
+ local_col_end = local_col_start + local_columns_per_process[this_process];
+
// then count the elements in- and
// out-of-window for the rows we own
#ifdef PETSC_USE_64BIT_INDICES
0, &row_lengths_out_of_window[0],
&matrix);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-
+
#else //PETSC_VERSION>=2.3.3
// new version to create the matrix. We
// do not set row length but set the
// now copy over the information
// from the sparsity pattern.
- {
+ {
#ifdef PETSC_USE_64BIT_INDICES
PetscInt
#else
std::copy(row_start, row_end, ptr);
ptr += row_end - row_start;
- }
+ }
}
-
+
// then call the petsc function
// that summarily allocates these
// this is only needed for old
// PETSc versions:
- // for some reason, it does not
+ // for some reason, it does not
// seem to be possible to force
// actual allocation of actual
// entries by using the last
#else
ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-#endif
+#endif
#else
int ierr;
#ifdef DEBUG
VectorBase::operator = (v);
}
-
+
Vector::Vector (const MPI_Comm &communicator,
const IndexSet & local,
}
-
+
void
Vector::reinit (const MPI_Comm &comm,
const IndexSet & local,
#endif
- }
+ }
-}
+ }
}
#ifdef DEAL_II_USE_PETSC
+# include <base/memory_consumption.h>
# include <lac/petsc_vector.h>
# include <lac/petsc_parallel_vector.h>
# include <cmath>
if (dynamic_cast<const PETScWrappers::Vector *>(&vector) != 0)
{
#if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
- PetscScalar *ptr;
+ PetscScalar *ptr;
int ierr
= VecGetArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
}
-
unsigned int
VectorBase::memory_consumption () const
- {
- AssertThrow(false, ExcNotImplemented() );
- return 0;
+ {
+ unsigned int mem = sizeof(Vec)+sizeof(LastAction::Values)
+ +MemoryConsumption::memory_consumption(ghosted)
+ +MemoryConsumption::memory_consumption(ghost_indices);
+
+ // TH: I am relatively sure that PETSc is
+    // storing the local data in a contiguous
+ // block without indices:
+ mem += local_size()*sizeof(PetscScalar);
+ // assume that PETSc is storing one index
+ // and one double per ghost element
+ if (ghosted)
+ mem += ghost_indices.n_elements()*(sizeof(PetscScalar)+sizeof(int));
+
+ //TODO[TH]: size of constant memory for PETSc?
+ return mem;
}
AssertThrow (ierr == 0, ExcPETScError(ierr));
ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-}
-
+ }
+
}
{
row_idx+=rows_per_cpu[myid]-1;
continue;
-}
+ }
unsigned int rlen = csp.row_length(row);
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008 by the deal.II authors
+// Copyright (C) 2008, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
//---------------------------------------------------------------------------
+#include <base/memory_consumption.h>
#include <lac/trilinos_vector_base.h>
#ifdef DEAL_II_USE_TRILINOS
unsigned int
VectorBase::memory_consumption () const
{
- AssertThrow(false, ExcNotImplemented() );
- return 0;
+ //TODO[TH]: No accurate memory
+ //consumption for Trilinos vectors
+ //yet. This is a rough approximation with
+    //one index and one value per local
+ //entry.
+ return sizeof(*this)
+ + this->local_size()*( sizeof(double)+sizeof(int) );
}
} /* end of namespace TrilinosWrappers */