From: heltai Date: Thu, 26 Dec 2013 19:05:08 +0000 (+0000) Subject: Merged from trunk. X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2ed10e44bb4e2c03abf789738740a091a0eae8ec;p=dealii-svn.git Merged from trunk. git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@32125 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/CMakeLists.txt b/deal.II/CMakeLists.txt index 8ae7818a1e..c48544e743 100644 --- a/deal.II/CMakeLists.txt +++ b/deal.II/CMakeLists.txt @@ -76,8 +76,7 @@ INCLUDE(setup_cached_variables) # # Now, set the project and set up the rest: # -PROJECT(deal.II CXX) -ENABLE_LANGUAGE_OPTIONAL(C) +PROJECT(deal.II CXX C) ENABLE_LANGUAGE_OPTIONAL(Fortran) INCLUDE(setup_deal_ii) diff --git a/deal.II/bundled/CMakeLists.txt b/deal.II/bundled/CMakeLists.txt index 30e62c7d3f..c2ba25e553 100644 --- a/deal.II/bundled/CMakeLists.txt +++ b/deal.II/bundled/CMakeLists.txt @@ -18,7 +18,7 @@ # Compile and install enabled bundled features: # -MESSAGE(STATUS "Setup bundled features") +MESSAGE(STATUS "Setting up bundled features") IF(FEATURE_BOOST_BUNDLED_CONFIGURED) @@ -33,15 +33,9 @@ IF(FEATURE_BOOST_BUNDLED_CONFIGURED) ) ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/serialization/src) + ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/iostreams/src) - IF(DEAL_II_WITH_THREADS AND NOT DEAL_II_USE_CXX11) - # - # If the C++ compiler doesn't completely support the C++11 standard - # (and consequently we can't use std::thread, std::mutex, etc), then - # include all the files that form BOOST's thread implementation so that - # we don't have to build BOOST itself only to get at this small part of - # it. it also ensures that we use the correct compiler and flags - # + IF(DEAL_II_WITH_THREADS AND NOT DEAL_II_WITH_CXX11) ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/thread/src) ENDIF() ENDIF() @@ -74,4 +68,4 @@ IF(FEATURE_UMFPACK_BUNDLED_CONFIGURED) ENDIF() -MESSAGE(STATUS "Setup bundled features - Done") +MESSAGE(STATUS "Setting up bundled features - Done") diff --git a/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp b/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp index a6ad99cae4..1f60f10b94 100644 --- a/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp +++ b/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp @@ -39,7 +39,7 @@ namespace boost // should only be used by deal.II and dependent projects... // // - Maier, 2013 -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 template struct weak_ptr_traits > { typedef std::shared_ptr shared_type; @@ -54,7 +54,7 @@ namespace boost typedef boost::weak_ptr weak_type; }; // as above -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 template struct shared_ptr_traits > { typedef std::weak_ptr weak_type; diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt new file mode 100644 index 0000000000..07e336c8cc --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/CMakeLists.txt @@ -0,0 +1,43 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. 
+## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +SET(src_boost_iostreams + file_descriptor.cpp + gzip.cpp + mapped_file.cpp + ) + +IF(DEAL_II_WITH_ZLIB) + SET(src_boost_iostreams + ${src_boost_iostreams} + zlib.cpp + ) +ELSE() + MESSAGE(STATUS "BOOST::Iostreams will not support gzipped streams because libz or its header files could not be found") +ENDIF() + +IF(DEALII_WITH_BZIP2) + SET(src_boost_iostreams + ${src_boost_iostreams} + bzip2.cpp + ) +ELSE() + MESSAGE(STATUS "BOOST::Iostreams will not support bz2'ed streams because libbz2 or its header files could not be found") +ENDIF() + + +DEAL_II_ADD_LIBRARY(obj_boost_iostreams OBJECT ${src_boost_iostreams}) + diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/bzip2.cpp b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/bzip2.cpp new file mode 100644 index 0000000000..7e4274919b --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/bzip2.cpp @@ -0,0 +1,173 @@ +// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com) +// (C) Copyright 2003-2007 Jonathan Turkanis +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) + +// See http://www.boost.org/libs/iostreams for documentation. + +// To configure Boost to work with libbz2, see the +// installation instructions here: +// http://boost.org/libs/iostreams/doc/index.html?path=7 + +// Define BOOST_IOSTREAMS_SOURCE so that +// knows that we are building the library (possibly exporting code), rather +// than using it (possibly importing code). +#define BOOST_IOSTREAMS_SOURCE + +#include +#include +#include +#include "bzlib.h" // Julian Seward's "bzip.h" header. + // To configure Boost to work with libbz2, see the + // installation instructions here: + // http://boost.org/libs/iostreams/doc/index.html?path=7 + +namespace boost { namespace iostreams { + +namespace bzip2 { + + // Status codes + +const int ok = BZ_OK; +const int run_ok = BZ_RUN_OK; +const int flush_ok = BZ_FLUSH_OK; +const int finish_ok = BZ_FINISH_OK; +const int stream_end = BZ_STREAM_END; +const int sequence_error = BZ_SEQUENCE_ERROR; +const int param_error = BZ_PARAM_ERROR; +const int mem_error = BZ_MEM_ERROR; +const int data_error = BZ_DATA_ERROR; +const int data_error_magic = BZ_DATA_ERROR_MAGIC; +const int io_error = BZ_IO_ERROR; +const int unexpected_eof = BZ_UNEXPECTED_EOF; +const int outbuff_full = BZ_OUTBUFF_FULL; +const int config_error = BZ_CONFIG_ERROR; + + // Action codes + +const int finish = BZ_FINISH; +const int run = BZ_RUN; + +} // End namespace bzip2. 
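The BZ_* aliases above are only thin mirrors of libbz2's own status and action codes; client code normally reaches this translation unit through the bzip2_compressor/bzip2_decompressor filters layered on top of it. A minimal, hypothetical usage sketch (not part of this commit; the file name and payload are made up, and it only applies when the bundled sources were configured with bzip2 support as in the CMakeLists.txt above):

    #include <boost/iostreams/filtering_stream.hpp>
    #include <boost/iostreams/filter/bzip2.hpp>
    #include <fstream>

    int main()
    {
      // Write a bzip2-compressed file through a filter chain; the filter
      // ultimately calls the BZ2_bzCompress* wrappers implemented in this file.
      std::ofstream file("hello.bz2", std::ios_base::binary);
      boost::iostreams::filtering_ostream out;
      out.push(boost::iostreams::bzip2_compressor());
      out.push(file);
      out << "compressed via the bundled BOOST::Iostreams\n";
      return 0;
    }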
+ +//------------------Implementation of bzip2_error-----------------------------// + +bzip2_error::bzip2_error(int error) + : BOOST_IOSTREAMS_FAILURE("bzip2 error"), error_(error) + { } + +void bzip2_error::check BOOST_PREVENT_MACRO_SUBSTITUTION(int error) +{ + switch (error) { + case BZ_OK: + case BZ_RUN_OK: + case BZ_FLUSH_OK: + case BZ_FINISH_OK: + case BZ_STREAM_END: + return; + case BZ_MEM_ERROR: + boost::throw_exception(std::bad_alloc()); + default: + boost::throw_exception(bzip2_error(error)); + } +} + +//------------------Implementation of bzip2_base------------------------------// + +namespace detail { + +bzip2_base::bzip2_base(const bzip2_params& params) + : params_(params), stream_(new bz_stream), ready_(false) + { } + +bzip2_base::~bzip2_base() { delete static_cast(stream_); } + +void bzip2_base::before( const char*& src_begin, const char* src_end, + char*& dest_begin, char* dest_end ) +{ + bz_stream* s = static_cast(stream_); + s->next_in = const_cast(src_begin); + s->avail_in = static_cast(src_end - src_begin); + s->next_out = reinterpret_cast(dest_begin); + s->avail_out= static_cast(dest_end - dest_begin); +} + +void bzip2_base::after(const char*& src_begin, char*& dest_begin) +{ + bz_stream* s = static_cast(stream_); + src_begin = const_cast(s->next_in); + dest_begin = s->next_out; +} + +int bzip2_base::check_end(const char* src_begin, const char* dest_begin) +{ + bz_stream* s = static_cast(stream_); + if( src_begin == s->next_in && + s->avail_in == 0 && + dest_begin == s->next_out) { + return bzip2::unexpected_eof; + } else { + return bzip2::ok; + } +} + +void bzip2_base::end(bool compress) +{ + if(!ready_) return; + ready_ = false; + bz_stream* s = static_cast(stream_); + bzip2_error::check BOOST_PREVENT_MACRO_SUBSTITUTION( + compress ? + BZ2_bzCompressEnd(s) : + BZ2_bzDecompressEnd(s) + ); +} + +int bzip2_base::compress(int action) +{ + return BZ2_bzCompress(static_cast(stream_), action); +} + +int bzip2_base::decompress() +{ + return BZ2_bzDecompress(static_cast(stream_)); +} + +void bzip2_base::do_init + ( bool compress, + #if !BOOST_WORKAROUND(BOOST_MSVC, < 1300) + bzip2::alloc_func /* alloc */, + bzip2::free_func /* free */, + #endif + void* derived ) +{ + bz_stream* s = static_cast(stream_); + + // Current interface for customizing memory management + // is non-conforming and has been disabled: + //#if !BOOST_WORKAROUND(BOOST_MSVC, < 1300) + // s->bzalloc = alloc; + // s->bzfree = free; + //#else + s->bzalloc = 0; + s->bzfree = 0; + //#endif + s->opaque = derived; + bzip2_error::check BOOST_PREVENT_MACRO_SUBSTITUTION( + compress ? + BZ2_bzCompressInit( s, + params_.block_size, + 0, + params_.work_factor ) : + BZ2_bzDecompressInit( s, + 0, + params_.small ) + ); + ready_ = true; +} + +} // End namespace detail. + +//----------------------------------------------------------------------------// + +} } // End namespaces iostreams, boost. diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/file_descriptor.cpp b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/file_descriptor.cpp new file mode 100644 index 0000000000..c1af69527a --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/file_descriptor.cpp @@ -0,0 +1,593 @@ +// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com) +// (C) Copyright 2003-2007 Jonathan Turkanis +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) + +// See http://www.boost.org/libs/iostreams for documentation. 
+ +// Define BOOST_IOSTREAMS_SOURCE so that +// knows that we are building the library (possibly exporting code), rather +// than using it (possibly importing code). +#define BOOST_IOSTREAMS_SOURCE + +#include +#include +#include // SEEK_SET, etc. +#include // BOOST_JOIN +#include +#include +#include // BOOST_IOSTREAMS_FD_XXX +#include +#include +#include // openmodes, failure. +#include +#include +#include + + // OS-specific headers for low-level i/o. + +#include // file opening flags. +#include // file access permissions. +#ifdef BOOST_IOSTREAMS_WINDOWS +# include // low-level file i/o. +# define WINDOWS_LEAN_AND_MEAN +# include +# ifndef INVALID_SET_FILE_POINTER +# define INVALID_SET_FILE_POINTER ((DWORD)-1) +# endif +#else +# include // mode_t. +# include // low-level file i/o. +#endif + +namespace boost { namespace iostreams { + +//------------------Definition of file_descriptor_impl------------------------// + +namespace detail { + +// Contains the platform dependant implementation +struct file_descriptor_impl { + // Note: These need to match file_desciptor_flags + enum flags { + never_close = 0, + close_on_exit = 1, + close_on_close = 2, + close_always = 3 + }; + + file_descriptor_impl(); + ~file_descriptor_impl(); + void open(file_handle fd, flags); +#ifdef BOOST_IOSTREAMS_WINDOWS + void open(int fd, flags); +#endif + void open(const detail::path&, BOOST_IOS::openmode); + bool is_open() const; + void close(); + void close_impl(bool close_flag, bool throw_); + std::streamsize read(char* s, std::streamsize n); + std::streamsize write(const char* s, std::streamsize n); + std::streampos seek(stream_offset off, BOOST_IOS::seekdir way); + static file_handle invalid_handle(); + file_handle handle_; + int flags_; +}; + +//------------------Implementation of file_descriptor_impl--------------------// + +file_descriptor_impl::file_descriptor_impl() + : handle_(invalid_handle()), flags_(0) + { } + +file_descriptor_impl::~file_descriptor_impl() +{ + close_impl(flags_ & close_on_exit, false); +} + +void file_descriptor_impl::open(file_handle fd, flags f) +{ + // Using 'close' to close the existing handle so that it will throw an + // exception if it fails. + // + // Only closing after assigning the new handle, so that the class will + // take ownership of the handle regardless of whether close throws. + + file_descriptor_impl tmp; + tmp.handle_ = handle_; + tmp.flags_ = flags_ & close_on_exit ? close_on_close : never_close; + + handle_ = fd; + flags_ = f; + + tmp.close(); +} + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +void file_descriptor_impl::open(int fd, flags f) +{ open(reinterpret_cast(_get_osfhandle(fd)), f); } + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +void file_descriptor_impl::open(const detail::path& p, BOOST_IOS::openmode mode) +{ + close_impl(flags_ & close_on_exit, true); + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + DWORD dwDesiredAccess; + DWORD dwCreationDisposition; + if ( (mode & (BOOST_IOS::in | BOOST_IOS::out)) + == + (BOOST_IOS::in | BOOST_IOS::out) ) + { + if (mode & BOOST_IOS::app) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + dwCreationDisposition = + (mode & BOOST_IOS::trunc) ? 
+ CREATE_ALWAYS : + OPEN_EXISTING; + } else if (mode & BOOST_IOS::in) { + if (mode & (BOOST_IOS::app | BOOST_IOS::trunc)) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + dwDesiredAccess = GENERIC_READ; + dwCreationDisposition = OPEN_EXISTING; + } else if (mode & BOOST_IOS::out) { + if ( (mode & (BOOST_IOS::app | BOOST_IOS::trunc)) + == + (BOOST_IOS::app | BOOST_IOS::trunc) ) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + if (mode & BOOST_IOS::app) { + dwCreationDisposition = OPEN_ALWAYS; + dwDesiredAccess = + FILE_APPEND_DATA | + FILE_WRITE_ATTRIBUTES | + FILE_WRITE_EA | + STANDARD_RIGHTS_WRITE | + SYNCHRONIZE; + } else { + dwDesiredAccess = GENERIC_WRITE; + dwCreationDisposition = CREATE_ALWAYS; + } + } else { + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + } + + HANDLE handle = p.is_wide() ? + ::CreateFileW( p.c_wstr(), + dwDesiredAccess, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, // lpSecurityAttributes + dwCreationDisposition, + FILE_ATTRIBUTE_NORMAL, + NULL ) : // hTemplateFile + ::CreateFileA( p.c_str(), + dwDesiredAccess, + FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, // lpSecurityAttributes + dwCreationDisposition, + FILE_ATTRIBUTE_NORMAL, + NULL ); // hTemplateFile + if (handle != INVALID_HANDLE_VALUE) { + handle_ = handle; + flags_ = close_always; + } else { + flags_ = 0; + throw_system_failure("failed opening file"); + } +#else // #ifdef BOOST_IOSTREAMS_WINDOWS //------------------------------------// + + // Calculate oflag argument to open. + + int oflag = 0; + if ( (mode & (BOOST_IOS::in | BOOST_IOS::out)) + == + (BOOST_IOS::in | BOOST_IOS::out) ) + { + if( mode & BOOST_IOS::app ) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + oflag |= O_RDWR; + if( mode & BOOST_IOS::trunc ) { + oflag |= O_TRUNC; + oflag |= O_CREAT; + } + } else if (mode & BOOST_IOS::in) { + if( mode & (BOOST_IOS::app | BOOST_IOS::trunc) ) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + oflag |= O_RDONLY; + } else if (mode & BOOST_IOS::out) { + if( (mode & (BOOST_IOS::app | BOOST_IOS::trunc)) + == + (BOOST_IOS::app | BOOST_IOS::trunc) ) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + oflag |= O_WRONLY; + if (mode & BOOST_IOS::app) + oflag |= O_APPEND; + else { + oflag |= O_CREAT; + oflag |= O_TRUNC; + } + } else { + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad open mode")); + } + #ifdef _LARGEFILE64_SOURCE + oflag |= O_LARGEFILE; + #endif + + // Calculate pmode argument to open. + + mode_t pmode = S_IRUSR | S_IWUSR | + S_IRGRP | S_IWGRP | + S_IROTH | S_IWOTH; + + // Open file. 
+ + int fd = BOOST_IOSTREAMS_FD_OPEN(p.c_str(), oflag, pmode); + if (fd == -1) { + boost::throw_exception(system_failure("failed opening file")); + } else { + handle_ = fd; + flags_ = close_always; + } +#endif // #ifndef BOOST_IOSTREAMS_WINDOWS //----------------------------------// +} + +bool file_descriptor_impl::is_open() const +{ return handle_ != invalid_handle(); } + +void file_descriptor_impl::close() +{ + close_impl(flags_ & close_on_close, true); +} + +void file_descriptor_impl::close_impl(bool close_flag, bool throw_) { + if (handle_ != invalid_handle()) { + if (close_flag) { + bool success = + #ifdef BOOST_IOSTREAMS_WINDOWS + ::CloseHandle(handle_) == 1; + #else + BOOST_IOSTREAMS_FD_CLOSE(handle_) != -1; + #endif + if (!success && throw_) + throw_system_failure("failed closing file"); + } + handle_ = invalid_handle(); + flags_ = 0; + } +} + +std::streamsize file_descriptor_impl::read(char* s, std::streamsize n) +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + DWORD result; + if (!::ReadFile(handle_, s, n, &result, NULL)) + throw_system_failure("failed reading"); + return result == 0 ? -1 : static_cast(result); +#else // #ifdef BOOST_IOSTREAMS_WINDOWS + errno = 0; + std::streamsize result = BOOST_IOSTREAMS_FD_READ(handle_, s, n); + if (errno != 0) + throw_system_failure("failed reading"); + return result == 0 ? -1 : result; +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS +} + +std::streamsize file_descriptor_impl::write(const char* s, std::streamsize n) +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + DWORD ignore; + if (!::WriteFile(handle_, s, n, &ignore, NULL)) + throw_system_failure("failed writing"); + return n; +#else // #ifdef BOOST_IOSTREAMS_WINDOWS + int amt = BOOST_IOSTREAMS_FD_WRITE(handle_, s, n); + if (amt < n) // Handles blocking fd's only. + throw_system_failure("failed writing"); + return n; +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS +} + +std::streampos file_descriptor_impl::seek + (stream_offset off, BOOST_IOS::seekdir way) +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + LONG lDistanceToMove = static_cast(off & 0xffffffff); + LONG lDistanceToMoveHigh = static_cast(off >> 32); + DWORD dwResultLow = + ::SetFilePointer( handle_, + lDistanceToMove, + &lDistanceToMoveHigh, + way == BOOST_IOS::beg ? + FILE_BEGIN : + way == BOOST_IOS::cur ? + FILE_CURRENT : + FILE_END ); + if ( dwResultLow == INVALID_SET_FILE_POINTER && + ::GetLastError() != NO_ERROR ) + { + boost::throw_exception(system_failure("failed seeking")); + } else { + return offset_to_position( + (stream_offset(lDistanceToMoveHigh) << 32) + dwResultLow + ); + } +#else // #ifdef BOOST_IOSTREAMS_WINDOWS + if ( off > integer_traits::const_max || + off < integer_traits::const_min ) + { + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("bad offset")); + } + stream_offset result = + BOOST_IOSTREAMS_FD_SEEK( + handle_, + static_cast(off), + ( way == BOOST_IOS::beg ? + SEEK_SET : + way == BOOST_IOS::cur ? + SEEK_CUR : + SEEK_END ) + ); + if (result == -1) + boost::throw_exception(system_failure("failed seeking")); + return offset_to_position(result); +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS +} + +// Returns the value stored in a file_handle variable when no file is open +file_handle file_descriptor_impl::invalid_handle() +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + return INVALID_HANDLE_VALUE; +#else + return -1; +#endif +} + +} // End namespace detail. 
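The never_close/close_on_exit/close_on_close flags above are the internal counterparts of the public file_descriptor_flags enumerators (never_close_handle, close_handle). As a hedged illustration only (not part of this commit; the file name and error handling are invented), adopting an already-open POSIX descriptor might look like:

    #include <boost/iostreams/device/file_descriptor.hpp>
    #include <boost/iostreams/stream.hpp>
    #include <fcntl.h>
    #include <string>

    int main()
    {
      // close_handle transfers ownership to the device, so the descriptor is
      // closed by file_descriptor_impl::close_impl() when the last copy goes away.
      int fd = ::open("input.txt", O_RDONLY);
      if (fd == -1)
        return 1;
      boost::iostreams::stream<boost::iostreams::file_descriptor_source>
        in(boost::iostreams::file_descriptor_source(fd, boost::iostreams::close_handle));
      std::string word;
      in >> word;  // serviced by file_descriptor_impl::read() shown above
      return 0;
    }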
+ +//------------------Implementation of file_descriptor-------------------------// + +file_descriptor::file_descriptor() : pimpl_(new impl_type) { } + +file_descriptor::file_descriptor(handle_type fd, file_descriptor_flags f) + : pimpl_(new impl_type) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor::file_descriptor(handle_type fd, bool close_on_exit) + : pimpl_(new impl_type) +{ open(fd, close_on_exit); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +file_descriptor::file_descriptor(int fd, file_descriptor_flags f) + : pimpl_(new impl_type) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor::file_descriptor(int fd, bool close_on_exit) + : pimpl_(new impl_type) +{ open(fd, close_on_exit); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +file_descriptor::file_descriptor( const std::string& path, + BOOST_IOS::openmode mode ) + : pimpl_(new impl_type) +{ open(path, mode); } + +file_descriptor::file_descriptor( const char* path, + BOOST_IOS::openmode mode ) + : pimpl_(new impl_type) +{ open(path, mode); } + +file_descriptor::file_descriptor(const file_descriptor& other) + : pimpl_(other.pimpl_) + { } + +void file_descriptor::open(handle_type fd, file_descriptor_flags f) +{ pimpl_->open(fd, static_cast(f)); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor::open(handle_type fd, bool close_on_exit) +{ pimpl_->open(fd, close_on_exit ? + detail::file_descriptor_impl::close_always : + detail::file_descriptor_impl::close_on_close); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +void file_descriptor::open(int fd, file_descriptor_flags f) +{ pimpl_->open(fd, static_cast(f)); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor::open(int fd, bool close_on_exit) +{ pimpl_->open(fd, close_on_exit ? 
+ detail::file_descriptor_impl::close_always : + detail::file_descriptor_impl::close_on_close); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +void file_descriptor::open(const std::string& path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +void file_descriptor::open(const char* path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +bool file_descriptor::is_open() const { return pimpl_->is_open(); } + +void file_descriptor::close() { pimpl_->close(); } + +std::streamsize file_descriptor::read(char_type* s, std::streamsize n) +{ return pimpl_->read(s, n); } + +std::streamsize file_descriptor::write(const char_type* s, std::streamsize n) +{ return pimpl_->write(s, n); } + +std::streampos file_descriptor::seek(stream_offset off, BOOST_IOS::seekdir way) +{ return pimpl_->seek(off, way); } + +detail::file_handle file_descriptor::handle() const { return pimpl_->handle_; } + +void file_descriptor::init() { pimpl_.reset(new impl_type); } + +void file_descriptor::open( + const detail::path& path, + BOOST_IOS::openmode mode, + BOOST_IOS::openmode base ) +{ + mode |= base; + pimpl_->open(path, mode); +} + +//------------------Implementation of file_descriptor_source------------------// + +file_descriptor_source::file_descriptor_source( + handle_type fd, file_descriptor_flags f) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor_source::file_descriptor_source( + handle_type fd, bool close_on_exit) +{ open(fd, close_on_exit); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +file_descriptor_source::file_descriptor_source(int fd, file_descriptor_flags f) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor_source::file_descriptor_source(int fd, bool close_on_exit) +{ open(fd, close_on_exit); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +file_descriptor_source::file_descriptor_source( + const std::string& path, BOOST_IOS::openmode mode) +{ open(path, mode); } + +file_descriptor_source::file_descriptor_source( + const char* path, BOOST_IOS::openmode mode) +{ open(path, mode); } + +file_descriptor_source::file_descriptor_source( + const file_descriptor_source& other) + : file_descriptor(static_cast(other)) + { } + +void file_descriptor_source::open(handle_type fd, file_descriptor_flags f) +{ file_descriptor::open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor_source::open(handle_type fd, bool close_on_exit) +{ file_descriptor::open(fd, close_on_exit); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +void file_descriptor_source::open(int fd, file_descriptor_flags f) +{ file_descriptor::open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor_source::open(int fd, bool close_on_exit) +{ file_descriptor::open(fd, close_on_exit); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +void file_descriptor_source::open( + const std::string& path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +void file_descriptor_source::open( + const char* path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +void file_descriptor_source::open( + const detail::path& path, BOOST_IOS::openmode mode) +{ + if (mode & (BOOST_IOS::out | BOOST_IOS::app | BOOST_IOS::trunc)) + 
boost::throw_exception(BOOST_IOSTREAMS_FAILURE("invalid mode")); + file_descriptor::open(path, mode, BOOST_IOS::in); +} + +//------------------Implementation of file_descriptor_sink--------------------// + +file_descriptor_sink::file_descriptor_sink( + handle_type fd, file_descriptor_flags f) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor_sink::file_descriptor_sink( + handle_type fd, bool close_on_exit) +{ open(fd, close_on_exit); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +file_descriptor_sink::file_descriptor_sink(int fd, file_descriptor_flags f) +{ open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +file_descriptor_sink::file_descriptor_sink(int fd, bool close_on_exit) +{ open(fd, close_on_exit); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +file_descriptor_sink::file_descriptor_sink( + const std::string& path, BOOST_IOS::openmode mode) +{ open(path, mode); } + +file_descriptor_sink::file_descriptor_sink( + const char* path, BOOST_IOS::openmode mode) +{ open(path, mode); } + +file_descriptor_sink::file_descriptor_sink(const file_descriptor_sink& other) + : file_descriptor(static_cast(other)) + { } + +void file_descriptor_sink::open(handle_type fd, file_descriptor_flags f) +{ file_descriptor::open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor_sink::open(handle_type fd, bool close_on_exit) +{ file_descriptor::open(fd, close_on_exit); } +#endif + +#ifdef BOOST_IOSTREAMS_WINDOWS //---------------------------------------------// + +void file_descriptor_sink::open(int fd, file_descriptor_flags f) +{ file_descriptor::open(fd, f); } + +#if defined(BOOST_IOSTREAMS_USE_DEPRECATED) +void file_descriptor_sink::open(int fd, bool close_on_exit) +{ file_descriptor::open(fd, close_on_exit); } +#endif + +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS //-----------------------------------// + +void file_descriptor_sink::open( + const std::string& path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +void file_descriptor_sink::open( + const char* path, BOOST_IOS::openmode mode) +{ open(detail::path(path), mode); } + +void file_descriptor_sink::open( + const detail::path& path, BOOST_IOS::openmode mode) +{ + if (mode & BOOST_IOS::in) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("invalid mode")); + file_descriptor::open(path, mode, BOOST_IOS::out); +} + +} } // End namespaces iostreams, boost. diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/gzip.cpp b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/gzip.cpp new file mode 100644 index 0000000000..04cb71a54d --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/gzip.cpp @@ -0,0 +1,174 @@ +// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com) +// (C) Copyright 2003-2007 Jonathan Turkanis +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) + +// See http://www.boost.org/libs/iostreams for documentation. + +// To configure Boost to work with libbz2, see the +// installation instructions here: +// http://boost.org/libs/iostreams/doc/index.html?path=7 + +// Define BOOST_IOSTREAMS_SOURCE so that +// knows that we are building the library (possibly exporting code), rather +// than using it (possibly importing code). 
+#define BOOST_IOSTREAMS_SOURCE + +#include +#include +#include + +namespace boost { namespace iostreams { + +//------------------Implementation of gzip_header-----------------------------// + +namespace detail { + +void gzip_header::process(char c) +{ + uint8_t value = static_cast(c); + switch (state_) { + case s_id1: + if (value != gzip::magic::id1) + boost::throw_exception(gzip_error(gzip::bad_header)); + state_ = s_id2; + break; + case s_id2: + if (value != gzip::magic::id2) + boost::throw_exception(gzip_error(gzip::bad_header)); + state_ = s_cm; + break; + case s_cm: + if (value != gzip::method::deflate) + boost::throw_exception(gzip_error(gzip::bad_method)); + state_ = s_flg; + break; + case s_flg: + flags_ = value; + state_ = s_mtime; + break; + case s_mtime: + mtime_ += value << (offset_ * 8); + if (offset_ == 3) { + state_ = s_xfl; + offset_ = 0; + } else { + ++offset_; + } + break; + case s_xfl: + state_ = s_os; + break; + case s_os: + os_ = value; + if (flags_ & gzip::flags::extra) { + state_ = s_extra; + } else if (flags_ & gzip::flags::name) { + state_ = s_name; + } else if (flags_ & gzip::flags::comment) { + state_ = s_comment; + } else if (flags_ & gzip::flags::header_crc) { + state_ = s_hcrc; + } else { + state_ = s_done; + } + break; + case s_xlen: + xlen_ += value << (offset_ * 8); + if (offset_ == 1) { + state_ = s_extra; + offset_ = 0; + } else { + ++offset_; + } + break; + case s_extra: + if (--xlen_ == 0) { + if (flags_ & gzip::flags::name) { + state_ = s_name; + } else if (flags_ & gzip::flags::comment) { + state_ = s_comment; + } else if (flags_ & gzip::flags::header_crc) { + state_ = s_hcrc; + } else { + state_ = s_done; + } + } + break; + case s_name: + if (c != 0) { + file_name_ += c; + } else if (flags_ & gzip::flags::comment) { + state_ = s_comment; + } else if (flags_ & gzip::flags::header_crc) { + state_ = s_hcrc; + } else { + state_ = s_done; + } + break; + case s_comment: + if (c != 0) { + comment_ += c; + } else if (flags_ & gzip::flags::header_crc) { + state_ = s_hcrc; + } else { + state_ = s_done; + } + break; + case s_hcrc: + if (offset_ == 1) { + state_ = s_done; + offset_ = 0; + } else { + ++offset_; + } + break; + default: + BOOST_ASSERT(0); + } +} + +void gzip_header::reset() +{ + file_name_.clear(); + comment_.clear(); + os_ = flags_ = offset_ = xlen_ = 0; + mtime_ = 0; + state_ = s_id1; +} + +//------------------Implementation of gzip_footer-----------------------------// + +void gzip_footer::process(char c) +{ + uint8_t value = static_cast(c); + if (state_ == s_crc) { + crc_ += value << (offset_ * 8); + if (offset_ == 3) { + state_ = s_isize; + offset_ = 0; + } else { + ++offset_; + } + } else if (state_ == s_isize) { + isize_ += value << (offset_ * 8); + if (offset_ == 3) { + state_ = s_done; + offset_ = 0; + } else { + ++offset_; + } + } else { + BOOST_ASSERT(0); + } +} + +void gzip_footer::reset() +{ + crc_ = isize_ = offset_ = 0; + state_ = s_crc; +} + +} // End namespace boost::iostreams::detail. + +} } // End namespaces iostreams, boost. diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/mapped_file.cpp b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/mapped_file.cpp new file mode 100644 index 0000000000..59a942e9e5 --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/mapped_file.cpp @@ -0,0 +1,496 @@ +// (C) Copyright Craig Henderson 2002 'boost/memmap.hpp' from sandbox +// (C) Copyright Jonathan Turkanis 2004. +// (C) Copyright Jonathan Graehl 2004. +// (C) Copyright Jorge Lodos 2008. 
+// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) + +// Define BOOST_IOSTREAMS_SOURCE so that +// knows that we are building the library (possibly exporting code), rather +// than using it (possibly importing code). +#define BOOST_IOSTREAMS_SOURCE + +#include +#include +#include +#include +#include +#include +#include + +#ifdef BOOST_IOSTREAMS_WINDOWS +# define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers +# include +#else +# include +# include +# include // mmap, munmap. +# include +# include // struct stat. +# include // sysconf. +#endif + +namespace boost { namespace iostreams { + +namespace detail { + +// Class containing the platform-sepecific implementation +// Invariant: The members params_, data_, size_, handle_ (and mapped_handle_ +// on Windows) either +// - all have default values (or INVALID_HANDLE_VALUE for +// Windows handles), or +// - all have values reflecting a successful mapping. +// In the first case, error_ may be true, reflecting a recent unsuccessful +// open or close attempt; in the second case, error_ is always false. +class mapped_file_impl { +public: + typedef mapped_file_source::size_type size_type; + typedef mapped_file_source::param_type param_type; + typedef mapped_file_source::mapmode mapmode; + BOOST_STATIC_CONSTANT( + size_type, max_length = mapped_file_source::max_length); + mapped_file_impl(); + ~mapped_file_impl(); + void open(param_type p); + bool is_open() const { return data_ != 0; } + void close(); + bool error() const { return error_; } + mapmode flags() const { return params_.flags; } + std::size_t size() const { return size_; } + char* data() const { return data_; } + void resize(stream_offset new_size); + static int alignment(); +private: + void open_file(param_type p); + void try_map_file(param_type p); + void map_file(param_type& p); + bool unmap_file(); + void clear(bool error); + void cleanup_and_throw(const char* msg); + param_type params_; + char* data_; + stream_offset size_; + file_handle handle_; +#ifdef BOOST_IOSTREAMS_WINDOWS + file_handle mapped_handle_; +#endif + bool error_; +}; + +mapped_file_impl::mapped_file_impl() { clear(false); } + +mapped_file_impl::~mapped_file_impl() +{ try { close(); } catch (...) 
{ } } + +void mapped_file_impl::open(param_type p) +{ + if (is_open()) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("file already open")); + p.normalize(); + open_file(p); + map_file(p); // May modify p.hint + params_ = p; +} + +void mapped_file_impl::close() +{ + if (data_ == 0) + return; + bool error = false; + error = !unmap_file() || error; + error = + #ifdef BOOST_IOSTREAMS_WINDOWS + !::CloseHandle(handle_) + #else + ::close(handle_) != 0 + #endif + || error; + clear(error); + if (error) + throw_system_failure("failed closing mapped file"); +} + +void mapped_file_impl::resize(stream_offset new_size) +{ + if (!is_open()) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("file is closed")); + if (flags() & mapped_file::priv) + boost::throw_exception( + BOOST_IOSTREAMS_FAILURE("can't resize private mapped file") + ); + if (!(flags() & mapped_file::readwrite)) + boost::throw_exception( + BOOST_IOSTREAMS_FAILURE("can't resize readonly mapped file") + ); + if (params_.offset >= new_size) + boost::throw_exception( + BOOST_IOSTREAMS_FAILURE("can't resize below mapped offset") + ); + if (!unmap_file()) + cleanup_and_throw("failed unmapping file"); +#ifdef BOOST_IOSTREAMS_WINDOWS + stream_offset offset = ::SetFilePointer(handle_, 0, NULL, FILE_CURRENT); + if (offset == INVALID_SET_FILE_POINTER && ::GetLastError() != NO_ERROR) + cleanup_and_throw("failed querying file pointer"); + LONG sizehigh = (new_size >> (sizeof(LONG) * 8)); + LONG sizelow = (new_size & 0xffffffff); + DWORD result = ::SetFilePointer(handle_, sizelow, &sizehigh, FILE_BEGIN); + if ((result == INVALID_SET_FILE_POINTER && ::GetLastError() != NO_ERROR) + || !::SetEndOfFile(handle_)) + cleanup_and_throw("failed resizing mapped file"); + sizehigh = (offset >> (sizeof(LONG) * 8)); + sizelow = (offset & 0xffffffff); + ::SetFilePointer(handle_, sizelow, &sizehigh, FILE_BEGIN); +#else + if (BOOST_IOSTREAMS_FD_TRUNCATE(handle_, new_size) == -1) + cleanup_and_throw("failed resizing mapped file"); +#endif + size_ = new_size; + param_type p(params_); + map_file(p); // May modify p.hint + params_ = p; +} + +int mapped_file_impl::alignment() +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + SYSTEM_INFO info; + ::GetSystemInfo(&info); + return static_cast(info.dwAllocationGranularity); +#else + return static_cast(sysconf(_SC_PAGESIZE)); +#endif +} + +void mapped_file_impl::open_file(param_type p) +{ + bool readonly = p.flags != mapped_file::readwrite; +#ifdef BOOST_IOSTREAMS_WINDOWS + + // Open file + DWORD dwDesiredAccess = + readonly ? + GENERIC_READ : + (GENERIC_READ | GENERIC_WRITE); + DWORD dwCreationDisposition = (p.new_file_size != 0 && !readonly) ? + CREATE_ALWAYS : + OPEN_EXISTING; + DWORD dwFlagsandAttributes = + readonly ? + FILE_ATTRIBUTE_READONLY : + FILE_ATTRIBUTE_TEMPORARY; + handle_ = p.path.is_wide() ? 
+ ::CreateFileW( + p.path.c_wstr(), + dwDesiredAccess, + FILE_SHARE_READ, + NULL, + dwCreationDisposition, + dwFlagsandAttributes, + NULL ) : + ::CreateFileA( + p.path.c_str(), + dwDesiredAccess, + FILE_SHARE_READ, + NULL, + dwCreationDisposition, + dwFlagsandAttributes, + NULL ); + if (handle_ == INVALID_HANDLE_VALUE) + cleanup_and_throw("failed opening file"); + + // Set file size + if (p.new_file_size != 0 && !readonly) { + LONG sizehigh = (p.new_file_size >> (sizeof(LONG) * 8)); + LONG sizelow = (p.new_file_size & 0xffffffff); + DWORD result = ::SetFilePointer(handle_, sizelow, &sizehigh, FILE_BEGIN); + if ((result == INVALID_SET_FILE_POINTER && ::GetLastError() != NO_ERROR) + || !::SetEndOfFile(handle_)) + cleanup_and_throw("failed setting file size"); + } + + // Determine file size. Dynamically locate GetFileSizeEx for compatibility + // with old Platform SDK (thanks to Pavel Vozenilik). + typedef BOOL (WINAPI *func)(HANDLE, PLARGE_INTEGER); + HMODULE hmod = ::GetModuleHandleA("kernel32.dll"); + func get_size = + reinterpret_cast(::GetProcAddress(hmod, "GetFileSizeEx")); + if (get_size) { + LARGE_INTEGER info; + if (get_size(handle_, &info)) { + boost::intmax_t size = + ( (static_cast(info.HighPart) << 32) | + info.LowPart ); + size_ = + static_cast( + p.length != max_length ? + std::min(p.length, size) : + size + ); + } else { + cleanup_and_throw("failed querying file size"); + return; + } + } else { + DWORD hi; + DWORD low; + if ( (low = ::GetFileSize(handle_, &hi)) + != + INVALID_FILE_SIZE ) + { + boost::intmax_t size = + (static_cast(hi) << 32) | low; + size_ = + static_cast( + p.length != max_length ? + std::min(p.length, size) : + size + ); + } else { + cleanup_and_throw("failed querying file size"); + return; + } + } +#else // #ifdef BOOST_IOSTREAMS_WINDOWS + + // Open file + int flags = (readonly ? O_RDONLY : O_RDWR); + if (p.new_file_size != 0 && !readonly) + flags |= (O_CREAT | O_TRUNC); + #ifdef _LARGEFILE64_SOURCE + flags |= O_LARGEFILE; + #endif + errno = 0; + handle_ = ::open(p.path.c_str(), flags, S_IRWXU); + if (errno != 0) + cleanup_and_throw("failed opening file"); + + //--------------Set file size---------------------------------------------// + + if (p.new_file_size != 0 && !readonly) + if (BOOST_IOSTREAMS_FD_TRUNCATE(handle_, p.new_file_size) == -1) + cleanup_and_throw("failed setting file size"); + + //--------------Determine file size---------------------------------------// + + bool success = true; + if (p.length != max_length) { + size_ = p.length; + } else { + struct BOOST_IOSTREAMS_FD_STAT info; + success = ::BOOST_IOSTREAMS_FD_FSTAT(handle_, &info) != -1; + size_ = info.st_size; + } + if (!success) + cleanup_and_throw("failed querying file size"); +#endif // #ifdef BOOST_IOSTREAMS_WINDOWS +} + +void mapped_file_impl::try_map_file(param_type p) +{ + bool priv = p.flags == mapped_file::priv; + bool readonly = p.flags == mapped_file::readonly; +#ifdef BOOST_IOSTREAMS_WINDOWS + + // Create mapping + DWORD protect = priv ? + PAGE_WRITECOPY : + readonly ? + PAGE_READONLY : + PAGE_READWRITE; + mapped_handle_ = + ::CreateFileMappingA( + handle_, + NULL, + protect, + 0, + 0, + NULL ); + if (mapped_handle_ == NULL) + cleanup_and_throw("failed create mapping"); + + // Access data + DWORD access = priv ? + FILE_MAP_COPY : + readonly ? + FILE_MAP_READ : + FILE_MAP_WRITE; + void* data = + ::MapViewOfFileEx( + mapped_handle_, + access, + (DWORD) (p.offset >> 32), + (DWORD) (p.offset & 0xffffffff), + size_ != max_length ? 
size_ : 0, + (LPVOID) p.hint ); + if (!data) + cleanup_and_throw("failed mapping view"); +#else + void* data = + ::BOOST_IOSTREAMS_FD_MMAP( + const_cast(p.hint), + size_, + readonly ? PROT_READ : (PROT_READ | PROT_WRITE), + priv ? MAP_PRIVATE : MAP_SHARED, + handle_, + p.offset ); + if (data == MAP_FAILED) + cleanup_and_throw("failed mapping file"); +#endif + data_ = static_cast(data); +} + +void mapped_file_impl::map_file(param_type& p) +{ + try { + try_map_file(p); + } catch (const std::exception& e) { + if (p.hint) { + p.hint = 0; + try_map_file(p); + } else { + boost::throw_exception(e); + } + } +} + +bool mapped_file_impl::unmap_file() +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + bool error = false; + error = !::UnmapViewOfFile(data_) || error; + error = !::CloseHandle(mapped_handle_) || error; + mapped_handle_ = NULL; + return !error; +#else + return ::munmap(data_, size_) == 0; +#endif +} + +void mapped_file_impl::clear(bool error) +{ + params_ = param_type(); + data_ = 0; + size_ = 0; +#ifdef BOOST_IOSTREAMS_WINDOWS + handle_ = INVALID_HANDLE_VALUE; + mapped_handle_ = NULL; +#else + handle_ = 0; +#endif + error_ = error; +} + +// Called when an error is encountered during the execution of open_file or +// map_file +void mapped_file_impl::cleanup_and_throw(const char* msg) +{ +#ifdef BOOST_IOSTREAMS_WINDOWS + DWORD error = GetLastError(); + if (mapped_handle_ != NULL) + ::CloseHandle(mapped_handle_); + if (handle_ != INVALID_HANDLE_VALUE) + ::CloseHandle(handle_); + SetLastError(error); +#else + int error = errno; + if (handle_ != 0) + ::close(handle_); + errno = error; +#endif + clear(true); + boost::iostreams::detail::throw_system_failure(msg); +} + +//------------------Implementation of mapped_file_params_base-----------------// + +void mapped_file_params_base::normalize() +{ + if (mode && flags) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE( + "at most one of 'mode' and 'flags' may be specified" + )); + if (flags) { + switch (flags) { + case mapped_file::readonly: + case mapped_file::readwrite: + case mapped_file::priv: + break; + default: + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("invalid flags")); + } + } else { + flags = (mode & BOOST_IOS::out) ? + mapped_file::readwrite : + mapped_file::readonly; + mode = BOOST_IOS::openmode(); + } + if (offset < 0) + boost::throw_exception(BOOST_IOSTREAMS_FAILURE("invalid offset")); + if (new_file_size < 0) + boost::throw_exception( + BOOST_IOSTREAMS_FAILURE("invalid new file size") + ); +} + +} // End namespace detail. + +//------------------Implementation of mapped_file_source----------------------// + +mapped_file_source::mapped_file_source() + : pimpl_(new impl_type) + { } + +mapped_file_source::mapped_file_source(const mapped_file_source& other) + : pimpl_(other.pimpl_) + { } + +bool mapped_file_source::is_open() const +{ return pimpl_->is_open(); } + +void mapped_file_source::close() { pimpl_->close(); } + +// safe_bool is explicitly qualified below to please msvc 7.1 +mapped_file_source::operator mapped_file_source::safe_bool() const +{ return pimpl_->error() ? 
&safe_bool_helper::x : 0; } + +bool mapped_file_source::operator!() const +{ return pimpl_->error(); } + +mapped_file_source::mapmode mapped_file_source::flags() const +{ return pimpl_->flags(); } + +mapped_file_source::size_type mapped_file_source::size() const +{ return pimpl_->size(); } + +const char* mapped_file_source::data() const { return pimpl_->data(); } + +const char* mapped_file_source::begin() const { return data(); } + +const char* mapped_file_source::end() const { return data() + size(); } +int mapped_file_source::alignment() +{ return detail::mapped_file_impl::alignment(); } + +void mapped_file_source::init() { pimpl_.reset(new impl_type); } + +void mapped_file_source::open_impl(const param_type& p) +{ pimpl_->open(p); } + +//------------------Implementation of mapped_file-----------------------------// + +mapped_file::mapped_file(const mapped_file& other) + : delegate_(other.delegate_) + { } + +void mapped_file::resize(stream_offset new_size) +{ delegate_.pimpl_->resize(new_size); } + +//------------------Implementation of mapped_file_sink------------------------// + +mapped_file_sink::mapped_file_sink(const mapped_file_sink& other) + : mapped_file(static_cast(other)) + { } + +//----------------------------------------------------------------------------// + +} } // End namespaces iostreams, boost. diff --git a/deal.II/bundled/boost-1.49.0/libs/iostreams/src/zlib.cpp b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/zlib.cpp new file mode 100644 index 0000000000..d765e855df --- /dev/null +++ b/deal.II/bundled/boost-1.49.0/libs/iostreams/src/zlib.cpp @@ -0,0 +1,193 @@ +// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com) +// (C) Copyright 2003-2007 Jonathan Turkanis +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.) + +// See http://www.boost.org/libs/iostreams for documentation. + +// To configure Boost to work with zlib, see the +// installation instructions here: +// http://boost.org/libs/iostreams/doc/index.html?path=7 + +// Define BOOST_IOSTREAMS_SOURCE so that +// knows that we are building the library (possibly exporting code), rather +// than using it (possibly importing code). +#define BOOST_IOSTREAMS_SOURCE + +#include +#include +#include +#include "zlib.h" // Jean-loup Gailly's and Mark Adler's "zlib.h" header. + // To configure Boost to work with zlib, see the + // installation instructions here: + // http://boost.org/libs/iostreams/doc/index.html?path=7 + +namespace boost { namespace iostreams { + +namespace zlib { + + // Compression levels + +const int no_compression = Z_NO_COMPRESSION; +const int best_speed = Z_BEST_SPEED; +const int best_compression = Z_BEST_COMPRESSION; +const int default_compression = Z_DEFAULT_COMPRESSION; + + // Compression methods + +const int deflated = Z_DEFLATED; + + // Compression strategies + +const int default_strategy = Z_DEFAULT_STRATEGY; +const int filtered = Z_FILTERED; +const int huffman_only = Z_HUFFMAN_ONLY; + + // Status codes + +const int okay = Z_OK; +const int stream_end = Z_STREAM_END; +const int stream_error = Z_STREAM_ERROR; +const int version_error = Z_VERSION_ERROR; +const int data_error = Z_DATA_ERROR; +const int mem_error = Z_MEM_ERROR; +const int buf_error = Z_BUF_ERROR; + + // Flush codes + +const int finish = Z_FINISH; +const int no_flush = Z_NO_FLUSH; +const int sync_flush = Z_SYNC_FLUSH; + + // Code for current OS + +//const int os_code = OS_CODE; + +} // End namespace zlib. 
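Like the bzip2 constants earlier, these are only aliases for zlib's Z_* codes; typical client code sits one level higher, in the gzip/zlib filters compiled from this directory. A reading-side counterpart to the earlier compression sketch (again hypothetical and not part of the commit; the .gz file name is made up):

    #include <boost/iostreams/filtering_stream.hpp>
    #include <boost/iostreams/filter/gzip.hpp>
    #include <fstream>
    #include <iostream>
    #include <string>

    int main()
    {
      // Transparently decompress a gzip file; the decompressor drives the
      // inflate() wrappers below and the gzip_header state machine from gzip.cpp.
      std::ifstream file("mesh.msh.gz", std::ios_base::binary);
      boost::iostreams::filtering_istream in;
      in.push(boost::iostreams::gzip_decompressor());
      in.push(file);
      std::string line;
      while (std::getline(in, line))
        std::cout << line << '\n';
      return 0;
    }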
+ +//------------------Implementation of zlib_error------------------------------// + +zlib_error::zlib_error(int error) + : BOOST_IOSTREAMS_FAILURE("zlib error"), error_(error) + { } + +void zlib_error::check BOOST_PREVENT_MACRO_SUBSTITUTION(int error) +{ + switch (error) { + case Z_OK: + case Z_STREAM_END: + //case Z_BUF_ERROR: + return; + case Z_MEM_ERROR: + boost::throw_exception(std::bad_alloc()); + default: + boost::throw_exception(zlib_error(error)); + ; + } +} + +//------------------Implementation of zlib_base-------------------------------// + +namespace detail { + +zlib_base::zlib_base() + : stream_(new z_stream), calculate_crc_(false), crc_(0), crc_imp_(0) + { } + +zlib_base::~zlib_base() { delete static_cast(stream_); } + +void zlib_base::before( const char*& src_begin, const char* src_end, + char*& dest_begin, char* dest_end ) +{ + z_stream* s = static_cast(stream_); + s->next_in = reinterpret_cast(const_cast(src_begin)); + s->avail_in = static_cast(src_end - src_begin); + s->next_out = reinterpret_cast(dest_begin); + s->avail_out= static_cast(dest_end - dest_begin); +} + +void zlib_base::after(const char*& src_begin, char*& dest_begin, bool compress) +{ + z_stream* s = static_cast(stream_); + char* next_in = reinterpret_cast(s->next_in); + char* next_out = reinterpret_cast(s->next_out); + if (calculate_crc_) { + const zlib::byte* buf = compress ? + reinterpret_cast(src_begin) : + reinterpret_cast( + const_cast(dest_begin) + ); + zlib::uint length = compress ? + static_cast(next_in - src_begin) : + static_cast(next_out - dest_begin); + if (length > 0) + crc_ = crc_imp_ = crc32(crc_imp_, buf, length); + } + total_in_ = s->total_in; + total_out_ = s->total_out; + src_begin = const_cast(next_in); + dest_begin = next_out; +} + +int zlib_base::xdeflate(int flush) +{ + return ::deflate(static_cast(stream_), flush); +} + +int zlib_base::xinflate(int flush) +{ + return ::inflate(static_cast(stream_), flush); +} + +void zlib_base::reset(bool compress, bool realloc) +{ + z_stream* s = static_cast(stream_); + // Undiagnosed bug: + // deflateReset(), etc., return Z_DATA_ERROR + //zlib_error::check BOOST_PREVENT_MACRO_SUBSTITUTION( + realloc ? + (compress ? deflateReset(s) : inflateReset(s)) : + (compress ? deflateEnd(s) : inflateEnd(s)) + ; + //); + crc_imp_ = 0; +} + +void zlib_base::do_init + ( const zlib_params& p, bool compress, + #if !BOOST_WORKAROUND(BOOST_MSVC, < 1300) + zlib::xalloc_func /* alloc */, zlib::xfree_func /* free*/, + #endif + void* derived ) +{ + calculate_crc_ = p.calculate_crc; + z_stream* s = static_cast(stream_); + + // Current interface for customizing memory management + // is non-conforming and has been disabled: + //#if !BOOST_WORKAROUND(BOOST_MSVC, < 1300) + // s->zalloc = alloc; + // s->zfree = free; + //#else + s->zalloc = 0; + s->zfree = 0; + //#endif + s->opaque = derived; + int window_bits = p.noheader? -p.window_bits : p.window_bits; + zlib_error::check BOOST_PREVENT_MACRO_SUBSTITUTION( + compress ? + deflateInit2( s, + p.level, + p.method, + window_bits, + p.mem_level, + p.strategy ) : + inflateInit2(s, window_bits) + ); +} + +} // End namespace detail. + +//----------------------------------------------------------------------------// + +} } // End namespaces iostreams, boost. 
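zlib_base::do_init() above negates window_bits when zlib_params::noheader is set, which is zlib's documented convention for producing a raw deflate stream without the zlib header and checksum. A minimal sketch of that convention against plain zlib (illustration only, not part of the commit):

    #include <zlib.h>
    #include <cstring>

    int main()
    {
      z_stream s;
      std::memset(&s, 0, sizeof(s));  // zalloc/zfree/opaque = 0, i.e. default allocators
      // Negative windowBits selects raw deflate, the same effect as
      // "p.noheader ? -p.window_bits : p.window_bits" in do_init().
      const int ret = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                                   /* windowBits = */ -15, /* memLevel = */ 8,
                                   Z_DEFAULT_STRATEGY);
      if (ret != Z_OK)
        return 1;
      deflateEnd(&s);
      return 0;
    }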
diff --git a/deal.II/cmake/checks/check_01_cxx_features.cmake b/deal.II/cmake/checks/check_01_cxx_features.cmake index 0f5b0865f2..fa1540fae4 100644 --- a/deal.II/cmake/checks/check_01_cxx_features.cmake +++ b/deal.II/cmake/checks/check_01_cxx_features.cmake @@ -19,9 +19,8 @@ # # This file sets up # -# DEAL_II_HAVE_CXX11_FLAG -# DEAL_II_CXX11_FLAG -# DEAL_II_USE_CXX11 +# DEAL_II_WITH_CXX11 +# # HAVE_ISNAN # HAVE_UNDERSCORE_ISNAN # DEAL_II_HAVE_ISFINITE @@ -34,31 +33,24 @@ # # ######################################################################## + # -# See if there is a compiler flag to enable C++11 features +# Only run these tests if C++11 support should actually be set up: # -IF(NOT DEFINED DEAL_II_HAVE_CXX11_FLAG) - FOREACH(_test_flag - "-std=c++11" - "-std=c++0x" - ) - CHECK_CXX_COMPILER_FLAG("${_test_flag}" DEAL_II_HAVE_CXX11_FLAG) - - IF(DEAL_II_HAVE_CXX11_FLAG) - # We have found a CXX11_FLAG that the compiler understands - SET(DEAL_II_CXX11_FLAG "${_test_flag}" CACHE INTERNAL "") - BREAK() - ELSE() - # Remove test result from cache and try the next flag in the list - UNSET(DEAL_II_HAVE_CXX11_FLAG CACHE) +IF(NOT DEFINED DEAL_II_WITH_CXX11 OR DEAL_II_WITH_CXX11) + + IF("${DEAL_II_CXX11_FLAG}" STREQUAL "") + CHECK_CXX_COMPILER_FLAG("-std=c++11" DEAL_II_HAVE_FLAG_stdcxx11) + IF(DEAL_II_HAVE_FLAG_stdcxx11) + SET(DEAL_II_CXX11_FLAG "-std=c++11") + ELSEIF(DEAL_II_HAVE_FLAG_stdcxx11) + CHECK_CXX_COMPILER_FLAG("-std=c++0x" DEAL_II_HAVE_FLAG_stdcxx0x) + SET(DEAL_II_CXX11_FLAG "-std=x++0x") ENDIF() - ENDFOREACH() -ENDIF() - - -IF(DEAL_II_HAVE_CXX11_FLAG) + ENDIF() # Set CMAKE_REQUIRED_FLAGS for the unit tests + MESSAGE(STATUS "Using C++11 flag \"${DEAL_II_CXX11_FLAG}\"") PUSH_TEST_FLAG("${DEAL_II_CXX11_FLAG}") CHECK_CXX_SOURCE_COMPILES( @@ -110,12 +102,7 @@ IF(DEAL_II_HAVE_CXX11_FLAG) # below but it will throw an exception when run. So test # that as well. # - # TODO: This test will only succeed on platforms where "-pthread" is - # recognized. But this isn't easily fixable: - # configure_threads.cmake which will determine and setup threads - # has to be called later... - # - IF(NOT CMAKE_CROSSCOMPILING) # Todo: Is it better to use DEAL_II_ALLOW_PLATFORM_INTROSPECTION here? + IF(DEAL_II_ALLOW_PLATFORM_INTROSPECTION) PUSH_TEST_FLAG("-pthread") CHECK_CXX_SOURCE_RUNS( " @@ -127,7 +114,7 @@ IF(DEAL_II_HAVE_CXX11_FLAG) POP_TEST_FLAG() ELSE() # Just export it ;-) - SET_IF_EMPTY(DEAL_II_HAVE_CXX11_THREAD TRUE) + SET(DEAL_II_HAVE_CXX11_THREAD TRUE CACHE BOOL "") ENDIF() CHECK_CXX_SOURCE_COMPILES( @@ -156,6 +143,75 @@ IF(DEAL_II_HAVE_CXX11_FLAG) " DEAL_II_HAVE_CXX11_TYPE_TRAITS) + # + # On Mac OS-X 10.9 with recent gcc compilers in C++11 mode linking to + # some standard C library functions, notably toupper and tolower, fail + # due to unresolved references to this functions. + # + # Thanks to Denis Davydov for the testcase. + # + # Matthias Maier, 2013 + # + CHECK_CXX_SOURCE_COMPILES( + " + #include + int main () + { + char c = toupper('a'); + } + " + DEAL_II_HAVE_CXX11_MACOSXC99BUG_OK) + + + # + # icc-13 triggers an internal compiler error when compiling + # std::numeric_limits<...>::min() with -std=c++0x [1]. + # + # Reported by Ted Kord. 
+ # + # - Matthias Maier, 2013 + # + # [1] http://software.intel.com/en-us/forums/topic/328902 + # + CHECK_CXX_SOURCE_COMPILES( + " + #include + struct Integer + { + static const int min_int_value; + static const int max_int_value; + }; + const int Integer::min_int_value = std::numeric_limits::min(); + const int Integer::max_int_value = std::numeric_limits::max(); + int main() { return 0; } + " + DEAL_II_HAVE_CXX11_ICCNUMERICLIMITSBUG_OK) + + # + # icc-14.0.0 has an astonishing bug [1] where it hits an internal compiler + # error when run in C++11 mode with libstdc++-4.7 (from gcc). + # + # We just disable C++11 mode in this case + # + # [1] http://software.intel.com/en-us/forums/topic/472385 + # + # - Matthias Maier, 2013 + # + CHECK_CXX_SOURCE_COMPILES( + " + #include + template void foo() + { + std::vector data(100); + } + int main() + { + foo(); + } + " + DEAL_II_HAVE_CXX11_ICCLIBSTDCPP47CXX11BUG_OK) + + IF( DEAL_II_HAVE_CXX11_ARRAY AND DEAL_II_HAVE_CXX11_CONDITION_VARIABLE AND DEAL_II_HAVE_CXX11_FUNCTIONAL AND @@ -164,58 +220,60 @@ IF(DEAL_II_HAVE_CXX11_FLAG) DEAL_II_HAVE_CXX11_THREAD AND DEAL_II_HAVE_CXX11_MUTEX AND DEAL_II_HAVE_CXX11_TUPLE AND - DEAL_II_HAVE_CXX11_TYPE_TRAITS ) - - MESSAGE(STATUS "Sufficient C++11 support. Enabling ${DEAL_II_CXX11_FLAG}.") - - SET(DEAL_II_USE_CXX11 TRUE) - - ADD_FLAGS(DEAL_II_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") - - ELSE() - MESSAGE(STATUS "Insufficient C++11 support. Disabling ${DEAL_II_CXX11_FLAG}.") + DEAL_II_HAVE_CXX11_TYPE_TRAITS AND + DEAL_II_HAVE_CXX11_MACOSXC99BUG_OK AND + DEAL_II_HAVE_CXX11_ICCNUMERICLIMITSBUG_OK AND + DEAL_II_HAVE_CXX11_ICCLIBSTDCPP47CXX11BUG_OK ) + SET(DEAL_II_HAVE_CXX11 TRUE) ENDIF() - IF(DEAL_II_USE_CXX11) - CHECK_CXX_SOURCE_COMPILES( - " - #include - int main(){ std::is_trivially_copyable bob; } - " - DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE) + RESET_CMAKE_REQUIRED() +ENDIF() -# Currently unused: # -# CHECK_CXX_SOURCE_COMPILES( -# " -# #include -# std::vector v; -# int main(){ auto i = v.begin(); *i; return 0;} -# " -# DEAL_II_HAVE_CXX11_AUTO_TYPE) +# Set up a configuration option for C++11 support: # -# CHECK_CXX_SOURCE_COMPILES( -# " -# #include ], -# std::vector v; -# int main(){ for (std::vector::iterator i : v) *i; return 0;} -# " -# DEAL_II_HAVE_CXX11_RANGE_BASED_FOR) + +OPTION(DEAL_II_WITH_CXX11 + "Compile deal.II using C++11 language standard." + ${DEAL_II_HAVE_CXX11} + ) + # -# IF( DEAL_II_HAVE_CXX11_AUTO_TYPE AND -# DEAL_II_HAVE_CXX11_RANGE_BASED_FOR ) +# Bail out if user requested C++11 support (DEAL_II_WITH_CXX11) but support +# is not available due to above tests (DEAL_II_HAVE_CXX11): # -# MESSAGE(STATUS "Additional C++11 support available.") + +IF(DEAL_II_WITH_CXX11 AND NOT DEAL_II_HAVE_CXX11) + MESSAGE(FATAL_ERROR "\n" + "C++11 support was requested (DEAL_II_WITH_CXX11=TRUE) but is not " + "supported by the current compiler.\n" + "Please disable C++11 support, i.e. configure with\n" + " -DDEAL_II_WITH_CXX11=FALSE,\n" + "or use a different compiler, instead. 
(If the compiler flag for C++11 " + "support differs from \"-std=c++0x\" or \"-std=c++11\", a suitable " + "compiler flag has to be specified manually.\n\n" + ) +ENDIF() + # -# SET(DEAL_II_CAN_USE_ADDITIONAL_CXX1X_FEATURES) -# ENDIF() +# Set up C++11 support: # - ENDIF() - POP_TEST_FLAG() +IF(DEAL_II_WITH_CXX11) + ADD_FLAGS(DEAL_II_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") + MESSAGE(STATUS "DEAL_II_WITH_CXX11 successfully set up") + PUSH_TEST_FLAG("${DEAL_II_CXX11_FLAG}") + CHECK_CXX_SOURCE_COMPILES( + " + #include + int main(){ std::is_trivially_copyable bob; } + " + DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE) + POP_TEST_FLAG() ELSE() - MESSAGE(STATUS "Insufficient C++11 support. Disabling ${DEAL_II_CXX11_FLAG}.") + MESSAGE(STATUS "DEAL_II_WITH_CXX11 disabled") ENDIF() diff --git a/deal.II/cmake/checks/check_03_compiler_bugs.cmake b/deal.II/cmake/checks/check_03_compiler_bugs.cmake index ea8d16f293..364bad027e 100644 --- a/deal.II/cmake/checks/check_03_compiler_bugs.cmake +++ b/deal.II/cmake/checks/check_03_compiler_bugs.cmake @@ -289,67 +289,6 @@ IF(DEAL_II_HAVE_BUNDLED_DIRECTORY) ENDIF() -# -# icc-13 triggers an internal compiler error when compiling -# std::numeric_limits<...>::min() with -std=c++0x [1]. -# Just disable C++11 support completely in this case. -# -# Reported by Ted Kord. -# -# - Matthias Maier, 2013 -# -# [1] http://software.intel.com/en-us/forums/topic/328902 -# -CHECK_CXX_COMPILER_BUG( - " - #include - struct Integer - { - static const int min_int_value; - static const int max_int_value; - }; - const int Integer::min_int_value = std::numeric_limits::min(); - const int Integer::max_int_value = std::numeric_limits::max(); - int main() { return 0; } - " - DEAL_II_ICC_NUMERICLIMITS_BUG) - -# -# icc-14.0.0 has an astonishing bug [1] where it hits an internal compiler -# error when run in C++11 mode with libstdc++-4.7 (from gcc). -# -# We just disable C++11 mode in this case -# -# [1] http://software.intel.com/en-us/forums/topic/472385 -# -# - Matthias Maier, 2013 -# -CHECK_CXX_COMPILER_BUG( - " - #include - template void foo() - { - std::vector data(100); - } - int main() - { - foo(); - } - " - DEAL_II_ICC_LIBSTDCPP47CXX11_BUG) - - -IF( DEAL_II_ICC_NUMERICLIMITS_BUG OR - DEAL_II_ICC_LIBSTDCPP47CXX11_BUG ) - MESSAGE(STATUS - "Intel C++11 bug found, disabling C++11 support" - ) - STRIP_FLAG(DEAL_II_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") - SET(DEAL_II_CAN_USE_CXX1X FALSE) - SET(DEAL_II_USE_CXX11 FALSE) -ENDIF() - - # # in intel (at least 13.1 and 14), vectorization causes # wrong code. See https://code.google.com/p/dealii/issues/detail?id=156 @@ -377,32 +316,3 @@ IF( CMAKE_SYSTEM_NAME MATCHES "CYGWIN" OR CMAKE_SYSTEM_NAME MATCHES "Windows" ) SET(DEAL_II_CONSTEXPR_BUG TRUE) ENDIF() - -# -# On Mac OS-X 10.9 with recent gcc compilers in C++11 mode linking to some -# standard C library functions, notably toupper and tolower, fail due to -# unresolved references to this functions. Disable C++11 support in this -# case. -# -# Thanks to Denis Davydov for the testcase. 
-# -# Matthias Maier, 2013 -# -CHECK_CXX_COMPILER_BUG( - " - #include - int main () - { - char c = toupper('a'); - } - " - DEAL_II_MAC_OSX_C99_BUG) - -IF(DEAL_II_MAC_OSX_C99_BUG) - MESSAGE(STATUS - "Mac OS-X C99 bug found, disabling C++11 support" - ) - STRIP_FLAG(DEAL_II_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") - SET(DEAL_II_CAN_USE_CXX1X FALSE) - SET(DEAL_II_USE_CXX11 FALSE) -ENDIF() diff --git a/deal.II/cmake/config/CMakeLists.txt b/deal.II/cmake/config/CMakeLists.txt index 987df97bf9..4ade81d2b2 100644 --- a/deal.II/cmake/config/CMakeLists.txt +++ b/deal.II/cmake/config/CMakeLists.txt @@ -26,7 +26,7 @@ # two distinct set ups. # -MESSAGE(STATUS "Setup project configuration") +MESSAGE(STATUS "Setting up project configuration") # # Read in auxiliary include directories for the build directory @@ -380,4 +380,4 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES) ) ENDIF() -MESSAGE(STATUS "Setup project configuration - Done") +MESSAGE(STATUS "Setting up project configuration - Done") diff --git a/deal.II/cmake/configure/configure_1_bzip2.cmake b/deal.II/cmake/configure/configure_1_bzip2.cmake new file mode 100644 index 0000000000..61fd319e7e --- /dev/null +++ b/deal.II/cmake/configure/configure_1_bzip2.cmake @@ -0,0 +1,36 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2012 - 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. 
+## +## --------------------------------------------------------------------- + +# +# Configuration for the zlib library: +# + +MACRO(FEATURE_BZIP2_FIND_EXTERNAL var) + + FIND_PACKAGE(BZip2) + + IF(BZIP2_FOUND) + # + # Rename some variables: + # + SET(BZIP2_VERSION ${BZIP2_VERSION_STRING}) + SET(BZIP2_INCLUDE_DIRS ${BZIP2_INCLUDE_DIR}) + + SET(${var} TRUE) + ENDIF() +ENDMACRO() + +CONFIGURE_FEATURE(BZIP2) diff --git a/deal.II/cmake/configure/configure_1_threads.cmake b/deal.II/cmake/configure/configure_1_threads.cmake index 182f7d3bc8..edd2b4cd20 100644 --- a/deal.II/cmake/configure/configure_1_threads.cmake +++ b/deal.II/cmake/configure/configure_1_threads.cmake @@ -165,7 +165,7 @@ MACRO(FEATURE_THREADS_CONFIGURE_EXTERNAL) # Workaround for an issue with C++11 mode, non gcc-compilers and missing # template std::ist_trivially_copyable # - IF( DEAL_II_USE_CXX11 AND + IF( DEAL_II_WITH_CXX11 AND NOT DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE AND NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU" ) LIST(APPEND DEAL_II_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") @@ -200,7 +200,7 @@ MACRO(FEATURE_THREADS_CONFIGURE_BUNDLED) # Workaround for an issue with C++11 mode, non gcc-compilers and missing # template std::ist_trivially_copyable # - IF( DEAL_II_USE_CXX11 AND + IF( DEAL_II_WITH_CXX11 AND NOT DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE AND NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU" ) LIST(APPEND DEAL_II_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") diff --git a/deal.II/cmake/configure/configure_zlib.cmake b/deal.II/cmake/configure/configure_1_zlib.cmake similarity index 100% rename from deal.II/cmake/configure/configure_zlib.cmake rename to deal.II/cmake/configure/configure_1_zlib.cmake diff --git a/deal.II/cmake/configure/configure_2_trilinos.cmake b/deal.II/cmake/configure/configure_2_trilinos.cmake index d76b3cbf20..e04bbfac41 100644 --- a/deal.II/cmake/configure/configure_2_trilinos.cmake +++ b/deal.II/cmake/configure/configure_2_trilinos.cmake @@ -160,7 +160,7 @@ MACRO(FEATURE_TRILINOS_FIND_EXTERNAL var) # with the -std=c++0x flag of GCC, see deal.II FAQ. # Test whether that is indeed the case # - IF(DEAL_II_USE_CXX11 AND NOT TRILINOS_SUPPORTS_CPP11) + IF(DEAL_II_WITH_CXX11 AND NOT TRILINOS_SUPPORTS_CPP11) IF(TRILINOS_HAS_C99_TR1_WORKAROUND) LIST(APPEND DEAL_II_DEFINITIONS "HAS_C99_TR1_CMATH") diff --git a/deal.II/cmake/configure/configure_boost.cmake b/deal.II/cmake/configure/configure_boost.cmake index 9571fe9b0e..de29304bbf 100644 --- a/deal.II/cmake/configure/configure_boost.cmake +++ b/deal.II/cmake/configure/configure_boost.cmake @@ -38,19 +38,24 @@ SET(DEAL_II_WITH_BOOST ON # Always true. We need it :-] MACRO(FEATURE_BOOST_FIND_EXTERNAL var) + # + # This mumbo jumbo is necessary because CMake won't let us test against + # BOOST_DIR directly. WTF?! 
+ # IF(NOT DEFINED BOOST_DIR) SET(BOOST_DIR "$ENV{BOOST_DIR}") ELSE() SET_IF_EMPTY(BOOST_DIR "$ENV{BOOST_DIR}") ENDIF() + IF(NOT "${BOOST_DIR}" STREQUAL "") SET(BOOST_ROOT "${BOOST_DIR}") ENDIF() IF(DEAL_II_WITH_THREADS) - SET(_boost_components serialization system thread) + SET(_boost_components iostreams serialization system thread) ELSE() - SET(_boost_components serialization system) + SET(_boost_components iostreams serialization system) ENDIF() # @@ -65,20 +70,14 @@ MACRO(FEATURE_BOOST_FIND_EXTERNAL var) # # Fall back to dynamic libraries if no static libraries could be found: # - IF( Boost_USE_STATIC_LIBS AND - (NOT Boost_SERIALIZATION_FOUND OR NOT Boost_SYSTEM_FOUND) - ) + IF(NOT Boost_FOUND AND Boost_USE_STATIC_LIBS) SET(Boost_USE_STATIC_LIBS FALSE) FIND_PACKAGE(Boost 1.44 COMPONENTS ${_boost_components}) ENDIF() MARK_AS_ADVANCED(Boost_DIR) - - IF( Boost_SERIALIZATION_FOUND AND - Boost_SYSTEM_FOUND AND - (NOT DEAL_II_WITH_THREADS OR Boost_THREAD_FOUND) ) - + IF(Boost_FOUND) SET(BOOST_VERSION_MAJOR "${Boost_MAJOR_VERSION}") SET(BOOST_VERSION_MINOR "${Boost_MINOR_VERSION}") SET(BOOST_VERSION_SUBMINOR "${Boost_SUBMINOR_VERSION}") @@ -101,9 +100,7 @@ MACRO(FEATURE_BOOST_FIND_EXTERNAL var) ELSE() - SET(BOOST_DIR "" CACHE PATH - "An optional hint to a boost directory" - ) + SET(BOOST_DIR "" CACHE PATH "An optional hint to a boost directory") ENDIF() ENDMACRO() @@ -115,17 +112,6 @@ SET(BOOST_ADD_TO_USER_INCLUDE_DIRS TRUE) MACRO(FEATURE_BOOST_CONFIGURE_BUNDLED) - # - # We need to set some definitions to use the headers of the bundled boost - # library: - # - LIST(APPEND DEAL_II_DEFINITIONS - "BOOST_NO_HASH" "BOOST_NO_SLIST" - ) - LIST(APPEND DEAL_II_USER_DEFINITIONS - "BOOST_NO_HASH" "BOOST_NO_SLIST" - ) - INCLUDE_DIRECTORIES(${BOOST_FOLDER}/include) ENDMACRO() diff --git a/deal.II/cmake/macros/macro_configure_feature.cmake b/deal.II/cmake/macros/macro_configure_feature.cmake index 4a35b260fd..240e2bfa0d 100644 --- a/deal.II/cmake/macros/macro_configure_feature.cmake +++ b/deal.II/cmake/macros/macro_configure_feature.cmake @@ -100,14 +100,15 @@ or set the relevant variables by hand in ccmake." ) ELSE() SET(_hint_snippet - "or set the relevant variables by hand in ccmake." + " or set the relevant variables by hand in ccmake." 
) ENDIF() IF(FEATURE_${_feature}_HAVE_BUNDLED) SET(_bundled_snippet - "\n\nAlternatively you may choose to compile the bundled library of -${_feature_lowercase} by setting DEAL_II_ALLOW_BUNDLED=on or\nDEAL_II_FORCE_BUNDLED_${_feature}=on.\n" + "\nAlternatively you may choose to compile the bundled library of " + "${_feature_lowercase} by setting DEAL_II_ALLOW_BUNDLED=on or " + "DEAL_II_FORCE_BUNDLED_${_feature}=on.\n" ) ELSE() SET(_bundled_snippet "\n") @@ -117,7 +118,7 @@ ${_feature_lowercase} by setting DEAL_II_ALLOW_BUNDLED=on or\nDEAL_II_FORCE_BUND "Could not find the ${_feature_lowercase} library!\n" ${${_feature}_ADDITIONAL_ERROR_STRING} "Please ensure that a suitable ${_feature_lowercase} library is installed on your computer.\n" - "If the library is not at a default location, either provide some hints\n" + "If the library is not at a default location, either provide some hints " "for autodetection,${_hint_snippet}${_bundled_snippet}" ) ENDMACRO() @@ -156,8 +157,8 @@ MACRO(CONFIGURE_FEATURE _feature) STRING(REGEX REPLACE "^DEAL_II_WITH_" "" _dependency ${_dependency}) IF(NOT FEATURE_${_dependency}_PROCESSED) MESSAGE(FATAL_ERROR "\n" - "Internal build system error:\nDEAL_II_WITH_${_feature} depends on " - "DEAL_II_WITH_${_dependency},\nbut CONFIGURE_FEATURE(${_feature}) " + "Internal build system error: DEAL_II_WITH_${_feature} depends on " + "DEAL_II_WITH_${_dependency}, but CONFIGURE_FEATURE(${_feature}) " "was called before CONFIGURE_FEATURE(${_dependency}).\n\n" ) ENDIF() diff --git a/deal.II/cmake/scripts/CMakeLists.txt b/deal.II/cmake/scripts/CMakeLists.txt index 8c38f883de..9bd6e3493d 100644 --- a/deal.II/cmake/scripts/CMakeLists.txt +++ b/deal.II/cmake/scripts/CMakeLists.txt @@ -20,7 +20,7 @@ SET_TARGET_PROPERTIES(expand_instantiations_exe ) IF(DEAL_II_COMPONENT_COMPAT_FILES) - MESSAGE(STATUS "Setup compat_files") + MESSAGE(STATUS "Setting up compat_files") # # Also build and install the old dependency_resolution executable: @@ -40,5 +40,5 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES) DESTINATION ${DEAL_II_COMMON_RELDIR}/scripts COMPONENT compat_files ) - MESSAGE(STATUS "Setup compat_files - Done") + MESSAGE(STATUS "Setting up compat_files - Done") ENDIF() diff --git a/deal.II/cmake/setup_cached_variables.cmake b/deal.II/cmake/setup_cached_variables.cmake index 055fe5c59f..3cad88e7d2 100644 --- a/deal.II/cmake/setup_cached_variables.cmake +++ b/deal.II/cmake/setup_cached_variables.cmake @@ -15,7 +15,7 @@ ## --------------------------------------------------------------------- # -# Setup cached variables (prior to the PROJECT(deal.II) call) +# Set up cached variables (prior to the PROJECT(deal.II) call) # # This file sets up the following cached Options: # @@ -55,7 +55,7 @@ # MESSAGE(STATUS "") -MESSAGE(STATUS "Set up cached variables.") +MESSAGE(STATUS "Setting up cached variables.") ######################################################################## @@ -99,7 +99,7 @@ OPTION(DEAL_II_COMPONENT_PARAMETER_GUI ) OPTION(DEAL_II_ALLOW_AUTODETECTION - "Allow to automatically setup features by setting all undefined DEAL_II_WITH_* variables to ON or OFF" + "Allow to automatically set up features by setting all undefined DEAL_II_WITH_* variables to ON or OFF" ON ) @@ -348,6 +348,14 @@ if this is what you tried to do.) 
) ENDIF() +# +# Compatibility renaming: +# + +IF(DEFINED DEAL_II_HAVE_CXX11_FLAG AND NOT DEAL_II_HAVE_CXX11_FLAG) + SET(DEAL_II_WITH_CXX11 FALSE CACHE BOOL "" FORCE) +ENDIF() + # # Miscellaneous renaming: # diff --git a/deal.II/cmake/setup_compiler_flags.cmake b/deal.II/cmake/setup_compiler_flags.cmake index 1caeff34e9..848f601042 100644 --- a/deal.II/cmake/setup_compiler_flags.cmake +++ b/deal.II/cmake/setup_compiler_flags.cmake @@ -102,7 +102,7 @@ IF(DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS) # *Hooray* We are allowed to set compiler flags :-] # MESSAGE(STATUS "") - MESSAGE(STATUS "Set up default compiler flags.") + MESSAGE(STATUS "Setting up default compiler flags.") # # General setup for GCC and compilers sufficiently close to GCC: diff --git a/deal.II/cmake/setup_testsuite.cmake b/deal.II/cmake/setup_testsuite.cmake index 564a99acbf..888d32ad28 100644 --- a/deal.II/cmake/setup_testsuite.cmake +++ b/deal.II/cmake/setup_testsuite.cmake @@ -30,7 +30,7 @@ SET_IF_EMPTY(MAKEOPTS $ENV{MAKEOPTS}) MESSAGE(STATUS "") -MESSAGE(STATUS "Setup testsuite with TEST_DIR ${TEST_DIR}") +MESSAGE(STATUS "Testsuite will be set up with TEST_DIR=${TEST_DIR}") ADD_SUBDIRECTORY( ${CMAKE_SOURCE_DIR}/tests/quick_tests @@ -73,7 +73,7 @@ ADD_CUSTOM_TARGET(clean_tests -- ${MAKEOPTS} ) -MESSAGE(STATUS "Setup testsuite") +MESSAGE(STATUS "Setting up testsuite") # # Provide custom targets to setup and prune the testsuite subproject: @@ -152,7 +152,7 @@ FOREACH(_category ${_categories}) ) ENDIF() ENDFOREACH() -MESSAGE(STATUS "Setup testsuite - Done") +MESSAGE(STATUS "Setting up testsuite - Done") MESSAGE(STATUS "Regenerating testsuite subprojects") EXECUTE_PROCESS( diff --git a/deal.II/contrib/CMakeLists.txt b/deal.II/contrib/CMakeLists.txt index 0be39e772a..33272b9037 100644 --- a/deal.II/contrib/CMakeLists.txt +++ b/deal.II/contrib/CMakeLists.txt @@ -15,14 +15,14 @@ ## --------------------------------------------------------------------- IF(DEAL_II_COMPONENT_MESH_CONVERTER) - MESSAGE(STATUS "Setup mesh_converter") + MESSAGE(STATUS "Setting up mesh_converter") ADD_SUBDIRECTORY(mesh_conversion) - MESSAGE(STATUS "Setup mesh_converter - Done") + MESSAGE(STATUS "Setting up mesh_converter - Done") ENDIF() IF(DEAL_II_COMPONENT_PARAMETER_GUI) - MESSAGE(STATUS "Setup parameter_gui") + MESSAGE(STATUS "Setting up parameter_gui") ADD_SUBDIRECTORY(parameter_gui) - MESSAGE(STATUS "Setup parameter_gui - Done") + MESSAGE(STATUS "Setting up parameter_gui - Done") ENDIF() diff --git a/deal.II/doc/CMakeLists.txt b/deal.II/doc/CMakeLists.txt index b7efaf534c..64d79e2c07 100644 --- a/deal.II/doc/CMakeLists.txt +++ b/deal.II/doc/CMakeLists.txt @@ -19,7 +19,7 @@ # IF(DEAL_II_COMPONENT_DOCUMENTATION) - MESSAGE(STATUS "Setup documentation") + MESSAGE(STATUS "Setting up documentation") ADD_SUBDIRECTORY(doxygen) @@ -44,7 +44,7 @@ IF(DEAL_II_COMPONENT_DOCUMENTATION) COMPONENT documentation ) - MESSAGE(STATUS "Setup documentation - Done") + MESSAGE(STATUS "Setting up documentation - Done") ENDIF(DEAL_II_COMPONENT_DOCUMENTATION) diff --git a/deal.II/doc/developers/testsuite.html b/deal.II/doc/developers/testsuite.html index 7247194f88..793b94dbbc 100644 --- a/deal.II/doc/developers/testsuite.html +++ b/deal.II/doc/developers/testsuite.html @@ -777,6 +777,15 @@ CONFIG_FILE $ cmake -C ${CONFIG_FILE}). This only has an effect if CTEST_BINARY_DIRECTORY is empty. 
+DESCRIPTION + - A string that is appended to CTEST_BUILD_NAME + +COVERAGE + - If set to TRUE deal.II will be configured with + DEAL_II_SETUP_COVERAGE=TRUE, CMAKE_BUILD_TYPE=Debug and the + CTEST_COVERAGE() stage will be run. Test results must go into the + "Experimental" section. + MAKEOPTS - Additional options that will be passed directly to make (or ninja). diff --git a/deal.II/doc/news/3.4.0-vs-4.0.0.html b/deal.II/doc/news/3.4.0-vs-4.0.0.html index 847d48eda4..cffdc98b9e 100644 --- a/deal.II/doc/news/3.4.0-vs-4.0.0.html +++ b/deal.II/doc/news/3.4.0-vs-4.0.0.html @@ -260,8 +260,8 @@ contributor's names are abbreviated by WB (Wolfgang Bangerth), GK
  • New: There is now - a new report - on assembling matrices available from the + a new report + on assembling matrices available from the documentation page. The main focus is assembling of matrices for vector-valued problems, where shape functions are diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h index 63dd4ad503..e398c01f87 100644 --- a/deal.II/doc/news/changes.h +++ b/deal.II/doc/news/changes.h @@ -50,6 +50,31 @@ inconvenience this causes.

      +
    1. New: deal.II now links with the
+ BOOST
+ Iostreams library (at least if the libz and libbz2 libraries
+ can be found that are necessary for BOOST Iostreams).
+ Among many other things, this allows one to easily
+ read files that have been compressed, as in the following code snippet:
+ @code
+ #include <boost/iostreams/filtering_stream.hpp>
+ #include <boost/iostreams/filter/gzip.hpp>
+ #include <boost/iostreams/device/file.hpp>
+
+ ...
+
+ boost::iostreams::filtering_istream in;
+ in.push(boost::iostreams::basic_gzip_decompressor<>());
+ in.push(boost::iostreams::file_source("myfile.gz"));
+
+ int i;
+ in >> i;
+ @endcode
+ More documentation on how to use BOOST Iostreams can be found
+ in the documentation link referenced above.
+
      + (Wolfgang Bangerth, 2013/12/21) +
    @@ -59,6 +84,68 @@ inconvenience this causes.

    Specific improvements

      + +
    1. Improved: When you call WorkStream::run with an empty function object + for the copier, operations on individual cells are essentially all independent. + In other words, you have a massively parallel collection of jobs. In this + case, a parallel for loop over all elements is better suited than the + pipeline approach currently used. This has now been implemented. +
      + (Wolfgang Bangerth, 2013/12/26) +
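A rough sketch of what this enables (the names local_assemble, ScratchData, and CopyData are placeholders, and the run() call follows the usual iterator-range/worker/copier/sample-data pattern): passing an empty function object as the copier lets the cells be processed as an independent parallel loop.
@code
  #include <deal.II/base/work_stream.h>
  #include <deal.II/dofs/dof_handler.h>

  using namespace dealii;

  struct ScratchData {};
  struct CopyData   {};

  // does all the work for one cell; nothing is left over to copy afterwards
  void local_assemble (const DoFHandler<2>::active_cell_iterator &cell,
                       ScratchData &scratch,
                       CopyData    &copy) { /* ... */ }

  void assemble_system (const DoFHandler<2> &dof_handler)
  {
    WorkStream::run (dof_handler.begin_active(),
                     dof_handler.end(),
                     &local_assemble,
                     // empty copier: cells are handled like a parallel for loop
                     std_cxx1x::function<void (const CopyData &)>(),
                     ScratchData(),
                     CopyData());
  }
@endcode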
    2. + +
    3. New: The new function VectorTools::interpolate_based_on_material_id() + can be used to interpolate several functions onto a mesh, based on the + material id of each cell individually. +
      + (Valentin Zingan, 2013/12/26) +
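A minimal usage sketch, assuming the function takes a mapping, a DoF handler, a map from material ids to Function pointers, and the vector to fill (the coefficient objects and vector names here are placeholders):
@code
  std::map<types::material_id, const Function<dim>*> material_functions;
  material_functions[0] = &coefficient_in_steel;    // placeholder Function objects
  material_functions[1] = &coefficient_in_rubber;

  VectorTools::interpolate_based_on_material_id (MappingQ1<dim>(),
                                                 dof_handler,
                                                 material_functions,
                                                 interpolated_coefficient);
@endcode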
    4. + +
    5. New: A new reinit() method has been introduced to + TrilinosWrappers::SparsityPattern that takes all rows that are possibly + written into as an optional argument. This allows for pre-allocating all + possible entries right away, which makes writing into the matrix from + several threads possible (otherwise, only one processor at a time can write + off-processor data). +
      + (Martin Kronbichler, 2013/12/23) +
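A rough sketch of the intended call, under the assumption that the new reinit() overload takes the locally owned row and column index sets, the set of all rows that might be written into, and the MPI communicator:
@code
  TrilinosWrappers::SparsityPattern sparsity_pattern;
  sparsity_pattern.reinit (locally_owned_dofs,     // rows stored on this process
                           locally_owned_dofs,     // column space
                           locally_relevant_dofs,  // all rows we may possibly write into
                           mpi_communicator);
@endcode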
    6. + +
    7. New: The TableBase::fill function has become more powerful in that + it now doesn't just take pointers to initializing elements but can deal + with arbitrary input iterators. It now also takes a flag that denotes the + order in which table elements are initialized, allowing to switch between + C- and Fortran-style table layouts. +
      + Along with the TableBase::fill function, the Table classes of various + ranks have also gotten constructors that allow the in-place construction + not only of a table of correct size, but already initialized from + somewhere. This finally allows to mark Table objects as const by creating + them already with the correct content. +
      + (Wolfgang Bangerth, 2013/12/21) +
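For example, following the new documentation of TableBase::fill() and the new Table constructors:
@code
  const double values[] = { 1, 2, 3, 4, 5, 6 };

  // fill an existing 2x3 table in Fortran order, i.e., first index fastest:
  Table<2,double> t1 (2, 3);
  t1.fill (&values[0], false);

  // size and initialize a table in one step, so that it can be declared const:
  const Table<2,double> t2 (2, 3, &values[0], true);
@endcode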
    8. + +
    9. New: There is now a new class Functions::InterpolatedTensorProductGridData that can + be used to (bi-/tri-)linearly interpolate data given on a tensor product + mesh of $x$ (and $y$ and $z$) values, for example to evaluate experimentally + determined coefficients, or to assess the accuracy of a solution by + comparing with a solution generated by a different code and written in + gridded data. There is also a new class Functions::InterpolatedUniformGridData that + can perform the same task more efficiently if the data is stored on meshes + that are uniform in each coordinate direction. +
      + (Wolfgang Bangerth, 2013/12/20) +
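A small 2d sketch of how such an object can be set up (the coordinate and data values are made up for illustration):
@code
  // coordinate values in each direction, strictly ascending:
  std_cxx1x::array<std::vector<double>,2> coordinates;
  const double x[] = { 0.0, 0.5, 1.0 };
  const double y[] = { 0.0, 1.0 };
  coordinates[0] = std::vector<double> (x, x+3);
  coordinates[1] = std::vector<double> (y, y+2);

  // measured values d(i,j) at the points (x_i, y_j):
  const double d[] = { 1, 2, 3, 4, 5, 6 };
  const Table<2,double> data (3, 2, &d[0], true);

  const Functions::InterpolatedTensorProductGridData<2> f (coordinates, data);
  const double value_at_p = f.value (Point<2>(0.25, 0.5));
@endcode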
    10. + +
    11. Fixed: ParameterHandler::get_double() and ParameterHandler::get_integer() + had bugs in that they didn't detect if they were asked to return a number + for a parameter whose value was in fact not a number but some general + text. This is now fixed. +
      + (Wolfgang Bangerth, 2013/12/19) +
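For illustration (a made-up parameter; the point is only that its value is not numeric): asking for a number from such a parameter is now reported as an error rather than silently returning a bogus value:
@code
  ParameterHandler prm;
  prm.declare_entry ("Output format", "vtu", Patterns::Anything());

  // ... read a parameter file, etc. ...

  // "vtu" cannot be converted to a number; this is now detected and
  // produces an error instead of an undefined result:
  const double d = prm.get_double ("Output format");
@endcode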
    12. +
    13. Fixed: VectorTools::project_boundary_values could not deal with function values close to (but not exactly equal to) zero. This is now fixed.
      diff --git a/deal.II/doc/publications/index.html b/deal.II/doc/publications/index.html index 624c89ab28..1c495eb61c 100644 --- a/deal.II/doc/publications/index.html +++ b/deal.II/doc/publications/index.html @@ -250,6 +250,14 @@
        +
      1. A. Bertei, C.-C. Chueh, J. Pharoah, C. Nicolella +
        + Modified Collective Rearrangement Sphere-assembly Algorithm for Random Packings of Nonspherical Particles: Towards Engineering Applications + +
        + Powder Technology, vol. 253, pp. 311-324, 2014. +
      2. +
      3. T. Wick, G. Singh, M.F. Wheeler
        Pressurized-Fracture propagation using a phase-field approach coupled to a reservoir simulator @@ -257,7 +265,7 @@
        SPE 168597-MS, SPE HFTC Proc. 2014.
      4. -
      +
    @@ -4451,8 +4459,6 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with in M. Feistauer, V. Doleji, P. Knobloch and K. Najzar, editors, Numerical Mathematics and Advanced Applications, ENUMATH 2003, pp. 410-419, Springer-Verlag, 2004 -
    - (BibTeX entry)
  • U. Hasler, DataOut::build_patches -function, an intermediate format is generated that contains all the -information for the data on each cell. Usually, this intermediate format is -then further processed and converted into one of the graphical formats that we -can presently write, such as gmv, eps, ucd, gnuplot, or a number of other -ones. Once written in these formats, there is no way to reconstruct the -necessary information to merge multiple blocks of output. However, the base -classes of DataOut also allow to simply dump the intermediate format -to a file, from which it can later be recovered without loss of information. - -This has two advantages: first, simulations may just dump the intermediate -format data during run-time, and the user may later decide which particular -graphics format she wants to have. This way, she does not have to re-run the -entire simulation if graphical output is requested in a different format. One -typical case is that one would like to take a quick look at the data with -gnuplot, and then create high-quality pictures using GMV or OpenDX. Since both -can be generated out of the intermediate format without problem, there is no -need to re-run the simulation. - -In the present context, of more interest is the fact that in contrast to any -of the other formats, it is simple to merge multiple files of intermediate -format, if they belong to the same simulation. This is what we will do here: -we will generate one output file in intermediate format for each processor -that belongs to this computation (in the sequential case, this will simply be -a single file). They may then later be read in and merged so that we can -output a single file in whatever graphical format is requested. - -The way to do this is to first instruct the DataOutBase class to -write intermediate format rather than in gmv or any other graphical -format. This is simple: just use -data_out.write_deal_II_intermediate. We will write to a file -called solution-TTTT.TTTT.d2 if there is only one processor, or -files solution-TTTT.TTTT.NNN.d2 if this is really a parallel -job. Here, TTTT.TTTT denotes the time for which this output has -been generated, and NNN the number of the MPI process that did this. - -The next step is to convert this file or these files into whatever -format you like. The program that does this is the step-19 tutorial program: -for example, for the first time step, call it through -@code - ../\step-19/\step-19 solution-0001.0000.*.d2 solution-0001.0000.gmv -@endcode -to merge all the intermediate format files into a single file in GMV -format. More details on the parameters of this program and what it can do for -you can be found in the documentation of the step-19 tutorial program. - -@note In the years since the paragraphs above were written, it has -also become possible to not only compute solutions in parallel, but -also to visualize them with some programs (for example with the -Paraview viewer). To make this efficient, one can not store the entire -solution in a single file, but instead needs to have a single data -file for each processor to visualize individually. We discuss this -concept and how to use it in step-40. +into separate files and have a way to display all files for a certain timestep +at the same time. This way the code produces one .vtu file per processor per +time step. The two common VTK file viewers ParaView and VisIt both support +opening more than one .vtu file at once. 
To simplify the process of picking
+the correct files and allow moving around in time, both support record files
+that reference all files for a given timestep. Sadly, the record files have a
+different format between VisIt and ParaView, so we write out both formats.
+
+The code will generate the files solution-TTTT.NNN.vtu, where TTTT is the
+timestep number (starting from 1) and NNN is the processor id (starting from
+0). These files contain the locally owned cells for the timestep and
+processor. The file solution-TTTT.visit is the VisIt record for timestep
+TTTT, while solution-TTTT.pvtu is the same for ParaView. Finally, the file
+solution.pvd is a special record only supported by ParaView that references
+all time steps. So in ParaView, only solution.pvd needs to be opened, while
+one needs to select the group of all .visit files in VisIt for the same
+effect.
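The record files themselves are written only by processor 0. A condensed sketch of that part follows; the complete version appears in output_results() further down, from which the variable names are taken (the .visit record for VisIt is written analogously):
@code
  // names of all .vtu files belonging to this time step:
  std::vector<std::string> filenames;
  for (unsigned int i=0; i<n_mpi_processes; ++i)
    filenames.push_back ("solution-" + Utilities::int_to_string(timestep_no,4) +
                         "." + Utilities::int_to_string(i,3) + ".vtu");

  // per-time-step record for ParaView:
  const std::string pvtu_master_filename
    = "solution-" + Utilities::int_to_string(timestep_no,4) + ".pvtu";
  std::ofstream pvtu_master (pvtu_master_filename.c_str());
  data_out.write_pvtu_record (pvtu_master, filenames);

  // global record of all time steps written so far (ParaView only);
  // in the actual program this vector is kept alive across time steps:
  static std::vector<std::pair<double,std::string> > times_and_names;
  times_and_names.push_back (std::make_pair (present_time, pvtu_master_filename));
  std::ofstream pvd_output ("solution.pvd");
  data_out.write_pvd_record (pvd_output, times_and_names);
@endcode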

    Overall structure of the program

    @@ -553,8 +513,7 @@ for (unsigned int i=0; i output_results (): This function simply outputs the solution based on what we have said above, i.e. every processor computes output only - for its own portion of the domain, and this can then be later merged by an - external program. In addition to the solution, we also compute the norm of + for its own portion of the domain. In addition to the solution, we also compute the norm of the stress averaged over all the quadrature points on each cell. diff --git a/deal.II/examples/step-18/doc/results.dox b/deal.II/examples/step-18/doc/results.dox index b59460a793..95e369d4c5 100644 --- a/deal.II/examples/step-18/doc/results.dox +++ b/deal.II/examples/step-18/doc/results.dox @@ -90,40 +90,20 @@ problem to keep a computer busy for a while. At the end of the day, this is what we have for output: @code examples/\step-18> ls -l *.d2 --rw-r--r-- 1 bangerth wheeler 8797414 May 25 09:10 solution-0001.0000.d2 --rw-r--r-- 1 bangerth wheeler 8788500 May 25 09:32 solution-0002.0000.d2 --rw-r--r-- 1 bangerth wheeler 8763718 May 25 09:55 solution-0003.0000.d2 --rw-r--r-- 1 bangerth wheeler 8738940 May 25 10:17 solution-0004.0000.d2 --rw-r--r-- 1 bangerth wheeler 8710104 May 25 10:39 solution-0005.0000.d2 --rw-r--r-- 1 bangerth wheeler 8685388 May 25 11:01 solution-0006.0000.d2 --rw-r--r-- 1 bangerth wheeler 8649088 May 25 11:23 solution-0007.0000.d2 --rw-r--r-- 1 bangerth wheeler 8585146 May 25 11:45 solution-0008.0000.d2 --rw-r--r-- 1 bangerth wheeler 8489764 May 25 12:07 solution-0009.0000.d2 --rw-r--r-- 1 bangerth wheeler 8405388 May 25 12:29 solution-0010.0000.d2 +-rw-r--r-- 1 bangerth wheeler 8797414 May 25 09:10 solution-0001.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8788500 May 25 09:32 solution-0002.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8763718 May 25 09:55 solution-0003.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8738940 May 25 10:17 solution-0004.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8710104 May 25 10:39 solution-0005.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8685388 May 25 11:01 solution-0006.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8649088 May 25 11:23 solution-0007.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8585146 May 25 11:45 solution-0008.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8489764 May 25 12:07 solution-0009.0000.vtu +-rw-r--r-- 1 bangerth wheeler 8405388 May 25 12:29 solution-0010.0000.vtu @endcode -Let us convert these files in deal.II intermediate format to gmv -format (this assumes that you have already compiled the -step-19 example program): -@code -examples/\step-18> ../\step-19/\step-19 - -Converter from deal.II intermediate format to other graphics formats. - -Usage: ./\step-19 [-p parameter_file] list_of_input_files [-x output_format] -o output_file - -examples/\step-18> ../\step-19/\step-19 solution-0001.0000.d2 -x gmv -o solution-0001.0000.gmv -examples/\step-18> ../\step-19/\step-19 solution-0002.0000.d2 -x gmv -o solution-0002.0000.gmv -[...] -@endcode -Of course, since we have run the program only in sequential mode, we -do have only one intermediate file for each time step that we have to -take as input. - - - -If we visualize these files with GMV, we get to see the full picture +If we visualize these files with VisIt or Paraview, we get to see the full picture of the disaster our forced compression wreaks on the cylinder (colors in the images encode the norm of the stress in the material): @@ -233,39 +213,26 @@ Timestep 20 at time 10 That's quite a good number of unknowns, given that we are in 3d. 
The output of this program are 16 files for each time step: @code -examples/\step-18> ls -l solution-0001.000* --rw-r--r-- 1 bangerth mfw 4325219 Aug 11 09:44 solution-0001.0000-000.d2 --rw-r--r-- 1 bangerth mfw 4454460 Aug 11 09:44 solution-0001.0000-001.d2 --rw-r--r-- 1 bangerth mfw 4485242 Aug 11 09:43 solution-0001.0000-002.d2 --rw-r--r-- 1 bangerth mfw 4517364 Aug 11 09:43 solution-0001.0000-003.d2 --rw-r--r-- 1 bangerth mfw 4462829 Aug 11 09:43 solution-0001.0000-004.d2 --rw-r--r-- 1 bangerth mfw 4482487 Aug 11 09:43 solution-0001.0000-005.d2 --rw-r--r-- 1 bangerth mfw 4548619 Aug 11 09:43 solution-0001.0000-006.d2 --rw-r--r-- 1 bangerth mfw 4522421 Aug 11 09:43 solution-0001.0000-007.d2 --rw-r--r-- 1 bangerth mfw 4337529 Aug 11 09:43 solution-0001.0000-008.d2 --rw-r--r-- 1 bangerth mfw 4163047 Aug 11 09:43 solution-0001.0000-009.d2 --rw-r--r-- 1 bangerth mfw 4288247 Aug 11 09:43 solution-0001.0000-010.d2 --rw-r--r-- 1 bangerth mfw 4350410 Aug 11 09:43 solution-0001.0000-011.d2 --rw-r--r-- 1 bangerth mfw 4458427 Aug 11 09:43 solution-0001.0000-012.d2 --rw-r--r-- 1 bangerth mfw 4466037 Aug 11 09:43 solution-0001.0000-013.d2 --rw-r--r-- 1 bangerth mfw 4505679 Aug 11 09:44 solution-0001.0000-014.d2 --rw-r--r-- 1 bangerth mfw 4340488 Aug 11 09:44 solution-0001.0000-015.d2 -@endcode -We merge and convert these 16 intermediate files into a single gmv file as -follows: -@code -examples/\step-18> time ../\step-19/\step-19 solution-0001.0000-* -x gmv -o solution-0001.0000.gmv - -real 0m45.929s -user 0m41.290s -sys 0m0.990s -examples/\step-18> ls -l solution-0001.0000.gmv --rw-r--r-- 1 bangerth mfw 68925360 Aug 11 17:04 solution-0001.0000.gmv +examples/\step-18> ls -l solution-0001* +-rw-r--r-- 1 bangerth mfw 4325219 Aug 11 09:44 solution-0001.000.d2 +-rw-r--r-- 1 bangerth mfw 4454460 Aug 11 09:44 solution-0001.001.d2 +-rw-r--r-- 1 bangerth mfw 4485242 Aug 11 09:43 solution-0001.002.d2 +-rw-r--r-- 1 bangerth mfw 4517364 Aug 11 09:43 solution-0001.003.d2 +-rw-r--r-- 1 bangerth mfw 4462829 Aug 11 09:43 solution-0001.004.d2 +-rw-r--r-- 1 bangerth mfw 4482487 Aug 11 09:43 solution-0001.005.d2 +-rw-r--r-- 1 bangerth mfw 4548619 Aug 11 09:43 solution-0001.006.d2 +-rw-r--r-- 1 bangerth mfw 4522421 Aug 11 09:43 solution-0001.007.d2 +-rw-r--r-- 1 bangerth mfw 4337529 Aug 11 09:43 solution-0001.008.d2 +-rw-r--r-- 1 bangerth mfw 4163047 Aug 11 09:43 solution-0001.009.d2 +-rw-r--r-- 1 bangerth mfw 4288247 Aug 11 09:43 solution-0001.010.d2 +-rw-r--r-- 1 bangerth mfw 4350410 Aug 11 09:43 solution-0001.011.d2 +-rw-r--r-- 1 bangerth mfw 4458427 Aug 11 09:43 solution-0001.012.d2 +-rw-r--r-- 1 bangerth mfw 4466037 Aug 11 09:43 solution-0001.013.d2 +-rw-r--r-- 1 bangerth mfw 4505679 Aug 11 09:44 solution-0001.014.d2 +-rw-r--r-- 1 bangerth mfw 4340488 Aug 11 09:44 solution-0001.015.d2 @endcode -Doing so for all time steps, we obtain gmv files that we can visualize (albeit -with some difficulty, due to their size gmv isn't exactly fast when plotting -them). 
Here are first the mesh on which we compute as well as the partitioning +Here are first the mesh on which we compute as well as the partitioning for the 16 processors: diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc index f587d740be..6d9cf38039 100644 --- a/deal.II/examples/step-18/step-18.cc +++ b/deal.II/examples/step-18/step-18.cc @@ -15,7 +15,8 @@ * --------------------------------------------------------------------- * - * Author: Wolfgang Bangerth, University of Texas at Austin, 2000, 2004, 2005 + * Author: Wolfgang Bangerth, University of Texas at Austin, 2000, 2004, 2005, + * Timo Heister, 2013 */ @@ -1284,11 +1285,10 @@ namespace Step18 // @sect4{TopLevel::output_results} - // This function generates the graphical output in intermediate format as - // explained in the introduction. Each process will only work on the cells - // it owns, and then write the result into a file of its own. These files - // may later be merged to get a single file in any of the supported output - // files, as mentioned in the introduction. + // This function generates the graphical output in .vtu format as explained + // in the introduction. Each process will only work on the cells it owns, + // and then write the result into a file of its own. Additionally, processor + // 0 will write the record files the reference all the .vtu files. // // The crucial part of this function is to give the DataOut // class a way to only work on the cells that the present process owns. This @@ -1468,50 +1468,70 @@ namespace Step18 data_out.build_patches (); - // Now that we have generated the intermediate format, let us determine - // the name of the file we will want to write it to. We compose it of the - // prefix solution-, followed by a representation of the - // present time written as a fixed point number so that file names sort - // naturally: - std::ostringstream filename; - filename << "solution-"; - filename << std::setfill('0'); - filename.setf(std::ios::fixed, std::ios::floatfield); - filename << std::setw(9) << std::setprecision(4) << present_time; - - // Next, in case there are multiple processes working together, we have to - // generate different file names for the output of each process. In our - // case, we encode the process number as a three-digit integer, padded - // with zeros. The assertion in the first line of the block makes sure - // that there are less than 1000 processes (a very conservative check, but - // worth having anyway) as our scheme of generating process numbers would - // overflow if there were 1000 processes or more. Note that we choose to - // use AssertThrow rather than Assert since the - // number of processes is a variable that depends on input files or the - // way the process is started, rather than static assumptions in the - // program code. Therefore, it is inappropriate to use Assert - // that is optimized away in optimized mode, whereas here we actually can - // assume that users will run the largest computations with the most - // processors in optimized mode, and we should check our assumptions in - // this particular case, and not only when running in debug mode: - if (n_mpi_processes != 1) - { - AssertThrow (n_mpi_processes < 1000, ExcNotImplemented()); - - filename << '-'; - filename << std::setfill('0'); - filename << std::setw(3) << this_mpi_process; - } - - // To the file name, attach the file name suffix usually used for the - // deal.II intermediate format. 
To determine it, we use the same function - // that has already been used in step-13: - filename << data_out.default_suffix(DataOut::deal_II_intermediate); + // Let us determine the name of the file we will want to write it to. We + // compose it of the prefix solution-, followed by the time + // step number, and finally the processor id (encoded as a three digit + // number): + std::string filename = "solution-" + Utilities::int_to_string(timestep_no,4) + + "." + Utilities::int_to_string(this_mpi_process,3) + + ".vtu"; + + // The following assertion makes sure that there are less than 1000 + // processes (a very conservative check, but worth having anyway) as our + // scheme of generating process numbers would overflow if there were 1000 + // processes or more. Note that we choose to use AssertThrow + // rather than Assert since the number of processes is a + // variable that depends on input files or the way the process is started, + // rather than static assumptions in the program code. Therefore, it is + // inappropriate to use Assert that is optimized away in + // optimized mode, whereas here we actually can assume that users will run + // the largest computations with the most processors in optimized mode, + // and we should check our assumptions in this particular case, and not + // only when running in debug mode: + AssertThrow (n_mpi_processes < 1000, ExcNotImplemented()); // With the so-completed filename, let us open a file and write the data - // we have generated into it, using the intermediate format: - std::ofstream output (filename.str().c_str()); - data_out.write_deal_II_intermediate (output); + // we have generated into it: + std::ofstream output (filename.c_str()); + data_out.write_vtu (output); + + // The record files must be written only once and not by each processor, + // so we do this on processor 0: + if (this_mpi_process==0) + { + // Here we collect all filenames of the current timestep (same format as above) + std::vector filenames; + for (unsigned int i=0; i > times_and_names; + times_and_names.push_back (std::pair (present_time, pvtu_master_filename)); + std::ofstream pvd_output ("solution.pvd"); + data_out.write_pvd_record (pvd_output, times_and_names); + } + } diff --git a/deal.II/examples/step-40/doc/results.dox b/deal.II/examples/step-40/doc/results.dox index 8b62cec1e7..45eec441e8 100644 --- a/deal.II/examples/step-40/doc/results.dox +++ b/deal.II/examples/step-40/doc/results.dox @@ -129,7 +129,7 @@ multigrid method from the
    Hypre package as a preconditioner, which unfortunately uses signed 32-bit integers to index the elements of a %distributed matrix. This limits the size of -problems to $2^31-1=2,147,483,647$ degrees of freedom. From the graphs +problems to $2^{31}-1=2,147,483,647$ degrees of freedom. From the graphs above it is obvious that the scalability would extend beyond this number, and one could expect that given more than the 4,096 machines shown above would also further reduce the compute time. That said, one diff --git a/deal.II/examples/step-40/step-40.cc b/deal.II/examples/step-40/step-40.cc index d02184477e..3a99cd12fa 100644 --- a/deal.II/examples/step-40/step-40.cc +++ b/deal.II/examples/step-40/step-40.cc @@ -264,8 +264,6 @@ namespace Step40 locally_relevant_dofs, mpi_communicator); system_rhs.reinit (locally_owned_dofs, mpi_communicator); - system_rhs = 0; - // The next step is to compute hanging node and boundary value // constraints, which we combine into a single object storing all // constraints. diff --git a/deal.II/examples/step-42/step-42.cc b/deal.II/examples/step-42/step-42.cc index f4b600317f..75e4100b86 100644 --- a/deal.II/examples/step-42/step-42.cc +++ b/deal.II/examples/step-42/step-42.cc @@ -427,7 +427,11 @@ namespace Step42 // i.e., the Chinese character. The first of the two, BitmapFile // is responsible for reading in data from a picture file // stored in pbm ascii format. This data will be bilinearly interpolated and - // provides in this way a function which describes an obstacle. + // thereby provides a function that describes the obstacle. (The code below + // shows how one can construct a function by interpolating between given + // data points. One could use the Functions::InterpolatedUniformGridData, + // introduced after this tutorial program was written, which does exactly + // what we want here, but it is instructive to see how to do it by hand.) // // The data which we read from the file will be stored in a double std::vector // named obstacle_data. This vector composes the base to calculate a diff --git a/deal.II/examples/step-47/step-47.cc b/deal.II/examples/step-47/step-47.cc index 87707ba908..cb6ada15f9 100644 --- a/deal.II/examples/step-47/step-47.cc +++ b/deal.II/examples/step-47/step-47.cc @@ -280,9 +280,6 @@ namespace Step47 FEValues plain_fe_values (fe_collection[0], quadrature_formula, update_values | update_gradients | update_quadrature_points | update_JxW_values); - FEValues enriched_fe_values (fe_collection[1], quadrature_formula, - update_values | update_gradients | - update_quadrature_points | update_JxW_values); const unsigned int n_q_points = quadrature_formula.size(); diff --git a/deal.II/include/deal.II/base/config.h.in b/deal.II/include/deal.II/base/config.h.in index 6766f7c24e..c0a1eda16b 100644 --- a/deal.II/include/deal.II/base/config.h.in +++ b/deal.II/include/deal.II/base/config.h.in @@ -112,8 +112,11 @@ * to allow using the standard library classes instead of the corresponding * BOOST classes. 
*/ -#cmakedefine DEAL_II_USE_CXX11 -#ifdef DEAL_II_USE_CXX11 +#cmakedefine DEAL_II_WITH_CXX11 +#ifdef DEAL_II_WITH_CXX11 +/* Compatibility definition (with naming from deal.II 8.0): */ +# define DEAL_II_USE_CXX11 +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_CAN_USE_CXX11 # define DEAL_II_CAN_USE_CXX1X #endif @@ -237,6 +240,7 @@ #cmakedefine DEAL_II_WITH_ARPACK #ifdef DEAL_II_WITH_ARPACK +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_ARPACK #endif @@ -246,6 +250,7 @@ #cmakedefine DEAL_II_WITH_FUNCTIONPARSER #ifdef DEAL_II_WITH_FUNCTIONPARSER +/* Compatibility definition (with naming from deal.II < 8.0): */ # define HAVE_FUNCTIONPARSER #endif @@ -255,6 +260,7 @@ #cmakedefine DEAL_II_WITH_HDF5 #ifdef DEAL_II_WITH_HDF5 +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_HAVE_HDF5 #endif @@ -264,6 +270,7 @@ #cmakedefine DEAL_II_WITH_LAPACK #ifdef DEAL_II_WITH_LAPACK +/* Compatibility definition (with naming from deal.II < 8.0): */ # define HAVE_LIBLAPACK /* Defined if the corresponding BLAS or LAPACK function is available */ @@ -315,6 +322,7 @@ #cmakedefine DEAL_II_WITH_METIS #ifdef DEAL_II_WITH_METIS +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_METIS #endif @@ -325,6 +333,7 @@ #cmakedefine DEAL_II_WITH_MPI #ifdef DEAL_II_WITH_MPI +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_COMPILER_SUPPORTS_MPI #endif @@ -335,6 +344,7 @@ #cmakedefine DEAL_II_WITH_MUMPS #ifdef DEAL_II_WITH_MUMPS +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_MUMPS #endif @@ -345,6 +355,7 @@ #cmakedefine DEAL_II_WITH_NETCDF #ifdef DEAL_II_WITH_NETCDF +/* Compatibility definition (with naming from deal.II < 8.0): */ # define HAVE_LIBNETCDF #endif @@ -355,6 +366,7 @@ #cmakedefine DEAL_II_WITH_P4EST #ifdef DEAL_II_WITH_P4EST +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_P4EST # define DEAL_II_P4EST_VERSION_MAJOR @P4EST_VERSION_MAJOR@ @@ -390,6 +402,7 @@ #cmakedefine DEAL_II_WITH_PETSC #ifdef DEAL_II_WITH_PETSC +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_PETSC #endif @@ -438,16 +451,25 @@ #cmakedefine DEAL_II_WITH_SLEPC #ifdef DEAL_II_WITH_SLEPC +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_SLEPC #endif +/****************************************** + * Configured in configure_1_bzip2.cmake: * + ******************************************/ + +#cmakedefine DEAL_II_WITH_BZIP2 + + /******************************************** * Configured in configure_1_threads.cmake: * ********************************************/ #cmakedefine DEAL_II_WITH_THREADS #ifdef DEAL_II_WITH_THREADS +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_MT #endif @@ -474,12 +496,24 @@ #endif +/***************************************** + * Configured in configure_1_zlib.cmake: * + *****************************************/ + +#cmakedefine DEAL_II_WITH_ZLIB +#ifdef DEAL_II_WITH_ZLIB +/* Compatibility definition (with naming from deal.II < 8.0): */ +# define HAVE_LIBZ +#endif + + /******************************************* * Configured in configure_trilinos.cmake: * *******************************************/ #cmakedefine DEAL_II_WITH_TRILINOS #ifdef DEAL_II_WITH_TRILINOS +/* Compatibility definition (with naming from deal.II < 8.0): */ # define DEAL_II_USE_TRILINOS #endif @@ -490,20 
+524,11 @@ #cmakedefine DEAL_II_WITH_UMFPACK #ifdef DEAL_II_WITH_UMFPACK +/* Compatibility definition (with naming from deal.II < 8.0): */ # define HAVE_LIBUMFPACK #endif -/*************************************** - * Configured in configure_zlib.cmake: * - ***************************************/ - -#cmakedefine DEAL_II_WITH_ZLIB -#ifdef DEAL_II_WITH_ZLIB -# define HAVE_LIBZ -#endif - - #include #include diff --git a/deal.II/include/deal.II/base/function_lib.h b/deal.II/include/deal.II/base/function_lib.h index 5e49212269..19f3824c64 100644 --- a/deal.II/include/deal.II/base/function_lib.h +++ b/deal.II/include/deal.II/base/function_lib.h @@ -21,6 +21,9 @@ #include #include #include +#include + +#include DEAL_II_NAMESPACE_OPEN @@ -1175,6 +1178,175 @@ namespace Functions const Tensor<1,dim> exponents; }; + + + /** + * A scalar function that computes its values by (bi-, tri-)linear interpolation + * from a set of point data that are arranged on a possibly non-uniform + * tensor product mesh. In other words, considering the three-dimensional case, + * let there be points $x_0,\ldotx, x_{K-1}$, + * $y_0,\ldots,y_{L-1}$, $z_1,\ldots,z_{M-1}$, and data $d_{klm}$ defined at + * point $(x_k,y_l,z_m)^T$, then evaluating the function at a point + * $\mathbf x=(x,y,z)$ will find the box so that + * $x_k\le x\le x_{k+1}, y_l\le x\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a + * trilinear interpolation of the data on this cell. Similar operations are + * done in lower dimensions. + * + * This class is most often used for either evaluating coefficients or right + * hand sides that are provided experimentally at a number of points inside the + * domain, or for comparing outputs of a solution on a finite element mesh + * against previously obtained data defined on a grid. + * + * @note If the points $x_i$ are actually equally spaced on an interval $[x_0,x_1]$ + * and the same is true for the other data points in higher dimensions, you should + * use the InterpolatedUniformGridData class instead. + * + * If a point is requested outside the box defined by the end points of the + * coordinate arrays, then the function is assumed to simply extend by + * constant values beyond the last data point in each coordinate + * direction. (The class does not throw an error if a point lies outside the + * box since it frequently happens that a point lies just outside the box + * by an amount on the order of numerical roundoff.) + * + * @author Wolfgang Bangerth, 2013 + */ + template + class InterpolatedTensorProductGridData : public Function + { + public: + /** + * Constructor. + * @param coordinate_values An array of dim arrays. Each of the inner + * arrays contains the coordinate values $x_0,\ldotx, x_{K-1}$ and + * similarly for the other coordinate directions. These arrays + * need not have the same size. Obviously, we need dim such arrays + * for a dim-dimensional function object. The coordinate values + * within this array are assumed to be strictly ascending to allow + * for efficient lookup. + * @param data_values A dim-dimensional table of data at each of the + * mesh points defined by the coordinate arrays above. Note that the + * Table class has a number of conversion constructors that allow + * converting other data types into a table where you specify this + * argument. + */ + InterpolatedTensorProductGridData (const std_cxx1x::array,dim> &coordinate_values, + const Table &data_values); + + /** + * Compute the value of the function set by bilinear interpolation of the + * given data set. 
+ * + * @param p The point at which the function is to be evaluated. + * @param component The vector component. Since this function is scalar, + * only zero is a valid argument here. + * @return The interpolated value at this point. If the point lies outside + * the set of coordinates, the function is extended by a constant. + */ + virtual + double + value (const Point &p, + const unsigned int component = 0) const; + + private: + /** + * The set of coordinate values in each of the coordinate directions. + */ + const std_cxx1x::array,dim> coordinate_values; + + /** + * The data that is to be interpolated. + */ + const Table data_values; + }; + + + /** + * A scalar function that computes its values by (bi-, tri-)linear interpolation + * from a set of point data that are arranged on a uniformly spaced + * tensor product mesh. In other words, considering the three-dimensional case, + * let there be points $x_0,\ldotx, x_{K-1}$ that result from a uniform subdivision + * of the interval $[x_0,x_{K-1}]$ into $K-1$ sub-intervals of size $\Delta x + * = (x_{K-1}-x_0)/(K-1)$, and similarly + * $y_0,\ldots,y_{L-1}$, $z_1,\ldots,z_{M-1}$. Also consider data $d_{klm}$ defined at + * point $(x_k,y_l,z_m)^T$, then evaluating the function at a point + * $\mathbf x=(x,y,z)$ will find the box so that + * $x_k\le x\le x_{k+1}, y_l\le x\le y_{l+1}, z_m\le z\le z_{m+1}$, and do a + * trilinear interpolation of the data on this cell. Similar operations are + * done in lower dimensions. + * + * This class is most often used for either evaluating coefficients or right + * hand sides that are provided experimentally at a number of points inside the + * domain, or for comparing outputs of a solution on a finite element mesh + * against previously obtained data defined on a grid. + * + * @note If you have a problem where the points $x_i$ are not equally spaced + * (e.g., they result from a computation on a graded mesh that is denser + * closer to one boundary), then use the InterpolatedTensorProductGridData + * class instead. + * + * If a point is requested outside the box defined by the end points of the + * coordinate arrays, then the function is assumed to simply extend by + * constant values beyond the last data point in each coordinate + * direction. (The class does not throw an error if a point lies outside the + * box since it frequently happens that a point lies just outside the box + * by an amount on the order of numerical roundoff.) + * + * @author Wolfgang Bangerth, 2013 + */ + template + class InterpolatedUniformGridData : public Function + { + public: + /** + * Constructor + * @param interval_endpoints The left and right end points of the (uniformly + * subdivided) intervals in each of the coordinate directions. + * @param n_subdivisions The number of subintervals of the subintervals + * in each coordinate direction. A value of one for a coordinate + * means that the interval is considered as one subinterval consisting + * of the entire range. A value of two means that there are two subintervals + * each with one half of the range, etc. + * @param data_values A dim-dimensional table of data at each of the + * mesh points defined by the coordinate arrays above. Note that the + * Table class has a number of conversion constructors that allow + * converting other data types into a table where you specify this + * argument. 
+ */ + InterpolatedUniformGridData (const std_cxx1x::array,dim> &interval_endpoints, + const std_cxx1x::array &n_subintervals, + const Table &data_values); + + /** + * Compute the value of the function set by bilinear interpolation of the + * given data set. + * + * @param p The point at which the function is to be evaluated. + * @param component The vector component. Since this function is scalar, + * only zero is a valid argument here. + * @return The interpolated value at this point. If the point lies outside + * the set of coordinates, the function is extended by a constant. + */ + virtual + double + value (const Point &p, + const unsigned int component = 0) const; + + private: + /** + * The set of interval endpoints in each of the coordinate directions. + */ + const std_cxx1x::array,dim> interval_endpoints; + + /** + * The number of subintervals in each of the coordinate directions. + */ + const std_cxx1x::array n_subintervals; + + /** + * The data that is to be interpolated. + */ + const Table data_values; + }; } DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/include/deal.II/base/std_cxx1x/array.h b/deal.II/include/deal.II/base/std_cxx1x/array.h index 68b8060321..22635ef086 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/array.h +++ b/deal.II/include/deal.II/base/std_cxx1x/array.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/bind.h b/deal.II/include/deal.II/base/std_cxx1x/bind.h index 9cb2192988..5868fda7d0 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/bind.h +++ b/deal.II/include/deal.II/base/std_cxx1x/bind.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include diff --git a/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h b/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h index 4fd712c36c..1927778f1f 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h +++ b/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/function.h b/deal.II/include/deal.II/base/std_cxx1x/function.h index 86aea59ed1..0817258279 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/function.h +++ b/deal.II/include/deal.II/base/std_cxx1x/function.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/mutex.h b/deal.II/include/deal.II/base/std_cxx1x/mutex.h index 7c195fffc9..e8b40a54e2 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/mutex.h +++ b/deal.II/include/deal.II/base/std_cxx1x/mutex.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h b/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h index 172218deda..78f6701cab 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h +++ b/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/thread.h b/deal.II/include/deal.II/base/std_cxx1x/thread.h index ac3ed95d4d..2dccd96d78 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/thread.h +++ b/deal.II/include/deal.II/base/std_cxx1x/thread.h 
@@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/tuple.h b/deal.II/include/deal.II/base/std_cxx1x/tuple.h index b3d846da6e..4a23a3d441 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/tuple.h +++ b/deal.II/include/deal.II/base/std_cxx1x/tuple.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/type_traits.h b/deal.II/include/deal.II/base/std_cxx1x/type_traits.h index d0e34b3280..d2084bb48b 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/type_traits.h +++ b/deal.II/include/deal.II/base/std_cxx1x/type_traits.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_USE_CXX11 +#ifdef DEAL_II_WITH_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/table.h b/deal.II/include/deal.II/base/table.h index 4f884dc414..890d8d9af4 100644 --- a/deal.II/include/deal.II/base/table.h +++ b/deal.II/include/deal.II/base/table.h @@ -481,6 +481,18 @@ public: */ TableBase (const TableIndices &sizes); + /** + * Constructor. Initialize the + * array with the given + * dimensions in each index + * component, and then initialize the elements of the table using the + * second and third argument by calling fill(entries,C_style_indexing). + */ + template + TableBase (const TableIndices &sizes, + InputIterator entries, + const bool C_style_indexing = true); + /** * Copy constructor. Performs a * deep copy. @@ -583,31 +595,48 @@ public: bool empty () const; /** - * Fill array with an array of - * elements. The input array must - * be arranged in usual C style, - * i.e. with the last index - * running fastest. For - * two-dimensional tables, this - * means line by line. No range - * checking is performed, i.e., - * it is assumed that the input - * array entries contains - * n_rows()*n_cols() - * elements, and that the layout - * refers to the desired shape of - * this table. The only check we - * do is that the present array - * is non-empty. + * Fill this table (which is assumed to already have the correct + * size) from a source given by dereferencing the given forward + * iterator (which could, for example, be a pointer to the first + * element of an array, or an inserting std::istream_iterator). The + * second argument denotes whether the elements pointed to are + * arranged in a way that corresponds to the last index running + * fastest or slowest. The default is to use C-style indexing + * where the last index runs fastest (as opposed to Fortran-style + * where the first index runs fastest when traversing multidimensional + * arrays. For example, if you try to fill an object of type + * Table<2,T>, then calling this function with the default + * value for the second argument will result in the equivalent of + * doing + * @code + * Table<2,T> t; + * for (unsigned int i=0; i t; + * for (unsigned int j=0; jT2, must be convertible to - * the type of the objects of - * this array. + * @param entries An iterator to a set of elements from which to + * initialize this table. It is assumed that iterator can be + * incremented and dereferenced a sufficient number of times + * to fill this table. + * @param C_style_indexing If true, run over elements of the + * table with the last index changing fastest as we dereference + * subsequent elements of the input range. If false, change + * the first index fastest. 
*/ - template - void fill (const T2 *entries); + template + void fill (InputIterator entries, + const bool C_style_indexing = true); /** * Fill all table entries with @@ -789,6 +818,50 @@ public: */ Table (const unsigned int size); + /** + * Constructor. Create a table with a given size and initialize it from + * a set of iterators. + * + * This function is entirely equivalent to creating a table t + * of the given size and then calling + * @code + * t.fill (entries, C_style_indexing); + * @endcode + * on it, using the TableBase::fill() function where the arguments are + * explained in more detail. The point, however, is that that is only + * possible if the table can be changed after running the constructor, + * whereas calling the current constructor allows sizing and initializing + * an object right away so that it can be marked const. + * + * Using this constructor, you can do things like this: + * @code + * const double values[] = { 1, 2, 3 }; + * const Table<1,double> t(3, entries, true); + * @endcode + * You can also initialize a table right from a file, using input iterators: + * @code + * std::ifstream input ("myfile"); + * const Table<1,double> t(3, + * std::istream_iterator(input), + * true); + * @endcode + * + * + * @param size The size of this one-dimensional table. + * @param entries An iterator to a set of elements from which to + * initialize this table. It is assumed that iterator can be + * incremented and dereferenced a sufficient number of times + * to fill this table. + * @param C_style_indexing If true, run over elements of the + * table with the last index changing fastest as we dereference + * subsequent elements of the input range. If false, change + * the first index fastest. + */ + template + Table (const unsigned int size, + InputIterator entries, + const bool C_style_indexing = true); + /** * Access operator. Since this is * a one-dimensional object, this @@ -881,6 +954,52 @@ public: Table (const unsigned int size1, const unsigned int size2); + /** + * Constructor. Create a table with a given size and initialize it from + * a set of iterators. + * + * This function is entirely equivalent to creating a table t + * of the given size and then calling + * @code + * t.fill (entries, C_style_indexing); + * @endcode + * on it, using the TableBase::fill() function where the arguments are + * explained in more detail. The point, however, is that that is only + * possible if the table can be changed after running the constructor, + * whereas calling the current constructor allows sizing and initializing + * an object right away so that it can be marked const. + * + * Using this constructor, you can do things like this: + * @code + * const double values[] = { 1, 2, 3, 4, 5, 6 }; + * const Table<2,double> t(2, 3, entries, true); + * @endcode + * You can also initialize a table right from a file, using input iterators: + * @code + * std::ifstream input ("myfile"); + * const Table<2,double> t(2, 3, + * std::istream_iterator(input), + * true); + * @endcode + * + * + * @param size1 The size of this table in the first dimension. + * @param size2 The size of this table in the second dimension. + * @param entries An iterator to a set of elements from which to + * initialize this table. It is assumed that iterator can be + * incremented and dereferenced a sufficient number of times + * to fill this table. + * @param C_style_indexing If true, run over elements of the + * table with the last index changing fastest as we dereference + * subsequent elements of the input range. 
If false, change + * the first index fastest. + */ + template + Table (const unsigned int size1, + const unsigned int size2, + InputIterator entries, + const bool C_style_indexing = true); + /** * Reinitialize the object. This * function is mostly here for @@ -1061,6 +1180,55 @@ public: const unsigned int size2, const unsigned int size3); + /** + * Constructor. Create a table with a given size and initialize it from + * a set of iterators. + * + * This function is entirely equivalent to creating a table t + * of the given size and then calling + * @code + * t.fill (entries, C_style_indexing); + * @endcode + * on it, using the TableBase::fill() function where the arguments are + * explained in more detail. The point, however, is that that is only + * possible if the table can be changed after running the constructor, + * whereas calling the current constructor allows sizing and initializing + * an object right away so that it can be marked const. + * + * Using this constructor, you can do things like this (shown here for + * a two-dimensional table, but the same works for the current class): + * @code + * const double values[] = { 1, 2, 3, 4, 5, 6 }; + * const Table<2,double> t(2, 3, entries, true); + * @endcode + * You can also initialize a table right from a file, using input iterators: + * @code + * std::ifstream input ("myfile"); + * const Table<2,double> t(2, 3, + * std::istream_iterator(input), + * true); + * @endcode + * + * + * @param size1 The size of this table in the first dimension. + * @param size2 The size of this table in the second dimension. + * @param size3 The size of this table in the third dimension. + * @param entries An iterator to a set of elements from which to + * initialize this table. It is assumed that iterator can be + * incremented and dereferenced a sufficient number of times + * to fill this table. + * @param C_style_indexing If true, run over elements of the + * table with the last index changing fastest as we dereference + * subsequent elements of the input range. If false, change + * the first index fastest. + */ + template + Table (const unsigned int size1, + const unsigned int size2, + const unsigned int size3, + InputIterator entries, + const bool C_style_indexing = true); + /** * Access operator. 
Generate an * object that accesses the @@ -1734,6 +1902,20 @@ TableBase::TableBase (const TableIndices &sizes) +template +template +TableBase:: +TableBase (const TableIndices &sizes, + InputIterator entries, + const bool C_style_indexing) +{ + reinit (sizes); + fill (entries, C_style_indexing); +} + + + + template TableBase::TableBase (const TableBase &src) : @@ -2080,16 +2262,66 @@ TableBase::empty () const +namespace internal +{ + namespace Table + { + template + void fill_Fortran_style (InputIterator entries, + TableBase<1,T> &table) + { + for (unsigned int i=0; i(i)) = *entries++; + } + + + template + void fill_Fortran_style (InputIterator entries, + TableBase<2,T> &table) + { + for (unsigned int j=0; j(i,j)) = *entries++; + } + + + template + void fill_Fortran_style (InputIterator entries, + TableBase<3,T> &table) + { + for (unsigned int k=0; k(i,j,k)) = *entries++; + } + + + template + void fill_Fortran_style (InputIterator, + TableBase &) + { + Assert (false, ExcNotImplemented()); + } + } +} + + template -template +template inline void -TableBase::fill (const T2 *entries) +TableBase::fill (InputIterator entries, + const bool C_style_indexing) { Assert (n_elements() != 0, ExcMessage("Trying to fill an empty matrix.")); - std::copy (entries, entries+n_elements(), values.begin()); + if (C_style_indexing) + for (typename std::vector::iterator p = values.begin(); + p != values.end(); ++p) + *p = *entries++; + else + internal::Table::fill_Fortran_style (entries, *this); } @@ -2224,6 +2456,20 @@ Table<1,T>::Table (const unsigned int size) +template +template +inline +Table<1,T>::Table (const unsigned int size, + InputIterator entries, + const bool C_style_indexing) + : + TableBase<1,T> (TableIndices<1> (size), + entries, + C_style_indexing) +{} + + + template inline typename std::vector::const_reference @@ -2310,6 +2556,21 @@ Table<2,T>::Table (const unsigned int size1, +template +template +inline +Table<2,T>::Table (const unsigned int size1, + const unsigned int size2, + InputIterator entries, + const bool C_style_indexing) + : + TableBase<2,T> (TableIndices<2> (size1, size2), + entries, + C_style_indexing) +{} + + + template inline void @@ -2564,6 +2825,22 @@ Table<3,T>::Table (const unsigned int size1, +template +template +inline +Table<3,T>::Table (const unsigned int size1, + const unsigned int size2, + const unsigned int size3, + InputIterator entries, + const bool C_style_indexing) + : + TableBase<3,T> (TableIndices<3> (size1, size2, size3), + entries, + C_style_indexing) +{} + + + template inline dealii::internal::TableBaseAccessors::Accessor<3,T,true,2> diff --git a/deal.II/include/deal.II/base/work_stream.h b/deal.II/include/deal.II/base/work_stream.h index 63007aba91..fd30a26371 100644 --- a/deal.II/include/deal.II/base/work_stream.h +++ b/deal.II/include/deal.II/base/work_stream.h @@ -26,6 +26,7 @@ #include #include #include +#include #ifdef DEAL_II_WITH_THREADS # include @@ -1166,11 +1167,215 @@ namespace WorkStream const std_cxx1x::function copier; }; } + + + /** + * A namespace for functions used in the implementation of + * Implementation3 whenever we don't actually have a copier + * function. In that case, we can simply use parallel_for, but we + * have to be careful with the use of thread-local objects as we + * had to be with Implementation3 as well. + */ + namespace ParallelFor + { + template + class Worker + { + public: + /** + * Constructor. 
+ */ + Worker (const std_cxx1x::function &worker, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data) + : + worker (worker), + sample_scratch_data (sample_scratch_data), + sample_copy_data (sample_copy_data) + {} + + + /** + * The function that calls the worker function on a + * range of items denoted by the two arguments. + */ + void operator() (const tbb::blocked_range::const_iterator> &range) + { + // we need to find an unused scratch and corresponding copy + // data object in the list that + // corresponds to the current thread and then mark it as used. if + // we can't find one, create one + // + // as discussed in the discussion of the documentation of the + // IteratorRangeToItemStream::scratch_data variable, there is no + // need to synchronize access to this variable using a mutex + // as long as we have no yield-point in between. this means that + // we can't take an iterator into the list now and expect it to + // still be valid after calling the worker, but we at least do + // not have to lock the following section + ScratchData *scratch_data = 0; + CopyData *copy_data = 0; + { + typename ItemType::ScratchAndCopyDataList & + scratch_and_copy_data_list = data.get(); + + // see if there is an unused object. if so, grab it and mark + // it as used + for (typename ItemType::ScratchAndCopyDataList::iterator + p = scratch_and_copy_data_list.begin(); + p != scratch_and_copy_data_list.end(); ++p) + if (p->currently_in_use == false) + { + scratch_data = p->scratch_data.get(); + copy_data = p->copy_data.get(); + p->currently_in_use = true; + break; + } + + // if no element in the list was found, create one and mark it as used + if (scratch_data == 0) + { + Assert (copy_data==0, ExcInternalError()); + scratch_data = new ScratchData(sample_scratch_data); + copy_data = new CopyData(sample_copy_data); + + typename ItemType::ScratchAndCopyDataList::value_type + new_scratch_object (scratch_data, copy_data, true); + scratch_and_copy_data_list.push_back (new_scratch_object); + } + } + + // then call the worker and copier function on each + // element of the chunk we were given. since these + // functions are called on separate threads, nothing good + // can happen if they throw an exception and we are best + // off catching it and showing an error message + for (typename std::vector::const_iterator p=range.begin(); + p != range.end(); ++p) + { + try + { + worker (*p, + *scratch_data, + *copy_data); + } + catch (const std::exception &exc) + { + Threads::internal::handle_std_exception (exc); + } + catch (...) + { + Threads::internal::handle_unknown_exception (); + } + } + + // finally mark the scratch object as unused again. as above, there + // is no need to lock anything here since the object we work on + // is thread-local + { + typename ItemType::ScratchAndCopyDataList & + scratch_and_copy_data_list = data.get(); + + for (typename ItemType::ScratchAndCopyDataList::iterator p = + scratch_and_copy_data_list.begin(); p != scratch_and_copy_data_list.end(); + ++p) + if (p->scratch_data.get() == scratch_data) + { + Assert(p->currently_in_use == true, ExcInternalError()); + p->currently_in_use = false; + } + } + + } + + private: + typedef + typename Implementation3::IteratorRangeToItemStream::ItemType + ItemType; + + typedef + typename ItemType::ScratchAndCopyDataList + ScratchAndCopyDataList; + + Threads::ThreadLocalStorage data; + + /** + * Pointer to the function + * that does the assembling + * on the sequence of cells. 
+ */ + const std_cxx1x::function worker; + + /** + * References to sample scratch and copy data for + * when we need them. + */ + const ScratchData &sample_scratch_data; + const CopyData &sample_copy_data; + }; + } + } + #endif // DEAL_II_WITH_THREADS + /** + * This is one of two main functions of the WorkStream concept, doing work as + * described in the introduction to this namespace. It corresponds to + * implementation 3 of the paper by Turcksin, Kronbichler and Bangerth, + * see @ref workstream_paper . + * As such, it takes not a range of iterators described by a begin + * and end iterator, but a "colored" graph of iterators where each + * color represents cells for which writing the cell contributions into + * the global object does not conflict (in other words, these cells + * are not neighbors). Each "color" is represented by std::vectors of cells. + * The first argument to this function, a set of sets of cells (which are + * represent as a vector of vectors, for efficiency), is typically + * constructed by calling GraphColoring::make_graph_coloring(). See there + * for more information. + * + * This function that can be used for worker and copier objects that + * are either pointers to non-member functions or objects that allow to be + * called with an operator(), for example objects created by std::bind. + * + * The two data types ScratchData and CopyData need to + * have a working copy constructor. ScratchData is only used in the + * worker function, while CopyData is the object passed + * from the worker to the copier. + * + * The @p queue_length argument indicates the number of items that can be + * live at any given time. Each item consists of @p chunk_size elements of + * the input stream that will be worked on by the worker and copier + * functions one after the other on the same thread. + * + * @note If your data objects are large, or their constructors are + * expensive, it is helpful to keep in mind that queue_length + * copies of the ScratchData object and + * queue_length*chunk_size copies of the CopyData object + * are generated. 
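A hedged sketch of how the colored-iterator overload documented above might be called. The scratch/copy types and the worker, copier and conflict-index callbacks are placeholders, and the GraphColoring::make_graph_coloring() call is assumed to follow the interface referenced in the comment:
@code
#include <deal.II/base/graph_coloring.h>
#include <deal.II/base/types.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/dofs/dof_handler.h>

#include <vector>

// Placeholder scratch/copy objects and user callbacks (not part of the patch).
struct ScratchData {};
struct CopyData {};

typedef dealii::DoFHandler<2>::active_cell_iterator CellIterator;

void cell_worker (const CellIterator &cell, ScratchData &scratch, CopyData &copy);
void global_copier (const CopyData &copy);
std::vector<dealii::types::global_dof_index>
conflict_indices (const CellIterator &cell);

void assemble (dealii::DoFHandler<2> &dof_handler)
{
  // color the cells so that cells of the same color never write to the same
  // global entries; the make_graph_coloring() interface is assumed here
  const CellIterator begin = dof_handler.begin_active();
  const CellIterator end   = dof_handler.end();
  const std::vector<std::vector<CellIterator> > colored_iterators
    = dealii::GraphColoring::make_graph_coloring (begin, end,
                                                  &conflict_indices);

  // the colored-iterator overload documented above
  dealii::WorkStream::run (colored_iterators,
                           &cell_worker,
                           &global_copier,
                           ScratchData (),
                           CopyData ());
}
@endcode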
+ */ + template + void + run (const std::vector > &colored_iterators, + Worker worker, + Copier copier, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data, + const unsigned int queue_length = 2*multithread_info.n_threads(), + const unsigned int chunk_size = 8); + /** * This is one of two main functions of the WorkStream concept, doing work as @@ -1260,17 +1465,17 @@ namespace WorkStream #ifdef DEAL_II_WITH_THREADS else // have TBB and use more than one thread { - // create the three stages of the pipeline - internal::Implementation2::IteratorRangeToItemStream - iterator_range_to_item_stream (begin, end, - queue_length, - chunk_size, - sample_scratch_data, - sample_copy_data); - // Check that the copier exist if (static_cast& >(copier)) { + // create the three stages of the pipeline + internal::Implementation2::IteratorRangeToItemStream + iterator_range_to_item_stream (begin, end, + queue_length, + chunk_size, + sample_scratch_data, + sample_copy_data); + internal::Implementation2::Worker worker_filter (worker); internal::Implementation2::Copier copier_filter (copier); @@ -1287,58 +1492,37 @@ namespace WorkStream } else { - internal::Implementation2::Worker worker_filter (worker,false); - - // now create a pipeline from these stages - tbb::pipeline assembly_line; - assembly_line.add_filter (iterator_range_to_item_stream); - assembly_line.add_filter (worker_filter); - - // and run it - assembly_line.run (queue_length); - - assembly_line.clear (); + // there is no copier function. in this case, we have an + // embarrassingly parallel problem where we can + // essentially apply parallel_for. because parallel_for + // requires subdividing the range for which operator- is + // necessary between iterators, it is often inefficient to + // apply it directory to cell ranges and similar iterator + // types for which operator- is expensive or, in fact, + // nonexistent. rather, in that case, we simply copy the + // iterators into a large array and use operator- on + // iterators to this array of iterators. + // + // instead of duplicating code, this is essentially the + // same situation we have in Implementation3 below, so we + // just defer to that place + std::vector > all_iterators (1); + for (Iterator p=begin; p!=end; ++p) + all_iterators[0].push_back (p); + + run (all_iterators, + worker, copier, + sample_scratch_data, + sample_copy_data, + queue_length, + chunk_size); } } #endif } - /** - * This is one of two main functions of the WorkStream concept, doing work as - * described in the introduction to this namespace. It corresponds to - * implementation 3 of the paper by Turcksin, Kronbichler and Bangerth, - * see @ref workstream_paper . - * As such, it takes not a range of iterators described by a begin - * and end iterator, but a "colored" graph of iterators where each - * color represents cells for which writing the cell contributions into - * the global object does not conflict (in other words, these cells - * are not neighbors). Each "color" is represented by std::vectors of cells. - * The first argument to this function, a set of sets of cells (which are - * represent as a vector of vectors, for efficiency), is typically - * constructed by calling GraphColoring::make_graph_coloring(). See there - * for more information. - * - * This function that can be used for worker and copier objects that - * are either pointers to non-member functions or objects that allow to be - * called with an operator(), for example objects created by std::bind. 
- * - * The two data types ScratchData and CopyData need to - * have a working copy constructor. ScratchData is only used in the - * worker function, while CopyData is the object passed - * from the worker to the copier. - * - * The @p queue_length argument indicates the number of items that can be - * live at any given time. Each item consists of @p chunk_size elements of - * the input stream that will be worked on by the worker and copier - * functions one after the other on the same thread. - * - * @note If your data objects are large, or their constructors are - * expensive, it is helpful to keep in mind that queue_length - * copies of the ScratchData object and - * queue_length*chunk_size copies of the CopyData object - * are generated. - */ + // Implementation 3: template void run (const std::vector > &colored_iterators, - Worker worker, - Copier copier, - const ScratchData &sample_scratch_data, - const CopyData &sample_copy_data, - const unsigned int queue_length = 2*multithread_info.n_threads(), - const unsigned int chunk_size = 8) + Worker worker, + Copier copier, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data, + const unsigned int queue_length, + const unsigned int chunk_size) { Assert (queue_length > 0, ExcMessage ("The queue length must be at least one, and preferably " @@ -1391,30 +1575,65 @@ namespace WorkStream // loop over the various colors of what we're given for (unsigned int color=0; color 0) - { - // create the three stages of the pipeline - internal::Implementation3::IteratorRangeToItemStream - iterator_range_to_item_stream (colored_iterators[color].begin(), - colored_iterators[color].end(), - queue_length, - chunk_size, - sample_scratch_data, - sample_copy_data); - - - internal::Implementation3::WorkerAndCopier - worker_and_copier_filter (worker, copier); - - // now create a pipeline from these stages - tbb::pipeline assembly_line; - assembly_line.add_filter (iterator_range_to_item_stream); - assembly_line.add_filter (worker_and_copier_filter); - - // and run it - assembly_line.run (queue_length); - - assembly_line.clear (); - } + { + if (static_cast& >(copier)) + { + // there is a copier function, so we have to go with + // the full three-stage design of the pipeline + internal::Implementation3::IteratorRangeToItemStream + iterator_range_to_item_stream (colored_iterators[color].begin(), + colored_iterators[color].end(), + queue_length, + chunk_size, + sample_scratch_data, + sample_copy_data); + + + internal::Implementation3::WorkerAndCopier + worker_and_copier_filter (worker, copier); + + // now create a pipeline from these stages + tbb::pipeline assembly_line; + assembly_line.add_filter (iterator_range_to_item_stream); + assembly_line.add_filter (worker_and_copier_filter); + + // and run it + assembly_line.run (queue_length); + + assembly_line.clear (); + } + else + { + // no copier function, we can implement things as a parallel for + Assert (static_cast& >(worker), + ExcMessage ("It makes no sense to call this function with " + "empty functions for both the worker and the " + "copier!")); + + typedef + internal::ParallelFor::Worker + ParallelForWorker; + + typedef + typename std::vector::const_iterator + RangeType; + + ParallelForWorker parallel_for_worker (worker, + sample_scratch_data, + sample_copy_data); + + tbb::parallel_for (tbb::blocked_range + (colored_iterators[color].begin(), + colored_iterators[color].end(), + /*grain_size=*/chunk_size), + std_cxx1x::bind (&ParallelForWorker::operator(), + 
std_cxx1x::ref(parallel_for_worker), + std_cxx1x::_1), + tbb::auto_partitioner()); + } + } } #endif } diff --git a/deal.II/include/deal.II/fe/fe_face.h b/deal.II/include/deal.II/fe/fe_face.h index 5199cc13c1..af7b702cfa 100644 --- a/deal.II/include/deal.II/fe/fe_face.h +++ b/deal.II/include/deal.II/fe/fe_face.h @@ -33,15 +33,17 @@ DEAL_II_NAMESPACE_OPEN * and two, the polynomials hence correspond to the usual Lagrange polynomials * on equidistant points. * - * This finite element is the trace space of FE_RaviartThomas on the faces and + * Although the name does not give it away, the element is discontinuous + * at locations where faces of cells meet. In particular, + * this finite element is the trace space of FE_RaviartThomas on the faces and * serves in hybridized methods, e.g. in combination with the FE_DGQ * element. Its use is demonstrated in the step-51 tutorial program. * - * @note Since these are only finite elements on faces, only + * @note Since this element is defined only on faces, only * FEFaceValues and FESubfaceValues will be able to extract reasonable * values from any face polynomial. In order to make the use of - * FESystem simpler, FEValues objects will not fail using this finite - * element space, but all shape function values extracted will equal + * FESystem simpler, using a (cell) FEValues object will not fail using this finite + * element space, but all shape function values extracted will be equal * to zero. * * @ingroup fe @@ -133,18 +135,22 @@ private: /** - * A finite element, which is a Legendre on each face (i.e., FE_DGP) + * A finite element, which is a Legendre element of complete polynomials on + * each face (i.e., it is the face equivalent of what FE_DGP is on cells) * and undefined in the interior of the cells. The basis functions on * the faces are from Polynomials::Legendre. * - * This element is used in a hybridized method together with the FE_DGP - * element for the interior degrees of freedom. + * Although the name does not give it away, the element is discontinuous + * at locations where faces of cells meet. The element + * serves in hybridized methods, e.g. in combination with the FE_DGP + * element. An example of hybridizes methods can be found in the + * step-51 tutorial program. * - * @note Since these are only finite elements on faces, only + * @note Since this element is defined only on faces, only * FEFaceValues and FESubfaceValues will be able to extract reasonable * values from any face polynomial. In order to make the use of - * FESystem simpler, FEValues objects will not fail using this finite - * element space, but all shape function values extracted will equal + * FESystem simpler, using a (cell) FEValues object will not fail using this finite + * element space, but all shape function values extracted will be equal * to zero. * * @ingroup fe diff --git a/deal.II/include/deal.II/hp/fe_values.h b/deal.II/include/deal.II/hp/fe_values.h index cad63fdf4f..c4d14caa6d 100644 --- a/deal.II/include/deal.II/hp/fe_values.h +++ b/deal.II/include/deal.II/hp/fe_values.h @@ -158,7 +158,7 @@ namespace internal * Initially, all entries have zero pointers, and we will allocate them * lazily as needed in select_fe_values(). 
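To illustrate the note in the fe_face.h changes above (FEFaceValues sees the actual face polynomials, while a cell FEValues object can be constructed but only ever returns zero shape function values), a small sketch; the polynomial degree and quadrature formulas are arbitrary choices:
@code
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_face.h>
#include <deal.II/fe/fe_values.h>

void face_element_example ()
{
  const dealii::FE_FaceQ<2> fe (1);            // degree chosen arbitrarily

  // FEFaceValues can evaluate the face polynomials of this element ...
  dealii::FEFaceValues<2> fe_face_values (fe,
                                          dealii::QGauss<1> (2),
                                          dealii::update_values);

  // ... whereas a (cell) FEValues object can still be constructed, but all
  // shape function values it returns are zero, as stated in the comment.
  dealii::FEValues<2> fe_values (fe,
                                 dealii::QGauss<2> (2),
                                 dealii::update_values);
}
@endcode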
*/ - Table<3,std_cxx1x::shared_ptr > fe_values_table; + dealii::Table<3,std_cxx1x::shared_ptr > fe_values_table; /** * Set of indices pointing at the fe_values object selected last time diff --git a/deal.II/include/deal.II/lac/block_sparsity_pattern.h b/deal.II/include/deal.II/lac/block_sparsity_pattern.h index 392eaa4fca..e22741f97c 100644 --- a/deal.II/include/deal.II/lac/block_sparsity_pattern.h +++ b/deal.II/include/deal.II/lac/block_sparsity_pattern.h @@ -1016,113 +1016,100 @@ namespace TrilinosWrappers public: /** - * Initialize the matrix empty, - * that is with no memory - * allocated. This is useful if - * you want such objects as - * member variables in other - * classes. You can make the - * structure usable by calling - * the reinit() function. + * Initialize the matrix empty, that is with no memory allocated. This is + * useful if you want such objects as member variables in other + * classes. You can make the structure usable by calling the reinit() + * function. */ BlockSparsityPattern (); /** - * Initialize the matrix with the - * given number of block rows and - * columns. The blocks themselves - * are still empty, and you have - * to call collect_sizes() after - * you assign them sizes. + * Initialize the matrix with the given number of block rows and + * columns. The blocks themselves are still empty, and you have to call + * collect_sizes() after you assign them sizes. */ BlockSparsityPattern (const size_type n_rows, const size_type n_columns); /** - * Initialize the pattern with - * two BlockIndices for the block - * structures of matrix rows and - * columns. This function is - * equivalent to calling the - * previous constructor with the - * length of the two index vector - * and then entering the index - * values. + * Initialize the pattern with two BlockIndices for the block structures + * of matrix rows and columns. This function is equivalent to calling the + * previous constructor with the length of the two index vector and then + * entering the index values. */ BlockSparsityPattern (const std::vector &row_block_sizes, const std::vector &col_block_sizes); /** - * Initialize the pattern with an array - * Epetra_Map that specifies both rows - * and columns of the matrix (so the - * final matrix will be a square - * matrix), where the Epetra_Map - * specifies the parallel distribution - * of the degrees of freedom on the - * individual block. This function is - * equivalent to calling the second - * constructor with the length of the - * mapping vector and then entering the - * index values. + * Initialize the pattern with an array Epetra_Map that specifies both + * rows and columns of the matrix (so the final matrix will be a square + * matrix), where the Epetra_Map specifies the parallel distribution of + * the degrees of freedom on the individual block. This function is + * equivalent to calling the second constructor with the length of the + * mapping vector and then entering the index values. */ BlockSparsityPattern (const std::vector ¶llel_partitioning); /** - * Initialize the pattern with an array - * of index sets that specifies both rows - * and columns of the matrix (so the - * final matrix will be a square matrix), - * where the size() of the IndexSets - * specifies the size of the blocks and - * the values in each IndexSet denotes - * the rows that are going to be saved in - * each block. 
+ * Initialize the pattern with an array of index sets that specifies both + * rows and columns of the matrix (so the final matrix will be a square + * matrix), where the size() of the IndexSets specifies the size of the + * blocks and the values in each IndexSet denotes the rows that are going + * to be saved in each block. */ BlockSparsityPattern (const std::vector ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Resize the matrix to a tensor - * product of matrices with - * dimensions defined by the - * arguments. + * Resize the matrix to a tensor product of matrices with dimensions + * defined by the arguments. * - * The matrix will have as many - * block rows and columns as - * there are entries in the two - * arguments. The block at - * position (i,j) will - * have the dimensions - * row_block_sizes[i] - * times col_block_sizes[j]. + * The matrix will have as many block rows and columns as there are + * entries in the two arguments. The block at position (i,j) will + * have the dimensions row_block_sizes[i] times + * col_block_sizes[j]. */ void reinit (const std::vector &row_block_sizes, const std::vector &col_block_sizes); /** - * Resize the matrix to a square tensor - * product of matrices with parallel - * distribution according to the - * specifications in the array of + * Resize the matrix to a square tensor product of matrices with parallel + * distribution according to the specifications in the array of * Epetra_Maps. */ void reinit (const std::vector ¶llel_partitioning); /** - * Resize the matrix to a square tensor - * product of matrices. See the - * constructor that takes a vector of - * IndexSets for details. + * Resize the matrix to a square tensor product of matrices. See the + * constructor that takes a vector of IndexSets for details. */ void reinit (const std::vector ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD); + /** + * Resize the matrix to a rectangular block matrices. This method allows + * rows and columns to be different, both in the outer block structure and + * within the blocks. + */ + void reinit (const std::vector &row_parallel_partitioning, + const std::vector &column_parallel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Resize the matrix to a rectangular block matrices that furthermore + * explicitly specify the writable rows in each of the blocks. This method + * is used to create matrices that allow several threads to write + * simultaneously into the matrix (to different rows, of course), see the + * method TrilinosWrappers::SparsityPattern::reinit method with three + * index set arguments for more details. + */ + void reinit (const std::vector &row_parallel_partitioning, + const std::vector &column_parallel_partitioning, + const std::vector &writeable_rows, + const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Allow the use of the reinit - * functions of the base class as - * well. + * Allow the use of the reinit functions of the base class as well. 
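A brief sketch of the new rectangular reinit() overloads declared above for TrilinosWrappers::BlockSparsityPattern (assuming a deal.II build with Trilinos and MPI); the partitioning vectors are placeholders that would normally come from the parallel DoF distribution:
@code
#include <deal.II/base/index_set.h>
#include <deal.II/lac/block_sparsity_pattern.h>

#include <vector>

void rectangular_block_pattern
  (const std::vector<dealii::IndexSet> &row_partitioning,
   const std::vector<dealii::IndexSet> &column_partitioning,
   const std::vector<dealii::IndexSet> &writeable_rows,
   const MPI_Comm                       communicator)
{
  dealii::TrilinosWrappers::BlockSparsityPattern bsp;

  // rows and columns may differ, both in the outer block structure and
  // within the individual blocks
  bsp.reinit (row_partitioning, column_partitioning, communicator);

  // alternatively, also state which rows each block may be written to, so
  // that several threads can later write into the resulting matrix
  bsp.reinit (row_partitioning, column_partitioning, writeable_rows,
              communicator);
}
@endcode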
*/ using BlockSparsityPatternBase::reinit; }; diff --git a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h index ed72eb0060..8babcea003 100644 --- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h @@ -84,17 +84,11 @@ namespace TrilinosWrappers << " are stored locally and can be accessed."); /** - * Handling of indices for both - * constant and non constant - * Accessor objects - * - * For a regular - * dealii::SparseMatrix, we would - * use an accessor for the sparsity - * pattern. For Trilinos matrices, - * this does not seem so simple, - * therefore, we write a little - * base class here. + * Handling of indices for both constant and non constant Accessor objects + * + * For a regular dealii::SparseMatrix, we would use an accessor for the + * sparsity pattern. For Trilinos matrices, this does not seem so simple, + * therefore, we write a little base class here. * * @author Guido Kanschat * @date 2012 @@ -115,34 +109,26 @@ namespace TrilinosWrappers const size_type index); /** - * Row number of the element - * represented by this object. + * Row number of the element represented by this object. */ size_type row() const; /** - * Index in row of the element - * represented by this object. + * Index in row of the element represented by this object. */ size_type index() const; /** - * Column number of the element - * represented by this object. + * Column number of the element represented by this object. */ size_type column() const; protected: /** - * Pointer to the matrix - * object. This object should - * be handled as a const - * pointer or non-const by the - * appropriate derived - * classes. In order to be able - * to implement both, it is not - * const here, so handle with - * care! + * Pointer to the matrix object. This object should be handled as a + * const pointer or non-const by the appropriate derived classes. In + * order to be able to implement both, it is not const here, so handle + * with care! */ mutable SparseMatrix *matrix; /** @@ -156,45 +142,28 @@ namespace TrilinosWrappers size_type a_index; /** - * Discard the old row caches - * (they may still be used by - * other accessors) and - * generate new ones for the - * row pointed to presently by + * Discard the old row caches (they may still be used by other + * accessors) and generate new ones for the row pointed to presently by * this accessor. */ void visit_present_row (); /** - * Cache where we store the - * column indices of the - * present row. This is - * necessary, since Trilinos - * makes access to the elements - * of its matrices rather hard, - * and it is much more - * efficient to copy all column - * entries of a row once when - * we enter it than repeatedly - * asking Trilinos for - * individual ones. This also - * makes some sense since it is - * likely that we will access - * them sequentially anyway. + * Cache where we store the column indices of the present row. This is + * necessary, since Trilinos makes access to the elements of its + * matrices rather hard, and it is much more efficient to copy all + * column entries of a row once when we enter it than repeatedly asking + * Trilinos for individual ones. This also makes some sense since it is + * likely that we will access them sequentially anyway. 
* - * In order to make copying of - * iterators/accessor of - * acceptable performance, we - * keep a shared pointer to - * these entries so that more - * than one accessor can access - * this data if necessary. + * In order to make copying of iterators/accessor of acceptable + * performance, we keep a shared pointer to these entries so that more + * than one accessor can access this data if necessary. */ std_cxx1x::shared_ptr > colnum_cache; /** - * Cache for the values - * of this row. + * Cache for the values of this row. */ std_cxx1x::shared_ptr > value_cache; }; @@ -231,25 +200,21 @@ namespace TrilinosWrappers { public: /** - * Typedef for the type (including - * constness) of the matrix to be - * used here. + * Typedef for the type (including constness) of the matrix to be used + * here. */ typedef const SparseMatrix MatrixType; /** - * Constructor. Since we use - * accessors only for read - * access, a const matrix - * pointer is sufficient. + * Constructor. Since we use accessors only for read access, a const + * matrix pointer is sufficient. */ Accessor (MatrixType *matrix, const size_type row, const size_type index); /** - * Copy constructor to get from a - * const or non-const accessor to a const + * Copy constructor to get from a const or non-const accessor to a const * accessor. */ template @@ -262,8 +227,7 @@ namespace TrilinosWrappers private: /** - * Make iterator class a - * friend. + * Make iterator class a friend. */ template friend class Iterator; }; @@ -283,66 +247,53 @@ namespace TrilinosWrappers Reference (const Accessor &accessor); /** - * Conversion operator to the - * data type of the matrix. + * Conversion operator to the data type of the matrix. */ operator TrilinosScalar () const; /** - * Set the element of the matrix - * we presently point to to @p n. + * Set the element of the matrix we presently point to to @p n. */ const Reference &operator = (const TrilinosScalar n) const; /** - * Add @p n to the element of the - * matrix we presently point to. + * Add @p n to the element of the matrix we presently point to. */ const Reference &operator += (const TrilinosScalar n) const; /** - * Subtract @p n from the element - * of the matrix we presently - * point to. + * Subtract @p n from the element of the matrix we presently point to. */ const Reference &operator -= (const TrilinosScalar n) const; /** - * Multiply the element of the - * matrix we presently point to - * by @p n. + * Multiply the element of the matrix we presently point to by @p n. */ const Reference &operator *= (const TrilinosScalar n) const; /** - * Divide the element of the - * matrix we presently point to - * by @p n. + * Divide the element of the matrix we presently point to by @p n. */ const Reference &operator /= (const TrilinosScalar n) const; private: /** - * Pointer to the accessor that - * denotes which element we - * presently point to. + * Pointer to the accessor that denotes which element we presently + * point to. */ Accessor &accessor; }; public: /** - * Typedef for the type (including - * constness) of the matrix to be - * used here. + * Typedef for the type (including constness) of the matrix to be used + * here. */ typedef SparseMatrix MatrixType; /** - * Constructor. Since we use - * accessors only for read - * access, a const matrix - * pointer is sufficient. + * Constructor. Since we use accessors only for read access, a const + * matrix pointer is sufficient. 
*/ Accessor (MatrixType *matrix, const size_type row, @@ -355,13 +306,11 @@ namespace TrilinosWrappers private: /** - * Make iterator class a - * friend. + * Make iterator class a friend. */ template friend class Iterator; /** - * Make Reference object a - * friend. + * Make Reference object a friend. */ friend class Reference; }; @@ -390,25 +339,21 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * Typedef for the matrix type - * (including constness) we are to - * operate on. + * Typedef for the matrix type (including constness) we are to operate + * on. */ typedef typename Accessor::MatrixType MatrixType; /** - * Constructor. Create an - * iterator into the matrix @p - * matrix for the given row and - * the index within it. + * Constructor. Create an iterator into the matrix @p matrix for the + * given row and the index within it. */ Iterator (MatrixType *matrix, const size_type row, const size_type index); /** - * Copy constructor with - * optional change of constness. + * Copy constructor with optional change of constness. */ template Iterator(const Iterator &other); @@ -434,9 +379,8 @@ namespace TrilinosWrappers const Accessor *operator-> () const; /** - * Comparison. True, if both - * iterators point to the same - * matrix position. + * Comparison. True, if both iterators point to the same matrix + * position. */ bool operator == (const Iterator &) const; @@ -446,11 +390,8 @@ namespace TrilinosWrappers bool operator != (const Iterator &) const; /** - * Comparison operator. Result - * is true if either the first - * row number is smaller or if - * the row numbers are equal - * and the first index is + * Comparison operator. Result is true if either the first row number is + * smaller or if the row numbers are equal and the first index is * smaller. */ bool operator < (const Iterator &) const; @@ -471,8 +412,7 @@ namespace TrilinosWrappers private: /** - * Store an object of the - * accessor class. + * Store an object of the accessor class. */ Accessor accessor; @@ -508,6 +448,29 @@ namespace TrilinosWrappers * unused elements. Trilinos allows to continue with assembling the * matrix after calls to these functions, though. * + *

+     * <h3>Thread safety of Trilinos matrices</h3>
+     *
+     * When writing into Trilinos matrices from several threads in shared
+     * memory, several things must be kept in mind as there are no built-in
+     * locks in this class to prevent data races. Therefore, simultaneous
+     * access to the same matrix row at the same time leads to data races in
+     * general and must be explicitly avoided by the user. However, it is
+     * possible to access different rows of the matrix from several
+     * threads simultaneously under the following two conditions:
+     * <ul>
+     * <li> The matrix uses only one MPI process.
+     * <li> The matrix has been initialized from a
+     * TrilinosWrappers::SparsityPattern object that in turn has been
+     * initialized with the reinit function specifying three index sets, one
+     * for the rows, one for the columns and for the larger set of @p
+     * writeable_rows. Note that all other reinit methods and constructors of
+     * TrilinosWrappers::SparsityPattern will result in a matrix that needs to
+     * allocate off-processor entries on demand, which breaks
+     * thread-safety. Of course, using the respective reinit method for the
+     * block Trilinos sparsity pattern and block matrix also results in
+     * thread-safety.
+     * </ul>
+     *
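A hedged sketch of the second condition above: a sparsity pattern built with the three-index-set reinit() allows several threads to write into different rows of the resulting matrix. The exact argument list of that reinit() is not shown in this hunk and is assumed here; the index sets are placeholders:
@code
#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>

void make_thread_writable_matrix (const dealii::IndexSet &locally_owned_rows,
                                  const dealii::IndexSet &relevant_columns,
                                  const dealii::IndexSet &writeable_rows,
                                  const MPI_Comm          communicator)
{
  // a pattern set up with rows, columns and the larger set of writeable
  // rows (argument order assumed, see TrilinosWrappers::SparsityPattern)
  dealii::TrilinosWrappers::SparsityPattern sp;
  sp.reinit (locally_owned_rows, relevant_columns, writeable_rows,
             communicator);

  // ... add entries to sp and call sp.compress() ...

  // a matrix built from such a pattern may be written to from several
  // threads, as long as they touch different rows
  dealii::TrilinosWrappers::SparseMatrix matrix;
  matrix.reinit (sp);
}
@endcode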
    + * * @ingroup TrilinosWrappers * @ingroup Matrix1 * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 @@ -521,43 +484,33 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * A structure that describes - * some of the traits of this - * class in terms of its run-time - * behavior. Some other classes - * (such as the block matrix - * classes) that take one or - * other of the matrix classes as - * its template parameters can - * tune their behavior based on - * the variables in this class. + * A structure that describes some of the traits of this class in terms of + * its run-time behavior. Some other classes (such as the block matrix + * classes) that take one or other of the matrix classes as its template + * parameters can tune their behavior based on the variables in this + * class. */ struct Traits { /** - * It is safe to elide additions - * of zeros to individual - * elements of this matrix. + * It is safe to elide additions of zeros to individual elements of this + * matrix. */ static const bool zero_addition_can_be_elided = true; }; /** - * Declare a typedef for the - * iterator class. + * Declare a typedef for the iterator class. */ typedef SparseMatrixIterators::Iterator iterator; /** - * Declare a typedef for the - * const iterator class. + * Declare a typedef for the const iterator class. */ typedef SparseMatrixIterators::Iterator const_iterator; /** - * Declare a typedef in analogy - * to all the other container - * classes. + * Declare a typedef in analogy to all the other container classes. */ typedef TrilinosScalar value_type; @@ -566,18 +519,15 @@ namespace TrilinosWrappers */ //@{ /** - * Default constructor. Generates - * an empty (zero-size) matrix. + * Default constructor. Generates an empty (zero-size) matrix. */ SparseMatrix (); /** - * Generate a matrix that is completely - * stored locally, having #m rows and + * Generate a matrix that is completely stored locally, having #m rows and * #n columns. * - * The number of columns entries per - * row is specified as the maximum + * The number of columns entries per row is specified as the maximum * number of entries argument. */ SparseMatrix (const size_type m, @@ -585,118 +535,85 @@ namespace TrilinosWrappers const unsigned int n_max_entries_per_row); /** - * Generate a matrix that is completely - * stored locally, having #m rows and + * Generate a matrix that is completely stored locally, having #m rows and * #n columns. * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. + * The vector n_entries_per_row specifies the number of entries + * in each row. */ SparseMatrix (const size_type m, const size_type n, const std::vector &n_entries_per_row); /** - * Generate a matrix from a Trilinos - * sparsity pattern object. + * Generate a matrix from a Trilinos sparsity pattern object. */ SparseMatrix (const SparsityPattern &InputSparsityPattern); /** - * Copy constructor. Sets the - * calling matrix to be the same - * as the input matrix, i.e., - * using the same sparsity - * pattern and entries. + * Copy constructor. Sets the calling matrix to be the same as the input + * matrix, i.e., using the same sparsity pattern and entries. */ SparseMatrix (const SparseMatrix &InputMatrix); /** - * Destructor. Made virtual so - * that one can use pointers to - * this class. + * Destructor. Made virtual so that one can use pointers to this class. 
*/ virtual ~SparseMatrix (); /** - * This function initializes the - * Trilinos matrix with a deal.II - * sparsity pattern, i.e. it makes - * the Trilinos Epetra matrix know - * the position of nonzero entries - * according to the sparsity - * pattern. This function is meant - * for use in serial programs, where - * there is no need to specify how - * the matrix is going to be - * distributed among different - * processors. This function works in - * %parallel, too, but it is - * recommended to manually specify - * the %parallel partioning of the - * matrix using an Epetra_Map. When - * run in %parallel, it is currently - * necessary that each processor - * holds the sparsity_pattern - * structure because each processor + * This function initializes the Trilinos matrix with a deal.II sparsity + * pattern, i.e. it makes the Trilinos Epetra matrix know the position of + * nonzero entries according to the sparsity pattern. This function is + * meant for use in serial programs, where there is no need to specify how + * the matrix is going to be distributed among different processors. This + * function works in %parallel, too, but it is recommended to manually + * specify the %parallel partioning of the matrix using an + * Epetra_Map. When run in %parallel, it is currently necessary that each + * processor holds the sparsity_pattern structure because each processor * sets its rows. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const SparsityType &sparsity_pattern); /** - * This function reinitializes the - * Trilinos sparse matrix from a - * (possibly distributed) Trilinos - * sparsity pattern. + * This function reinitializes the Trilinos sparse matrix from a (possibly + * distributed) Trilinos sparsity pattern. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * If you want to write to the matrix from several threads and use MPI, + * you need to use this reinit method with a sparsity pattern that has + * been created with explicitly stating writeable rows. In all other + * cases, you cannot mix MPI with multithreaded writing into the matrix. */ void reinit (const SparsityPattern &sparsity_pattern); /** - * This function copies the content - * in sparse_matrix to the + * This function copies the content in sparse_matrix to the * calling matrix. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ void reinit (const SparseMatrix &sparse_matrix); /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix + * This function initializes the Trilinos matrix using the deal.II sparse + * matrix and the entries stored therein. 
It uses a threshold to copy only + * elements with modulus larger than the threshold (so zeros in the + * deal.II matrix can be filtered away). + * + * The optional parameter copy_values decides whether only the + * sparsity structure of the input matrix should be used or the matrix * entries should be copied, too. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * deadlock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a deadlock. * * @note If a different sparsity pattern is given in the last argument * (i.e., one that differs from the one used in the sparse matrix given @@ -712,11 +629,9 @@ namespace TrilinosWrappers const ::dealii::SparsityPattern *use_this_sparsity=0); /** - * This reinit function takes as - * input a Trilinos Epetra_CrsMatrix - * and copies its sparsity - * pattern. If so requested, even the - * content (values) will be copied. + * This reinit function takes as input a Trilinos Epetra_CrsMatrix and + * copies its sparsity pattern. If so requested, even the content (values) + * will be copied. */ void reinit (const Epetra_CrsMatrix &input_matrix, const bool copy_values = true); @@ -726,150 +641,87 @@ namespace TrilinosWrappers */ //@{ /** - * Constructor using an Epetra_Map to - * describe the %parallel - * partitioning. The parameter @p - * n_max_entries_per_row sets the - * number of nonzero entries in each - * row that will be allocated. Note - * that this number does not need to - * be exact, and it is even allowed - * that the actual matrix structure - * has more nonzero entries than - * specified in the - * constructor. However it is still - * advantageous to provide good - * estimates here since this will - * considerably increase the - * performance of the matrix - * setup. However, there is no effect - * in the performance of - * matrix-vector products, since - * Trilinos reorganizes the matrix - * memory prior to use (in the - * compress() step). + * Constructor using an Epetra_Map to describe the %parallel + * partitioning. The parameter @p n_max_entries_per_row sets the number of + * nonzero entries in each row that will be allocated. Note that this + * number does not need to be exact, and it is even allowed that the + * actual matrix structure has more nonzero entries than specified in the + * constructor. However it is still advantageous to provide good estimates + * here since this will considerably increase the performance of the + * matrix setup. However, there is no effect in the performance of + * matrix-vector products, since Trilinos reorganizes the matrix memory + * prior to use (in the compress() step). */ SparseMatrix (const Epetra_Map ¶llel_partitioning, const size_type n_max_entries_per_row = 0); /** - * Same as before, but now set a - * value of nonzeros for each matrix - * row. Since we know the number of - * elements in the matrix exactly in - * this case, we can already allocate - * the right amount of memory, which - * makes the creation process - * including the insertion of nonzero - * elements by the respective - * SparseMatrix::reinit call - * considerably faster. + * Same as before, but now set a value of nonzeros for each matrix + * row. Since we know the number of elements in the matrix exactly in this + * case, we can already allocate the right amount of memory, which makes + * the creation process including the insertion of nonzero elements by the + * respective SparseMatrix::reinit call considerably faster. 
*/ SparseMatrix (const Epetra_Map ¶llel_partitioning, const std::vector &n_entries_per_row); /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map describes the %parallel - * partitioning of the dofs - * associated with the matrix rows - * and the other one the partitioning - * of dofs in the matrix - * columns. Note that there is no - * real parallelism along the columns - * – the processor that owns a - * certain row always owns all the - * column elements, no matter how far - * they might be spread out. The - * second Epetra_Map is only used to - * specify the number of columns and - * for internal arrangements when - * doing matrix-vector products with - * vectors based on that column map. - * - * The integer input @p - * n_max_entries_per_row defines the - * number of columns entries per row - * that will be allocated. + * This constructor is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map describes + * the %parallel partitioning of the dofs associated with the matrix rows + * and the other one the partitioning of dofs in the matrix columns. Note + * that there is no real parallelism along the columns – the + * processor that owns a certain row always owns all the column elements, + * no matter how far they might be spread out. The second Epetra_Map is + * only used to specify the number of columns and for internal + * arrangements when doing matrix-vector products with vectors based on + * that column map. + * + * The integer input @p n_max_entries_per_row defines the number of + * columns entries per row that will be allocated. */ SparseMatrix (const Epetra_Map &row_parallel_partitioning, const Epetra_Map &col_parallel_partitioning, const size_type n_max_entries_per_row = 0); /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map specifies the %parallel - * distribution of degrees of freedom - * associated with matrix rows and - * the second one specifies the - * %parallel distribution the dofs - * associated with columns in the - * matrix. The second map also - * provides information for the - * internal arrangement in matrix - * vector products (i.e., the - * distribution of vector this matrix - * is to be multiplied with), but is - * not used for the distribution of - * the columns – rather, all - * column elements of a row are - * stored on the same processor in - * any case. The vector - * n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. + * This constructor is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of degrees of freedom associated with matrix + * rows and the second one specifies the %parallel distribution the dofs + * associated with columns in the matrix. 
The second map also provides + * information for the internal arrangement in matrix vector products + * (i.e., the distribution of vector this matrix is to be multiplied + * with), but is not used for the distribution of the columns – + * rather, all column elements of a row are stored on the same processor + * in any case. The vector n_entries_per_row specifies the number + * of entries in each row of the newly generated matrix. */ SparseMatrix (const Epetra_Map &row_parallel_partitioning, const Epetra_Map &col_parallel_partitioning, const std::vector &n_entries_per_row); /** - * This function is initializes the - * Trilinos Epetra matrix according to - * the specified sparsity_pattern, and - * also reassigns the matrix rows to - * different processes according to a - * user-supplied Epetra map. In - * programs following the style of the - * tutorial programs, this function - * (and the respective call for a - * rectangular matrix) are the natural - * way to initialize the matrix size, - * its distribution among the MPI - * processes (if run in %parallel) as - * well as the locatoin of non-zero - * elements. Trilinos stores the - * sparsity pattern internally, so it - * won't be needed any more after this - * call, in contrast to the deal.II own - * object. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. If - * the flag is not set, each processor - * just sets the elements in the - * sparsity pattern that belong to its - * rows. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This function is initializes the Trilinos Epetra matrix according to + * the specified sparsity_pattern, and also reassigns the matrix rows to + * different processes according to a user-supplied Epetra map. In + * programs following the style of the tutorial programs, this function + * (and the respective call for a rectangular matrix) are the natural way + * to initialize the matrix size, its distribution among the MPI processes + * (if run in %parallel) as well as the locatoin of non-zero + * elements. Trilinos stores the sparsity pattern internally, so it won't + * be needed any more after this call, in contrast to the deal.II own + * object. The optional argument @p exchange_data can be used for + * reinitialization with a sparsity pattern that is not fully + * constructed. This feature is only implemented for input sparsity + * patterns of type CompressedSimpleSparsityPattern. If the flag is not + * set, each processor just sets the elements in the sparsity pattern that + * belong to its rows. + * + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const Epetra_Map ¶llel_partitioning, @@ -877,25 +729,16 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * This function is similar to the - * other initialization function - * above, but now also reassigns the - * matrix rows and columns according - * to two user-supplied Epetra maps. - * To be used for rectangular - * matrices. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. 
This feature is only - * implemented for input sparsity - * patterns of type + * This function is similar to the other initialization function above, + * but now also reassigns the matrix rows and columns according to two + * user-supplied Epetra maps. To be used for rectangular matrices. The + * optional argument @p exchange_data can be used for reinitialization + * with a sparsity pattern that is not fully constructed. This feature is + * only implemented for input sparsity patterns of type * CompressedSimpleSparsityPattern. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const Epetra_Map &row_parallel_partitioning, @@ -904,32 +747,20 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). In - * contrast to the other reinit - * function with deal.II sparse - * matrix argument, this function - * takes a %parallel partitioning - * specified by the user instead of - * internally generating it. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix + * This function initializes the Trilinos matrix using the deal.II sparse + * matrix and the entries stored therein. It uses a threshold to copy only + * elements with modulus larger than the threshold (so zeros in the + * deal.II matrix can be filtered away). In contrast to the other reinit + * function with deal.II sparse matrix argument, this function takes a + * %parallel partitioning specified by the user instead of internally + * generating it. + * + * The optional parameter copy_values decides whether only the + * sparsity structure of the input matrix should be used or the matrix * entries should be copied, too. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const Epetra_Map ¶llel_partitioning, @@ -939,25 +770,17 @@ namespace TrilinosWrappers const ::dealii::SparsityPattern *use_this_sparsity=0); /** - * This function is similar to the - * other initialization function with - * deal.II sparse matrix input above, - * but now takes Epetra maps for both - * the rows and the columns of the - * matrix. Chosen for rectangular + * This function is similar to the other initialization function with + * deal.II sparse matrix input above, but now takes Epetra maps for both + * the rows and the columns of the matrix. Chosen for rectangular * matrices. * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix + * The optional parameter copy_values decides whether only the + * sparsity structure of the input matrix should be used or the matrix * entries should be copied, too. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. 
+ * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const Epetra_Map &row_parallel_partitioning, @@ -972,77 +795,45 @@ namespace TrilinosWrappers */ //@{ /** - * Constructor using an IndexSet and - * an MPI communicator to describe - * the %parallel partitioning. The - * parameter @p n_max_entries_per_row - * sets the number of nonzero entries - * in each row that will be - * allocated. Note that this number - * does not need to be exact, and it - * is even allowed that the actual - * matrix structure has more nonzero - * entries than specified in the - * constructor. However it is still - * advantageous to provide good - * estimates here since this will - * considerably increase the - * performance of the matrix - * setup. However, there is no effect - * in the performance of - * matrix-vector products, since - * Trilinos reorganizes the matrix - * memory prior to use (in the - * compress() step). + * Constructor using an IndexSet and an MPI communicator to describe the + * %parallel partitioning. The parameter @p n_max_entries_per_row sets the + * number of nonzero entries in each row that will be allocated. Note that + * this number does not need to be exact, and it is even allowed that the + * actual matrix structure has more nonzero entries than specified in the + * constructor. However it is still advantageous to provide good estimates + * here since this will considerably increase the performance of the + * matrix setup. However, there is no effect in the performance of + * matrix-vector products, since Trilinos reorganizes the matrix memory + * prior to use (in the compress() step). */ SparseMatrix (const IndexSet ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD, const unsigned int n_max_entries_per_row = 0); /** - * Same as before, but now set the - * number of nonzeros in each matrix - * row separately. Since we know the - * number of elements in the matrix - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes the - * creation process including the - * insertion of nonzero elements by - * the respective - * SparseMatrix::reinit call - * considerably faster. + * Same as before, but now set the number of nonzeros in each matrix row + * separately. Since we know the number of elements in the matrix exactly + * in this case, we can already allocate the right amount of memory, which + * makes the creation process including the insertion of nonzero elements + * by the respective SparseMatrix::reinit call considerably faster. */ SparseMatrix (const IndexSet ¶llel_partitioning, const MPI_Comm &communicator, const std::vector &n_entries_per_row); /** - * This constructor is similar to the - * one above, but it now takes two - * different IndexSet partitions for - * row and columns. This interface is - * meant to be used for generating - * rectangular matrices, where the - * first index set describes the - * %parallel partitioning of the - * degrees of freedom associated with - * the matrix rows and the second one - * the partitioning of the matrix - * columns. The second index set - * specifies the partitioning of the - * vectors this matrix is to be - * multiplied with, not the - * distribution of the elements that - * actually appear in the matrix. - * - * The parameter @p - * n_max_entries_per_row defines how - * much memory will be allocated for - * each row. 
This number does not - * need to be accurate, as the - * structure is reorganized in the - * compress() call. + * This constructor is similar to the one above, but it now takes two + * different IndexSet partitions for rows and columns. This interface is + * meant to be used for generating rectangular matrices, where the first + * index set describes the %parallel partitioning of the degrees of + * freedom associated with the matrix rows and the second one the + * partitioning of the matrix columns. The second index set specifies the + * partitioning of the vectors this matrix is to be multiplied with, not + * the distribution of the elements that actually appear in the matrix. + * + * The parameter @p n_max_entries_per_row defines how much memory will be + * allocated for each row. This number does not need to be accurate, as + * the structure is reorganized in the compress() call. */ SparseMatrix (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, @@ -1050,33 +841,18 @@ namespace TrilinosWrappers const size_type n_max_entries_per_row = 0); /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map specifies the %parallel - * distribution of degrees of freedom - * associated with matrix rows and - * the second one specifies the - * %parallel distribution the dofs - * associated with columns in the - * matrix. The second map also - * provides information for the - * internal arrangement in matrix - * vector products (i.e., the - * distribution of vector this matrix - * is to be multiplied with), but is - * not used for the distribution of - * the columns – rather, all - * column elements of a row are - * stored on the same processor in - * any case. The vector - * n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. + * This constructor is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of degrees of freedom associated with matrix + * rows and the second one specifies the %parallel distribution of the dofs + * associated with columns in the matrix. The second map also provides + * information for the internal arrangement in matrix vector products + * (i.e., the distribution of the vectors this matrix is to be multiplied + * with), but is not used for the distribution of the columns – + * rather, all column elements of a row are stored on the same processor + * in any case. The vector n_entries_per_row specifies the number + * of entries in each row of the newly generated matrix. */ SparseMatrix (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, @@ -1084,43 +860,24 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * This function is initializes the - * Trilinos Epetra matrix according - * to the specified sparsity_pattern, - * and also reassigns the matrix rows - * to different processes according - * to a user-supplied index set and - * %parallel communicator.
In - * programs following the style of - * the tutorial programs, this - * function (and the respective call - * for a rectangular matrix) are the - * natural way to initialize the - * matrix size, its distribution - * among the MPI processes (if run in - * %parallel) as well as the locatoin - * of non-zero elements. Trilinos - * stores the sparsity pattern - * internally, so it won't be needed - * any more after this call, in - * contrast to the deal.II own - * object. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. If - * the flag is not set, each - * processor just sets the elements - * in the sparsity pattern that + * This function initializes the Trilinos Epetra matrix according to + * the specified sparsity_pattern, and also reassigns the matrix rows to + * different processes according to a user-supplied index set and + * %parallel communicator. In programs following the style of the tutorial + * programs, this function (and the respective call for a rectangular + * matrix) are the natural way to initialize the matrix size, its + * distribution among the MPI processes (if run in %parallel) as well as + * the location of non-zero elements. Trilinos stores the sparsity pattern + * internally, so it won't be needed any more after this call, in contrast + * to deal.II's own object. The optional argument @p exchange_data can + * be used for reinitialization with a sparsity pattern that is not fully + * constructed. This feature is only implemented for input sparsity + * patterns of type CompressedSimpleSparsityPattern. If the flag is not + * set, each processor just sets the elements in the sparsity pattern that * belong to its rows. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const IndexSet &parallel_partitioning, @@ -1129,25 +886,16 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * This function is similar to the - * other initialization function - * above, but now also reassigns the - * matrix rows and columns according - * to two user-supplied index sets. - * To be used for rectangular - * matrices. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type + * This function is similar to the other initialization function above, + * but now also reassigns the matrix rows and columns according to two + * user-supplied index sets. To be used for rectangular matrices. The + * optional argument @p exchange_data can be used for reinitialization + * with a sparsity pattern that is not fully constructed. This feature is + * only implemented for input sparsity patterns of type * CompressedSimpleSparsityPattern. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock.
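[Editorial aside — not part of the patch: a minimal C++ sketch of the IndexSet-based reinit() documented in the hunk above. The function name, index ranges and the tridiagonal coupling are invented for illustration; only the reinit() call itself follows the interface declared here.]

#include <deal.II/base/index_set.h>
#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <algorithm>
#include <mpi.h>

// Set up a square matrix of size n_rows, distributed over the processes of
// MPI_COMM_WORLD; this process owns the half-open row range
// [first_local_row, last_local_row).
void setup_matrix (dealii::TrilinosWrappers::SparseMatrix &matrix,
                   const unsigned int n_rows,
                   const unsigned int first_local_row,
                   const unsigned int last_local_row)
{
  dealii::IndexSet locally_owned (n_rows);
  locally_owned.add_range (first_local_row, last_local_row);

  // Describe the couplings of the locally owned rows; a simple tridiagonal
  // pattern stands in for the real one.
  dealii::CompressedSimpleSparsityPattern csp (n_rows, n_rows);
  for (unsigned int i = first_local_row; i < last_local_row; ++i)
    for (unsigned int j = (i > 0 ? i-1 : 0);
         j <= std::min (i+1, n_rows-1); ++j)
      csp.add (i, j);

  // Collective call: every process in the communicator must participate.
  matrix.reinit (locally_owned, csp, MPI_COMM_WORLD);
}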
*/ template void reinit (const IndexSet &row_parallel_partitioning, @@ -1157,32 +905,20 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). In - * contrast to the other reinit - * function with deal.II sparse - * matrix argument, this function - * takes a %parallel partitioning - * specified by the user instead of - * internally generating it. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix + * This function initializes the Trilinos matrix using the deal.II sparse + * matrix and the entries stored therein. It uses a threshold to copy only + * elements with modulus larger than the threshold (so zeros in the + * deal.II matrix can be filtered away). In contrast to the other reinit + * function with deal.II sparse matrix argument, this function takes a + * %parallel partitioning specified by the user instead of internally + * generating it. + * + * The optional parameter copy_values decides whether only the + * sparsity structure of the input matrix should be used or the matrix * entries should be copied, too. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const IndexSet ¶llel_partitioning, @@ -1193,25 +929,17 @@ namespace TrilinosWrappers const ::dealii::SparsityPattern *use_this_sparsity=0); /** - * This function is similar to the - * other initialization function with - * deal.II sparse matrix input above, - * but now takes index sets for both - * the rows and the columns of the - * matrix. Chosen for rectangular + * This function is similar to the other initialization function with + * deal.II sparse matrix input above, but now takes index sets for both + * the rows and the columns of the matrix. Chosen for rectangular * matrices. * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix + * The optional parameter copy_values decides whether only the + * sparsity structure of the input matrix should be used or the matrix * entries should be copied, too. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ template void reinit (const IndexSet &row_parallel_partitioning, @@ -1228,89 +956,64 @@ namespace TrilinosWrappers //@{ /** - * Return the number of rows in - * this matrix. + * Return the number of rows in this matrix. */ size_type m () const; /** - * Return the number of columns - * in this matrix. + * Return the number of columns in this matrix. */ size_type n () const; /** - * Return the local dimension - * of the matrix, i.e. the - * number of rows stored on the - * present MPI process. For - * sequential matrices, this - * number is the same as m(), - * but for %parallel matrices it - * may be smaller. + * Return the local dimension of the matrix, i.e. the number of rows + * stored on the present MPI process. 
For sequential matrices, this number + * is the same as m(), but for %parallel matrices it may be smaller. * - * To figure out which elements - * exactly are stored locally, - * use local_range(). + * To figure out which elements exactly are stored locally, use + * local_range(). */ unsigned int local_size () const; /** - * Return a pair of indices - * indicating which rows of - * this matrix are stored - * locally. The first number is - * the index of the first row - * stored, the second the index - * of the one past the last one - * that is stored locally. If - * this is a sequential matrix, - * then the result will be the - * pair (0,m()), otherwise it - * will be a pair (i,i+n), - * where + * Return a pair of indices indicating which rows of this matrix are + * stored locally. The first number is the index of the first row stored, + * the second the index of the one past the last one that is stored + * locally. If this is a sequential matrix, then the result will be the + * pair (0,m()), otherwise it will be a pair (i,i+n), where * n=local_size(). */ std::pair local_range () const; /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). + * Return whether @p index is in the local range or not, see also + * local_range(). */ bool in_local_range (const size_type index) const; /** - * Return the number of nonzero - * elements of this matrix. + * Return the number of nonzero elements of this matrix. */ size_type n_nonzero_elements () const; /** - * Number of entries in a - * specific row. + * Number of entries in a specific row. */ unsigned int row_length (const size_type row) const; /** - * Returns the state of the matrix, - * i.e., whether compress() needs to - * be called after an operation - * requiring data exchange. A call to - * compress() is also needed when the - * method set() has been called (even - * when working in serial). + * Returns the state of the matrix, i.e., whether compress() needs to be + * called after an operation requiring data exchange. A call to compress() + * is also needed when the method set() has been called (even when working + * in serial). */ bool is_compressed () const; /** - * Determine an estimate for the memory - * consumption (in bytes) of this - * object. Note that only the memory - * reserved on the current processor is - * returned in case this is called in - * an MPI-based program. + * Determine an estimate for the memory consumption (in bytes) of this + * object. Note that only the memory reserved on the current processor is + * returned in case this is called in an MPI-based program. */ size_type memory_consumption () const; @@ -1321,75 +1024,48 @@ namespace TrilinosWrappers //@{ /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keeps the sparsity pattern + * This operator assigns a scalar to a matrix. Since this does usually not + * make much sense (should we set all matrix entries to this value? Only + * the nonzero entries of the sparsity pattern?), this operation is only + * allowed if the actual value to be assigned is zero. 
This operator only + * exists to allow for the obvious notation matrix=0, which sets + * all elements of the matrix to zero, but keeps the sparsity pattern * previously used. */ SparseMatrix & operator = (const double d); /** - * Release all memory and return to a - * state just like after having - * called the default constructor. + * Release all memory and return to a state just like after having called + * the default constructor. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ void clear (); /** * This command does two things: *
      - *
    • If the matrix was initialized - * without a sparsity pattern, - * elements have been added manually - * using the set() command. When this - * process is completed, a call to - * compress() reorganizes the - * internal data structures (aparsity - * pattern) so that a fast access to - * data is possible in matrix-vector - * products. - *
    • If the matrix structure has - * already been fixed (either by - * initialization with a sparsity - * pattern or by calling compress() - * during the setup phase), this - * command does the %parallel - * exchange of data. This is - * necessary when we perform assembly - * on more than one (MPI) process, - * because then some non-local row - * data will accumulate on nodes that - * belong to the current's processor - * element, but are actually held by - * another. This command is usually - * called after all elements have - * been traversed. + *
    • If the matrix was initialized without a sparsity pattern, elements + * have been added manually using the set() command. When this process is + * completed, a call to compress() reorganizes the internal data + * structures (sparsity pattern) so that fast access to data is possible + * in matrix-vector products.
 *
    • If the matrix structure has already been fixed (either by + * initialization with a sparsity pattern or by calling compress() during + * the setup phase), this command does the %parallel exchange of + * data. This is necessary when we perform assembly on more than one (MPI) + * process, because then some non-local row data will accumulate on nodes + * that belong to the current processor's elements, but are actually held + * by another. This command is usually called after all elements have been + * traversed.
 *
    * - * In both cases, this function - * compresses the data structures and - * allows the resulting matrix to be - * used in all other operations like - * matrix-vector products. This is a - * collective operation, i.e., it - * needs to be run on all processors - * when used in %parallel. + * In both cases, this function compresses the data structures and allows + * the resulting matrix to be used in all other operations like + * matrix-vector products. This is a collective operation, i.e., it needs + * to be run on all processors when used in %parallel. * * See @ref GlossCompress "Compressing distributed objects" * for more information. @@ -1402,69 +1078,42 @@ namespace TrilinosWrappers void compress () DEAL_II_DEPRECATED; /** - * Set the element (i,j) - * to @p value. - * - * This function is able to insert new - * elements into the matrix as long as - * compress() has not been called, so - * the sparsity pattern will be - * extended. When compress() is called - * for the first time, then this is no - * longer possible and an insertion of - * elements at positions which have not - * been initialized will throw an - * exception. Note that in case - * elements need to be inserted, it is - * mandatory that elements are inserted - * only once. Otherwise, the elements - * will actually be added in the end - * (since it is not possible to - * efficiently find values to the same - * entry before compress() has been - * called). In the case that an element - * is set more than once, initialize - * the matrix with a sparsity pattern - * first. + * Set the element (i,j) to @p value. + * + * This function is able to insert new elements into the matrix as long as + * compress() has not been called, so the sparsity pattern will be + * extended. When compress() is called for the first time, then this is no + * longer possible and an insertion of elements at positions which have + * not been initialized will throw an exception. Note that in case + * elements need to be inserted, it is mandatory that elements are + * inserted only once. Otherwise, the elements will actually be added in + * the end (since it is not possible to efficiently find values to the + * same entry before compress() has been called). In the case that an + * element is set more than once, initialize the matrix with a sparsity + * pattern first. */ void set (const size_type i, const size_type j, const TrilinosScalar value); /** - * Set all elements given in a - * FullMatrix into the sparse - * matrix locations given by - * indices. In other words, - * this function writes the elements - * in full_matrix into the - * calling matrix, using the - * local-to-global indexing specified - * by indices for both the - * rows and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual + * Set all elements given in a FullMatrix into the sparse matrix + * locations given by indices. In other words, this function + * writes the elements in full_matrix into the calling matrix, + * using the local-to-global indexing specified by indices for + * both the rows and the columns of the matrix. This function assumes a + * quadratic sparse matrix and a quadratic full_matrix, the usual * situation in FE calculations. * - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. 
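[Editorial aside — not part of the patch: a short sketch of the set()/compress() workflow described above. The function name and the diagonal fill are invented; set(), local_range() and compress(VectorOperation::insert) follow the interface documented here, assuming the matrix has already been reinit()ed with a sparsity pattern that contains the diagonal.]

#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/vector.h>   // assumed location of VectorOperation

#include <utility>

// Write a value into every locally owned diagonal entry, then make the
// matrix usable.
void fill_diagonal (dealii::TrilinosWrappers::SparseMatrix &matrix)
{
  typedef dealii::TrilinosWrappers::SparseMatrix::size_type size_type;
  const std::pair<size_type, size_type> range = matrix.local_range ();

  for (size_type i = range.first; i < range.second; ++i)
    matrix.set (i, i, 1.0);

  // Collective call that finishes the 'insert' phase; required before the
  // matrix is used, and before switching from set() to add().
  matrix.compress (dealii::VectorOperation::insert);
}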
When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. + * This function is able to insert new elements into the matrix as long as + * compress() has not been called, so the sparsity pattern will be + * extended. When compress() is called for the first time, then this is no + * longer possible and an insertion of elements at positions which have + * not been initialized will throw an exception. * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be inserted anyway or they should be + * filtered away. The default value is false, i.e., even zero * values are inserted/replaced. */ void set (const std::vector &indices, @@ -1472,11 +1121,9 @@ namespace TrilinosWrappers const bool elide_zero_values = false); /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. + * Same function as before, but now including the possibility to use + * rectangular full_matrices and different local-to-global indexing on + * rows and columns, respectively. */ void set (const std::vector &row_indices, const std::vector &col_indices, @@ -1484,30 +1131,18 @@ namespace TrilinosWrappers const bool elide_zero_values = false); /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. + * Set several elements in the specified row of the matrix with column + * indices as given by col_indices to the respective value. * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero + * This function is able to insert new elements into the matrix as long as + * compress() has not been called, so the sparsity pattern will be + * extended. When compress() is called for the first time, then this is no + * longer possible and an insertion of elements at positions which have + * not been initialized will throw an exception. + * + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be inserted anyway or they should be + * filtered away. The default value is false, i.e., even zero * values are inserted/replaced. */ void set (const size_type row, @@ -1516,30 +1151,18 @@ namespace TrilinosWrappers const bool elide_zero_values = false); /** - * Set several elements to values - * given by values in a - * given row in columns given by - * col_indices into the sparse - * matrix. + * Set several elements to values given by values in a given row + * in columns given by col_indices into the sparse matrix. 
* - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. + * This function is able to insert new elements into the matrix as long as + * compress() has not been called, so the sparsity pattern will be + * extended. When compress() is called for the first time, then this is no + * longer possible and an insertion of elements at positions which have + * not been initialized will throw an exception. * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be inserted anyway or they should be + * filtered away. The default value is false, i.e., even zero * values are inserted/replaced. */ void set (const size_type row, @@ -1549,70 +1172,44 @@ namespace TrilinosWrappers const bool elide_zero_values = false); /** - * Add @p value to the element - * (i,j). + * Add @p value to the element (i,j). * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. Moreover, if - * value is not a finite - * number an exception is thrown. + * Just as the respective call in deal.II SparseMatrix class (but + * in contrast to the situation for PETSc based matrices), this function + * throws an exception if an entry does not exist in the sparsity + * pattern. Moreover, if value is not a finite number an + * exception is thrown. */ void add (const size_type i, const size_type j, const TrilinosScalar value); /** - * Add all elements given in a - * FullMatrix into sparse - * matrix locations given by - * indices. In other words, - * this function adds the elements in - * full_matrix to the - * respective entries in calling - * matrix, using the local-to-global - * indexing specified by - * indices for both the rows - * and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. + * Add all elements given in a FullMatrix into sparse matrix + * locations given by indices. In other words, this function adds + * the elements in full_matrix to the respective entries in + * calling matrix, using the local-to-global indexing specified by + * indices for both the rows and the columns of the matrix. This + * function assumes a quadratic sparse matrix and a quadratic full_matrix, + * the usual situation in FE calculations. + * + * Just as the respective call in deal.II SparseMatrix class (but + * in contrast to the situation for PETSc based matrices), this function + * throws an exception if an entry does not exist in the sparsity pattern. * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. 
- * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be added anyway or these should be + * filtered away and only non-zero data is added. The default value is + * true, i.e., zero values won't be added into the matrix. */ void add (const std::vector &indices, const FullMatrix &full_matrix, const bool elide_zero_values = true); /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. + * Same function as before, but now including the possibility to use + * rectangular full_matrices and different local-to-global indexing on + * rows and columns, respectively. */ void add (const std::vector &row_indices, const std::vector &col_indices, @@ -1620,30 +1217,17 @@ namespace TrilinosWrappers const bool elide_zero_values = true); /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. + * Set several elements in the specified row of the matrix with column + * indices as given by col_indices to the respective value. + * + * Just as the respective call in deal.II SparseMatrix class (but + * in contrast to the situation for PETSc based matrices), this function + * throws an exception if an entry does not exist in the sparsity pattern. + * + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be added anyway or these should be + * filtered away and only non-zero data is added. The default value is + * true, i.e., zero values won't be added into the matrix. */ void add (const size_type row, const std::vector &col_indices, @@ -1651,29 +1235,17 @@ namespace TrilinosWrappers const bool elide_zero_values = true); /** - * Add an array of values given by - * values in the given - * global matrix row at columns - * specified by col_indices in the - * sparse matrix. - * - * Just as the respective call in - * deal.II SparseMatrix class - * (but in contrast to the situation - * for PETSc based matrices), this - * function throws an exception if an - * entry does not exist in the - * sparsity pattern. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. + * Add an array of values given by values in the given global + * matrix row at columns specified by col_indices in the sparse matrix. 
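[Editorial aside — not part of the patch: an illustrative sketch of the add() variant taking a FullMatrix and a set of indices, as documented above. The helper name is invented; in real deal.II programs this step is typically done through a ConstraintMatrix, which is not shown here.]

#include <deal.II/base/types.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <vector>

// Accumulate one cell matrix into the global matrix, the usual step in
// finite element assembly. The entries may belong to rows owned by other
// processes; they are shipped to their owners by the later compress() call.
void copy_local_to_global
  (const dealii::FullMatrix<double>                    &cell_matrix,
   const std::vector<dealii::types::global_dof_index>  &local_dof_indices,
   dealii::TrilinosWrappers::SparseMatrix              &matrix)
{
  matrix.add (local_dof_indices, cell_matrix);
}

// ... and after the loop over all cells:
//   matrix.compress (dealii::VectorOperation::add);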
+ * + * Just as the respective call in deal.II SparseMatrix class (but + * in contrast to the situation for PETSc based matrices), this function + * throws an exception if an entry does not exist in the sparsity pattern. + * + * The optional parameter elide_zero_values can be used to + * specify whether zero values should be added anyway or these should be + * filtered away and only non-zero data is added. The default value is + * true, i.e., zero values won't be added into the matrix. */ void add (const size_type row, const size_type n_cols, @@ -1683,14 +1255,12 @@ namespace TrilinosWrappers const bool col_indices_are_sorted = false); /** - * Multiply the entire matrix - * by a fixed factor. + * Multiply the entire matrix by a fixed factor. */ SparseMatrix &operator *= (const TrilinosScalar factor); /** - * Divide the entire matrix by - * a fixed factor. + * Divide the entire matrix by a fixed factor. */ SparseMatrix &operator /= (const TrilinosScalar factor); @@ -1701,84 +1271,48 @@ namespace TrilinosWrappers void copy_from (const SparseMatrix &source); /** - * Add matrix scaled by - * factor to this matrix, - * i.e. the matrix - * factor*matrix is added to - * this. If the sparsity - * pattern of the calling matrix does - * not contain all the elements in - * the sparsity pattern of the input - * matrix, this function will throw - * an exception. + * Add matrix scaled by factor to this matrix, i.e. the + * matrix factor*matrix is added to this. If the + * sparsity pattern of the calling matrix does not contain all the + * elements in the sparsity pattern of the input matrix, this function + * will throw an exception. */ void add (const TrilinosScalar factor, const SparseMatrix &matrix); /** - * Remove all elements from - * this row by setting - * them to zero. The function - * does not modify the number - * of allocated nonzero - * entries, it only sets some - * entries to zero. It may drop - * them from the sparsity - * pattern, though (but retains - * the allocated memory in case - * new entries are again added - * later). Note that this is a - * global operation, so this - * needs to be done on all MPI - * processes. - * - * This operation is used in - * eliminating constraints - * (e.g. due to hanging nodes) - * and makes sure that we can - * write this modification to - * the matrix without having to - * read entries (such as the - * locations of non-zero - * elements) from it — - * without this operation, - * removing constraints on - * %parallel matrices is a - * rather complicated - * procedure. - * - * The second parameter can be - * used to set the diagonal - * entry of this row to a value - * different from zero. The - * default is to set it to - * zero. + * Remove all elements from this row by setting them to zero. The + * function does not modify the number of allocated nonzero entries, it + * only sets some entries to zero. It may drop them from the sparsity + * pattern, though (but retains the allocated memory in case new entries + * are again added later). Note that this is a global operation, so this + * needs to be done on all MPI processes. + * + * This operation is used in eliminating constraints (e.g. due to hanging + * nodes) and makes sure that we can write this modification to the matrix + * without having to read entries (such as the locations of non-zero + * elements) from it — without this operation, removing constraints + * on %parallel matrices is a rather complicated procedure. 
+ * + * The second parameter can be used to set the diagonal entry of this row + * to a value different from zero. The default is to set it to zero. */ void clear_row (const size_type row, const TrilinosScalar new_diag_value = 0); /** - * Same as clear_row(), except - * that it works on a number of - * rows at once. + * Same as clear_row(), except that it works on a number of rows at once. * - * The second parameter can be - * used to set the diagonal - * entries of all cleared rows - * to something different from - * zero. Note that all of these - * diagonal entries get the - * same value -- if you want - * different values for the - * diagonal entries, you have - * to set them by hand. + * The second parameter can be used to set the diagonal entries of all + * cleared rows to something different from zero. Note that all of these + * diagonal entries get the same value -- if you want different values for + * the diagonal entries, you have to set them by hand. */ void clear_rows (const std::vector &rows, const TrilinosScalar new_diag_value = 0); /** - * Make an in-place transpose - * of a matrix. + * Make an in-place transpose of a matrix. */ void transpose (); @@ -1789,77 +1323,40 @@ namespace TrilinosWrappers //@{ /** - * Return the value of the - * entry (i,j). This - * may be an expensive - * operation and you should - * always take care where to - * call this function. As in - * the deal.II sparse matrix - * class, we throw an exception - * if the respective entry - * doesn't exist in the - * sparsity pattern of this - * class, which is requested - * from Trilinos. Moreover, an - * exception will be thrown - * when the requested element - * is not saved on the calling - * process. + * Return the value of the entry (i,j). This may be an expensive + * operation and you should always take care where to call this + * function. As in the deal.II sparse matrix class, we throw an exception + * if the respective entry doesn't exist in the sparsity pattern of this + * class, which is requested from Trilinos. Moreover, an exception will be + * thrown when the requested element is not saved on the calling process. */ TrilinosScalar operator () (const size_type i, const size_type j) const; /** - * Return the value of the - * matrix entry - * (i,j). If this entry - * does not exist in the - * sparsity pattern, then zero - * is returned. While this may - * be convenient in some cases, - * note that it is simple to - * write algorithms that are - * slow compared to an optimal - * solution, since the sparsity - * of the matrix is not used. - * On the other hand, if you - * want to be sure the entry - * exists, you should use - * operator() instead. - * - * The lack of error checking - * in this function can also - * yield surprising results if - * you have a parallel - * matrix. In that case, just - * because you get a zero - * result from this function - * does not mean that either - * the entry does not exist in - * the sparsity pattern or that - * it does but has a value of - * zero. Rather, it could also - * be that it simply isn't - * stored on the current - * processor; in that case, it - * may be stored on a different - * processor, and possibly so - * with a nonzero value. + * Return the value of the matrix entry (i,j). If this entry does + * not exist in the sparsity pattern, then zero is returned. While this + * may be convenient in some cases, note that it is simple to write + * algorithms that are slow compared to an optimal solution, since the + * sparsity of the matrix is not used. 
On the other hand, if you want to + * be sure the entry exists, you should use operator() instead. + * + * The lack of error checking in this function can also yield surprising + * results if you have a parallel matrix. In that case, just because you + * get a zero result from this function does not mean that either the + * entry does not exist in the sparsity pattern or that it does but has a + * value of zero. Rather, it could also be that it simply isn't stored on + * the current processor; in that case, it may be stored on a different + * processor, and possibly so with a nonzero value. */ TrilinosScalar el (const size_type i, const size_type j) const; /** - * Return the main diagonal - * element in the ith - * row. This function throws an - * error if the matrix is not - * quadratic and it also throws - * an error if (i,i) is not - * element of the local matrix. - * See also the comment in - * trilinos_sparse_matrix.cc. + * Return the main diagonal element in the ith row. This function + * throws an error if the matrix is not quadratic and it also throws an + * error if (i,i) is not element of the local matrix. See also the + * comment in trilinos_sparse_matrix.cc. */ TrilinosScalar diag_element (const size_type i) const; @@ -1975,155 +1472,86 @@ namespace TrilinosWrappers const VectorType &src) const; /** - * Return the square of the norm - * of the vector $v$ with respect - * to the norm induced by this - * matrix, i.e., - * $\left(v,Mv\right)$. This is - * useful, e.g. in the finite - * element context, where the - * $L_2$ norm of a function - * equals the matrix norm with - * respect to the mass matrix of - * the vector representing the - * nodal values of the finite - * element function. - * - * Obviously, the matrix needs to - * be quadratic for this - * operation. - * - * The implementation of this - * function is not as efficient - * as the one in the @p - * SparseMatrix class used in - * deal.II (i.e. the original - * one, not the Trilinos wrapper - * class) since Trilinos doesn't - * support this operation and - * needs a temporary vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. + * Return the square of the norm of the vector $v$ with respect to the + * norm induced by this matrix, i.e., $\left(v,Mv\right)$. This is useful, + * e.g. in the finite element context, where the $L_2$ norm of a function + * equals the matrix norm with respect to the mass matrix of the vector + * representing the nodal values of the finite element function. + * + * Obviously, the matrix needs to be quadratic for this operation. + * + * The implementation of this function is not as efficient as the one in + * the @p SparseMatrix class used in deal.II (i.e. the original one, not + * the Trilinos wrapper class) since Trilinos doesn't support this + * operation and needs a temporary vector. 
+ * + * Note that both vectors have to be distributed vectors generated using + * the same Map as was used for the matrix in case you work on a + * distributed memory architecture, using the interface in the + * TrilinosWrappers::VectorBase class (or one of the two derived classes + * Vector and MPI::Vector). + * + * In case of a localized Vector, this function will only work when + * running on one processor, since the matrix object is inherently + * distributed. Otherwise, an exception will be thrown. */ TrilinosScalar matrix_norm_square (const VectorBase &v) const; /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - * - * The implementation of this - * function is not as efficient - * as the one in the @p - * SparseMatrix class used in - * deal.II (i.e. the original - * one, not the Trilinos - * wrapper class) since - * Trilinos doesn't support - * this operation and needs a - * temporary vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. + * Compute the matrix scalar product $\left(u,Mv\right)$. + * + * The implementation of this function is not as efficient as the one in + * the @p SparseMatrix class used in deal.II (i.e. the original one, not + * the Trilinos wrapper class) since Trilinos doesn't support this + * operation and needs a temporary vector. + * + * Note that both vectors have to be distributed vectors generated using + * the same Map as was used for the matrix in case you work on a + * distributed memory architecture, using the interface in the + * TrilinosWrappers::VectorBase class (or one of the two derived classes + * Vector and MPI::Vector). + * + * In case of a localized Vector, this function will only work when + * running on one processor, since the matrix object is inherently + * distributed. Otherwise, an exception will be thrown. */ TrilinosScalar matrix_scalar_product (const VectorBase &u, const VectorBase &v) const; /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and - * destination dst must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. + * Compute the residual of an equation Mx=b, where the residual is + * defined to be r=b-Mx. Write the residual into @p dst. The + * l2 norm of the residual vector is returned. + * + * Source x and destination dst must not be the same vector.
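[Editorial aside — not part of the patch: a small sketch of matrix_norm_square() and matrix_scalar_product() as documented above, assuming distributed vectors built on the same partitioning as the matrix. The wrapper function names are invented.]

#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>

// (v, Mv) and (u, Mv) for vectors that share the matrix's row distribution;
// matrix_norm_square() requires a square matrix, and both calls are
// collective on the matrix's communicator.
double energy_norm_square (const dealii::TrilinosWrappers::SparseMatrix &M,
                           const dealii::TrilinosWrappers::MPI::Vector  &v)
{
  return M.matrix_norm_square (v);
}

double mass_scalar_product (const dealii::TrilinosWrappers::SparseMatrix &M,
                            const dealii::TrilinosWrappers::MPI::Vector  &u,
                            const dealii::TrilinosWrappers::MPI::Vector  &v)
{
  return M.matrix_scalar_product (u, v);
}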
+ * + * Note that both vectors have to be distributed vectors generated using + * the same Map as was used for the matrix in case you work on a + * distributed memory architecture, using the interface in the + * TrilinosWrappers::VectorBase class (or one of the two derived classes + * Vector and MPI::Vector). + * + * In case of a localized Vector, this function will only work when + * running on one processor, since the matrix object is inherently + * distributed. Otherwise, an exception will be thrown. */ TrilinosScalar residual (VectorBase &dst, const VectorBase &x, const VectorBase &b) const; /** - * Perform the matrix-matrix - * multiplication C = A * B, - * or, if an optional vector argument - * is given, C = A * diag(V) * - * B, where diag(V) - * defines a diagonal matrix with the - * vector entries. + * Perform the matrix-matrix multiplication C = A * B, or, if an + * optional vector argument is given, C = A * diag(V) * B, where + * diag(V) defines a diagonal matrix with the vector entries. * - * This function assumes that the - * calling matrix A and - * B have compatible - * sizes. The size of C will - * be set within this function. + * This function assumes that the calling matrix A and B + * have compatible sizes. The size of C will be set within this + * function. * - * The content as well as the sparsity - * pattern of the matrix C will be - * changed by this function, so make - * sure that the sparsity pattern is - * not used somewhere else in your - * program. This is an expensive - * operation, so think twice before you - * use this function. + * The content as well as the sparsity pattern of the matrix C will be + * changed by this function, so make sure that the sparsity pattern is not + * used somewhere else in your program. This is an expensive operation, so + * think twice before you use this function. */ void mmult (SparseMatrix &C, const SparseMatrix &B, @@ -2131,30 +1559,20 @@ namespace TrilinosWrappers /** - * Perform the matrix-matrix - * multiplication with the transpose of - * this, i.e., C = - * AT * B, or, if an - * optional vector argument is given, - * C = AT * diag(V) * - * B, where diag(V) - * defines a diagonal matrix with the - * vector entries. + * Perform the matrix-matrix multiplication with the transpose of + * this, i.e., C = AT * B, or, if an optional + * vector argument is given, C = AT * diag(V) * B, + * where diag(V) defines a diagonal matrix with the vector + * entries. + * + * This function assumes that the calling matrix A and B + * have compatible sizes. The size of C will be set within this + * function. + * + * The content as well as the sparsity pattern of the matrix C will be + * changed by this function, so make sure that the sparsity pattern is not + * used somewhere else in your program. This is an expensive operation, so + * think twice before you use this function.
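[Editorial aside — not part of the patch: a minimal sketch of the mmult() call documented above; the function name is invented, and Tmmult() is only mentioned in a comment since it is documented in the following hunk.]

#include <deal.II/lac/trilinos_sparse_matrix.h>

// C = A * B: the size and sparsity pattern of C are rebuilt inside mmult(),
// so C only needs to be default constructed (or be a matrix whose pattern
// may be overwritten).
void multiply (const dealii::TrilinosWrappers::SparseMatrix &A,
               const dealii::TrilinosWrappers::SparseMatrix &B)
{
  dealii::TrilinosWrappers::SparseMatrix C;
  A.mmult (C, B);    // for A^T * B, use A.Tmmult (C, B), documented next
}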
*/ void Tmmult (SparseMatrix &C, const SparseMatrix &B, @@ -2167,46 +1585,27 @@ namespace TrilinosWrappers //@{ /** - * Return the - * l1-norm of - * the matrix, that is - * $|M|_1= - * \max_{\mathrm{all\ columns\ } j} - * \sum_{\mathrm{all\ rows\ } i} - * |M_{ij}|$, (max. sum - * of columns). This is the - * natural matrix norm that is - * compatible to the l1-norm for - * vectors, i.e. $|Mv|_1 \leq - * |M|_1 |v|_1$. - * (cf. Haemmerlin-Hoffmann: - * Numerische Mathematik) + * Return the l1-norm of the matrix, that is $|M|_1= + * \max_{\mathrm{all\ columns\ } j} \sum_{\mathrm{all\ rows\ } i} + * |M_{ij}|$, (max. sum of columns). This is the natural matrix norm that + * is compatible to the l1-norm for vectors, i.e. $|Mv|_1 \leq |M|_1 + * |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik) */ TrilinosScalar l1_norm () const; /** - * Return the linfty-norm of the - * matrix, that is - * $|M|_\infty=\max_{\mathrm{all\ - * rows\ } i}\sum_{\mathrm{all\ - * columns\ } j} |M_{ij}|$, - * (max. sum of rows). This is - * the natural matrix norm that - * is compatible to the - * linfty-norm of vectors, i.e. - * $|Mv|_\infty \leq |M|_\infty - * |v|_\infty$. - * (cf. Haemmerlin-Hoffmann: - * Numerische Mathematik) + * Return the linfty-norm of the matrix, that is + * $|M|_\infty=\max_{\mathrm{all\ rows\ } i}\sum_{\mathrm{all\ columns\ } + * j} |M_{ij}|$, (max. sum of rows). This is the natural matrix norm that + * is compatible to the linfty-norm of vectors, i.e. $|Mv|_\infty \leq + * |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische + * Mathematik) */ TrilinosScalar linfty_norm () const; /** - * Return the frobenius norm of - * the matrix, i.e. the square - * root of the sum of squares - * of all entries in the - * matrix. + * Return the frobenius norm of the matrix, i.e. the square root of the + * sum of squares of all entries in the matrix. */ TrilinosScalar frobenius_norm () const; @@ -2217,60 +1616,44 @@ namespace TrilinosWrappers //@{ /** - * Return a const reference to the - * underlying Trilinos - * Epetra_CrsMatrix data. + * Return a const reference to the underlying Trilinos Epetra_CrsMatrix + * data. */ const Epetra_CrsMatrix &trilinos_matrix () const; /** - * Return a const reference to the - * underlying Trilinos - * Epetra_CrsGraph data that stores - * the sparsity pattern of the - * matrix. + * Return a const reference to the underlying Trilinos Epetra_CrsGraph + * data that stores the sparsity pattern of the matrix. */ const Epetra_CrsGraph &trilinos_sparsity_pattern () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * domain space of this matrix, i.e., - * the partitioning of the vectors - * this matrix has to be multiplied - * with. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the domain space of this matrix, i.e., the + * partitioning of the vectors this matrix has to be multiplied with. */ const Epetra_Map &domain_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * range space of this matrix, i.e., - * the partitioning of the vectors - * that are result from matrix-vector + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the range space of this matrix, i.e., the + * partitioning of the vectors that are result from matrix-vector * products. 
*/ const Epetra_Map &range_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * matrix rows. Equal to the - * partitioning of the range. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the matrix rows. Equal to the partitioning of + * the range. */ const Epetra_Map &row_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * matrix columns. This is in general - * not equal to the partitioner - * Epetra_Map for the domain because - * of overlap in the matrix. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the matrix columns. This is in general not + * equal to the partitioner Epetra_Map for the domain because of overlap + * in the matrix. */ const Epetra_Map &col_partitioner () const; //@} @@ -2280,8 +1663,7 @@ namespace TrilinosWrappers //@{ /** - * STL-like iterator with the - * first entry. + * STL-like iterator with the first entry. */ const_iterator begin () const; @@ -2291,40 +1673,27 @@ namespace TrilinosWrappers const_iterator end () const; /** - * STL-like iterator with the - * first entry of row @p r. + * STL-like iterator with the first entry of row @p r. * - * Note that if the given row - * is empty, i.e. does not - * contain any nonzero entries, - * then the iterator returned - * by this function equals - * end(r). Note also - * that the iterator may not be - * dereferencable in that case. + * Note that if the given row is empty, i.e. does not contain any nonzero + * entries, then the iterator returned by this function equals + * end(r). Note also that the iterator may not be dereferencable + * in that case. */ const_iterator begin (const size_type r) const; /** - * Final iterator of row - * r. It points to the - * first element past the end - * of line @p r, or past the - * end of the entire sparsity - * pattern. + * Final iterator of row r. It points to the first element past + * the end of line @p r, or past the end of the entire sparsity pattern. * - * Note that the end iterator - * is not necessarily - * dereferencable. This is in - * particular the case if it is - * the end iterator for the - * last row of a matrix. + * Note that the end iterator is not necessarily dereferencable. This is + * in particular the case if it is the end iterator for the last row of a + * matrix. */ const_iterator end (const size_type r) const; /** - * STL-like iterator with the - * first entry. + * STL-like iterator with the first entry. */ iterator begin (); @@ -2334,34 +1703,22 @@ namespace TrilinosWrappers iterator end (); /** - * STL-like iterator with the - * first entry of row @p r. + * STL-like iterator with the first entry of row @p r. * - * Note that if the given row - * is empty, i.e. does not - * contain any nonzero entries, - * then the iterator returned - * by this function equals - * end(r). Note also - * that the iterator may not be - * dereferencable in that case. + * Note that if the given row is empty, i.e. does not contain any nonzero + * entries, then the iterator returned by this function equals + * end(r). Note also that the iterator may not be dereferencable + * in that case. */ iterator begin (const size_type r); /** - * Final iterator of row - * r. It points to the - * first element past the end - * of line @p r, or past the - * end of the entire sparsity - * pattern. + * Final iterator of row r. 
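[Editorial aside — not part of the patch: an illustrative sketch of the row-wise iterators documented here, combined with local_range(). The function name is invented; the matrix is assumed to be compressed before iterating.]

#include <deal.II/lac/trilinos_sparse_matrix.h>

#include <iostream>
#include <utility>

// Walk the locally stored rows entry by entry.
void print_local_entries (const dealii::TrilinosWrappers::SparseMatrix &A)
{
  typedef dealii::TrilinosWrappers::SparseMatrix::size_type size_type;
  const std::pair<size_type, size_type> range = A.local_range ();

  for (size_type r = range.first; r < range.second; ++r)
    for (dealii::TrilinosWrappers::SparseMatrix::const_iterator
           entry = A.begin (r); entry != A.end (r); ++entry)
      std::cout << "(" << entry->row () << "," << entry->column () << ") "
                << entry->value () << std::endl;
}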
It points to the first element past + * the end of line @p r, or past the end of the entire sparsity pattern. * - * Note that the end iterator - * is not necessarily - * dereferencable. This is in - * particular the case if it is - * the end iterator for the - * last row of a matrix. + * Note that the end iterator is not necessarily dereferencable. This is + * in particular the case if it is the end iterator for the last row of a + * matrix. */ iterator end (const size_type r); @@ -2372,27 +1729,18 @@ namespace TrilinosWrappers //@{ /** - * Abstract Trilinos object - * that helps view in ASCII - * other Trilinos - * objects. Currently this - * function is not - * implemented. TODO: Not + * Abstract Trilinos object that helps view in ASCII other Trilinos + * objects. Currently this function is not implemented. TODO: Not * implemented. */ void write_ascii (); /** - * Print the matrix to the given - * stream, using the format - * (line,col) value, i.e. one - * nonzero entry of the matrix per - * line. The optional flag outputs the - * sparsity pattern in Trilinos style, - * where the data is sorted according - * to the processor number when printed - * to the stream, as well as a summary - * of the matrix like the global size. + * Print the matrix to the given stream, using the format (line,col) + * value, i.e. one nonzero entry of the matrix per line. The optional + * flag outputs the sparsity pattern in Trilinos style, where the data is + * sorted according to the processor number when printed to the stream, as + * well as a summary of the matrix like the global size. */ void print (std::ostream &out, const bool write_extended_trilinos_info = false) const; @@ -2455,35 +1803,20 @@ namespace TrilinosWrappers protected: /** - * For some matrix storage - * formats, in particular for the - * PETSc distributed blockmatrices, - * set and add operations on - * individual elements can not be - * freely mixed. Rather, one has - * to synchronize operations when - * one wants to switch from - * setting elements to adding to - * elements. - * BlockMatrixBase automatically - * synchronizes the access by - * calling this helper function - * for each block. - * This function ensures that the - * matrix is in a state that - * allows adding elements; if it - * previously already was in this - * state, the function does - * nothing. + * For some matrix storage formats, in particular for the PETSc distributed + * blockmatrices, set and add operations on individual elements can not be + * freely mixed. Rather, one has to synchronize operations when one wants + * to switch from setting elements to adding to elements. BlockMatrixBase + * automatically synchronizes the access by calling this helper function + * for each block. This function ensures that the matrix is in a state + * that allows adding elements; if it previously already was in this state, + * the function does nothing. */ void prepare_add(); /** - * Same as prepare_add() but - * prepare the matrix for setting - * elements if the representation - * of elements in this class - * requires such an operation. + * Same as prepare_add() but prepare the matrix for setting elements if the + * representation of elements in this class requires such an operation. */ void prepare_set(); @@ -2492,60 +1825,46 @@ namespace TrilinosWrappers private: /** - * Pointer to the user-supplied - * Epetra Trilinos mapping of - * the matrix columns that - * assigns parts of the matrix - * to the individual processes. 
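A short sketch of the STL-like row iterators documented above; the accessor interface (row(), column(), value()) is assumed from the wrapper classes and the function is a hypothetical helper, not part of this patch.

```cpp
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <iostream>

// Illustrative only: print the nonzero entries of one row of an already
// assembled matrix A using begin(row)/end(row).
void print_row (const dealii::TrilinosWrappers::SparseMatrix &A,
                const dealii::types::global_dof_index         row)
{
  // For an empty row, begin(row) == end(row) and the loop body never runs;
  // neither iterator may be dereferenced in that case.
  for (dealii::TrilinosWrappers::SparseMatrix::const_iterator p = A.begin (row);
       p != A.end (row); ++p)
    std::cout << '(' << p->row () << ',' << p->column () << ") = "
              << p->value () << std::endl;
}
```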
+ * Pointer to the user-supplied Epetra Trilinos mapping of the matrix + * columns that assigns parts of the matrix to the individual processes. */ std_cxx1x::shared_ptr column_space_map; /** - * A sparse matrix object in - * Trilinos to be used for - * finite element based - * problems which allows for - * assembling into non-local - * elements. The actual type, - * a sparse matrix, is set in - * the constructor. + * A sparse matrix object in Trilinos to be used for finite element based + * problems which allows for assembling into non-local elements. The + * actual type, a sparse matrix, is set in the constructor. */ std_cxx1x::shared_ptr matrix; /** - * Trilinos doesn't allow to mix - * additions to matrix entries and - * overwriting them (to make - * synchronisation of %parallel - * computations simpler). The way we - * do it is to, for each access - * operation, store whether it is an - * insertion or an addition. If the - * previous one was of different - * type, then we first have to flush - * the Trilinos buffers; otherwise, - * we can simply go on. Luckily, - * Trilinos has an object for this - * which does already all the - * %parallel communications in such a - * case, so we simply use their - * model, which stores whether the - * last operation was an addition or - * an insertion. + * A sparse matrix object in Trilinos to be used for collecting the + * non-local elements if the matrix was constructed from a Trilinos + * sparsity pattern with the respective option. + */ + std_cxx1x::shared_ptr nonlocal_matrix; + + /** + * Trilinos doesn't allow to mix additions to matrix entries and + * overwriting them (to make synchronisation of %parallel computations + * simpler). The way we do it is to, for each access operation, store + * whether it is an insertion or an addition. If the previous one was of + * different type, then we first have to flush the Trilinos buffers; + * otherwise, we can simply go on. Luckily, Trilinos has an object for + * this which does already all the %parallel communications in such a + * case, so we simply use their model, which stores whether the last + * operation was an addition or an insertion. */ Epetra_CombineMode last_action; /** - * A boolean variable to hold - * information on whether the - * vector is compressed or not. + * A boolean variable to hold information on whether the vector is + * compressed or not. */ bool compressed; /** - * To allow calling protected - * prepare_add() and - * prepare_set(). + * To allow calling protected prepare_add() and prepare_set(). 
*/ friend class BlockMatrixBase; }; @@ -2965,46 +2284,6 @@ namespace TrilinosWrappers - inline - void - SparseMatrix::compress (::dealii::VectorOperation::values operation) - { - - Epetra_CombineMode mode = last_action; - if (last_action == Zero) - { - if ((operation==::dealii::VectorOperation::add) || - (operation==::dealii::VectorOperation::unknown)) - mode = Add; - else if (operation==::dealii::VectorOperation::insert) - mode = Insert; - } - else - { - Assert( - ((last_action == Add) && (operation!=::dealii::VectorOperation::insert)) - || - ((last_action == Insert) && (operation!=::dealii::VectorOperation::add)), - ExcMessage("operation and argument to compress() do not match")); - } - - // flush buffers - int ierr; - ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(), - true, mode); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - - ierr = matrix->OptimizeStorage (); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - - last_action = Zero; - - compressed = true; - } - - - inline void SparseMatrix::compress () @@ -3023,19 +2302,17 @@ namespace TrilinosWrappers const int ierr = matrix->PutScalar(d); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + if (nonlocal_matrix.get() != 0) + nonlocal_matrix->PutScalar(d); return *this; } - // Inline the set() and add() - // functions, since they will be - // called frequently, and the - // compiler can optimize away - // some unnecessary loops when - // the sizes are given at - // compile time. + // Inline the set() and add() functions, since they will be called + // frequently, and the compiler can optimize away some unnecessary loops + // when the sizes are given at compile time. inline void SparseMatrix::set (const size_type i, @@ -3208,7 +2485,7 @@ namespace TrilinosWrappers // standard Insert/ReplaceGlobalValues function. Nevertheless, the way // we call it is the fastest one (any other will lead to repeated // allocation and deallocation of memory in order to call the function - // we already use, which is very unefficient if writing one element at + // we already use, which is very inefficient if writing one element at // a time). 
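The set/add protocol described above (insertions and additions cannot be mixed without flushing the Trilinos buffers) can be sketched as follows; the sparsity pattern and values are made up, and the SparseMatrix-from-SparsityPattern constructor is assumed from the wrapper interface rather than shown in this patch.

```cpp
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>

// Illustrative only: each phase (insert or add) is finished with a matching
// compress() call before switching to the other kind of operation.
void toy_assembly ()
{
  dealii::TrilinosWrappers::SparsityPattern sp (4, 4, 2);
  for (unsigned int i = 0; i < 4; ++i)
    sp.add (i, i);
  sp.compress ();

  dealii::TrilinosWrappers::SparseMatrix A (sp);

  A.set (0, 0, 1.0);                            // insertion phase
  A.compress (dealii::VectorOperation::insert);

  A.add (0, 0, 2.0);                            // addition phase
  A.compress (dealii::VectorOperation::add);    // A(0,0) is now 3.0
}
```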
compressed = false; @@ -3222,6 +2499,19 @@ namespace TrilinosWrappers if (ierr > 0) ierr = 0; } + else if (nonlocal_matrix.get() != 0) + { + // this is the case when we have explicitly set the off-processor + // rows and want to create a separate matrix object for them (to + // retain thread-safety) + Assert (nonlocal_matrix->RowMap().LID(static_cast(row)) != -1, + ExcMessage("Attempted to write into off-processor matrix row " + "that has not be specified as being writable upon " + "initialization")); + ierr = nonlocal_matrix->ReplaceGlobalValues(row, n_columns, + col_value_ptr, + col_index_ptr); + } else ierr = matrix->ReplaceGlobalValues (1, (TrilinosWrappers::types::int_type *)&row, @@ -3413,6 +2703,20 @@ namespace TrilinosWrappers col_value_ptr, col_index_ptr); } + else if (nonlocal_matrix.get() != 0) + { + compressed = false; + // this is the case when we have explicitly set the off-processor rows + // and want to create a separate matrix object for them (to retain + // thread-safety) + Assert (nonlocal_matrix->RowMap().LID(static_cast(row)) != -1, + ExcMessage("Attempted to write into off-processor matrix row " + "that has not be specified as being writable upon " + "initialization")); + ierr = nonlocal_matrix->SumIntoGlobalValues(row, n_columns, + col_value_ptr, + col_index_ptr); + } else { // When we're at off-processor data, we have to stick with the diff --git a/deal.II/include/deal.II/lac/trilinos_sparsity_pattern.h b/deal.II/include/deal.II/lac/trilinos_sparsity_pattern.h index 3519bf8af1..01d6d16676 100644 --- a/deal.II/include/deal.II/lac/trilinos_sparsity_pattern.h +++ b/deal.II/include/deal.II/lac/trilinos_sparsity_pattern.h @@ -93,20 +93,17 @@ namespace TrilinosWrappers Accessor (const Accessor &a); /** - * Row number of the element - * represented by this object. + * Row number of the element represented by this object. */ size_type row() const; /** - * Index in row of the element - * represented by this object. + * Index in row of the element represented by this object. */ size_type index() const; /** - * Column number of the element - * represented by this object. + * Column number of the element represented by this object. */ size_type column() const; @@ -142,45 +139,28 @@ namespace TrilinosWrappers size_type a_index; /** - * Cache where we store the - * column indices of the - * present row. This is - * necessary, since Trilinos - * makes access to the elements - * of its matrices rather hard, - * and it is much more - * efficient to copy all column - * entries of a row once when - * we enter it than repeatedly - * asking Trilinos for - * individual ones. This also - * makes some sense since it is - * likely that we will access - * them sequentially anyway. + * Cache where we store the column indices of the present row. This is + * necessary, since Trilinos makes access to the elements of its + * matrices rather hard, and it is much more efficient to copy all + * column entries of a row once when we enter it than repeatedly asking + * Trilinos for individual ones. This also makes some sense since it is + * likely that we will access them sequentially anyway. * - * In order to make copying of - * iterators/accessor of - * acceptable performance, we - * keep a shared pointer to - * these entries so that more - * than one accessor can access - * this data if necessary. + * In order to make copying of iterators/accessor of acceptable + * performance, we keep a shared pointer to these entries so that more + * than one accessor can access this data if necessary. 
*/ std_cxx1x::shared_ptr > colnum_cache; /** - * Discard the old row caches - * (they may still be used by - * other accessors) and - * generate new ones for the - * row pointed to presently by + * Discard the old row caches (they may still be used by other + * accessors) and generate new ones for the row pointed to presently by * this accessor. */ void visit_present_row (); /** - * Make enclosing class a - * friend. + * Make enclosing class a friend. */ friend class Iterator; }; @@ -199,10 +179,8 @@ namespace TrilinosWrappers typedef dealii::types::global_dof_index size_type; /** - * Constructor. Create an - * iterator into the matrix @p - * matrix for the given row and - * the index within it. + * Constructor. Create an iterator into the matrix @p matrix for the + * given row and the index within it. */ Iterator (const SparsityPattern *sparsity_pattern, const size_type row, @@ -234,9 +212,8 @@ namespace TrilinosWrappers const Accessor *operator-> () const; /** - * Comparison. True, if both - * iterators point to the same - * matrix position. + * Comparison. True, if both iterators point to the same matrix + * position. */ bool operator == (const Iterator &) const; @@ -246,11 +223,8 @@ namespace TrilinosWrappers bool operator != (const Iterator &) const; /** - * Comparison operator. Result - * is true if either the first - * row number is smaller or if - * the row numbers are equal - * and the first index is + * Comparison operator. Result is true if either the first row number is + * smaller or if the row numbers are equal and the first index is * smaller. */ bool operator < (const Iterator &) const; @@ -266,8 +240,7 @@ namespace TrilinosWrappers private: /** - * Store an object of the - * accessor class. + * Store an object of the accessor class. */ Accessor accessor; @@ -317,92 +290,61 @@ namespace TrilinosWrappers */ //@{ /** - * Default constructor. Generates an - * empty (zero-size) sparsity - * pattern. + * Default constructor. Generates an empty (zero-size) sparsity pattern. */ SparsityPattern (); /** - * Generate a sparsity pattern that is - * completely stored locally, having - * $m$ rows and $n$ columns. The - * resulting matrix will be completely + * Generate a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely * stored locally, too. * - * It is possible to specify the - * number of columns entries per row - * using the optional @p - * n_entries_per_row - * argument. However, this value does - * not need to be accurate or even - * given at all, since one does - * usually not have this kind of - * information before building the - * sparsity pattern (the usual case - * when the function - * DoFTools::make_sparsity_pattern() - * is called). The entries are - * allocated dynamically in a similar - * manner as for the deal.II - * CompressedSparsityPattern - * classes. However, a good estimate - * will reduce the setup time of the - * sparsity pattern. + * It is possible to specify the number of columns entries per row using + * the optional @p n_entries_per_row argument. However, this value does + * not need to be accurate or even given at all, since one does usually + * not have this kind of information before building the sparsity pattern + * (the usual case when the function DoFTools::make_sparsity_pattern() is + * called). The entries are allocated dynamically in a similar manner as + * for the deal.II CompressedSparsityPattern classes. 
However, a good + * estimate will reduce the setup time of the sparsity pattern. */ SparsityPattern (const size_type m, const size_type n, const size_type n_entries_per_row = 0); /** - * Generate a sparsity pattern that is - * completely stored locally, having - * $m$ rows and $n$ columns. The - * resulting matrix will be completely + * Generate a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely * stored locally, too. * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row (an information usually - * not available, though). + * The vector n_entries_per_row specifies the number of entries + * in each row (an information usually not available, though). */ SparsityPattern (const size_type m, const size_type n, const std::vector &n_entries_per_row); /** - * Copy constructor. Sets the calling - * sparsity pattern to be the same as + * Copy constructor. Sets the calling sparsity pattern to be the same as * the input sparsity pattern. */ SparsityPattern (const SparsityPattern &input_sparsity_pattern); /** - * Destructor. Made virtual so that - * one can use pointers to this - * class. + * Destructor. Made virtual so that one can use pointers to this class. */ virtual ~SparsityPattern (); /** - * Initialize a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ - * columns. The resulting matrix will - * be completely stored locally. + * Initialize a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally. * - * The number of columns entries per - * row is specified as the maximum - * number of entries argument. This - * does not need to be an accurate - * number since the entries are - * allocated dynamically in a similar - * manner as for the deal.II - * CompressedSparsityPattern classes, - * but a good estimate will reduce - * the setup time of the sparsity - * pattern. + * The number of columns entries per row is specified as the maximum + * number of entries argument. This does not need to be an accurate + * number since the entries are allocated dynamically in a similar manner + * as for the deal.II CompressedSparsityPattern classes, but a good + * estimate will reduce the setup time of the sparsity pattern. */ void reinit (const size_type m, @@ -410,16 +352,12 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * Initialize a sparsity pattern that - * is completely stored locally, - * having $m$ rows and $n$ columns. The - * resulting matrix will be - * completely stored locally. + * Initialize a sparsity pattern that is completely stored locally, having + * $m$ rows and $n$ columns. The resulting matrix will be completely + * stored locally. * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. + * The vector n_entries_per_row specifies the number of entries + * in each row. */ void reinit (const size_type m, @@ -427,65 +365,46 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * Copy function. Sets the calling - * sparsity pattern to be the same as - * the input sparsity pattern. + * Copy function. Sets the calling sparsity pattern to be the same as the + * input sparsity pattern. */ void copy_from (const SparsityPattern &input_sparsity_pattern); /** - * Copy function from one of the - * deal.II sparsity patterns. 
If used - * in parallel, this function uses an - * ad-hoc partitioning of the rows - * and columns. + * Copy function from one of the deal.II sparsity patterns. If used in + * parallel, this function uses an ad-hoc partitioning of the rows and + * columns. */ template void copy_from (const SparsityType &nontrilinos_sparsity_pattern); /** - * Copy operator. This operation is - * only allowed for empty objects, to - * avoid potentially very costly - * operations automatically - * synthesized by the compiler. Use - * copy_from() instead if you know - * that you really want to copy a - * sparsity pattern with non-trivial - * content. + * Copy operator. This operation is only allowed for empty objects, to + * avoid potentially very costly operations automatically synthesized by + * the compiler. Use copy_from() instead if you know that you really want + * to copy a sparsity pattern with non-trivial content. */ SparsityPattern &operator = (const SparsityPattern &input_sparsity_pattern); /** - * Release all memory and return to a - * state just like after having - * called the default constructor. + * Release all memory and return to a state just like after having called + * the default constructor. * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. + * This is a collective operation that needs to be called on all + * processors in order to avoid a dead lock. */ void clear (); /** - * In analogy to our own - * SparsityPattern class, this - * function compresses the sparsity - * pattern and allows the resulting - * pattern to be used for actually - * generating a (Trilinos-based) - * matrix. This function also - * exchanges non-local data that - * might have accumulated during the - * addition of new elements. This - * function must therefore be called - * once the structure is fixed. This - * is a collective operation, i.e., - * it needs to be run on all - * processors when used in parallel. + * In analogy to our own SparsityPattern class, this function compresses + * the sparsity pattern and allows the resulting pattern to be used for + * actually generating a (Trilinos-based) matrix. This function also + * exchanges non-local data that might have accumulated during the + * addition of new elements. This function must therefore be called once + * the structure is fixed. This is a collective operation, i.e., it needs + * to be run on all processors when used in parallel. */ void compress (); //@} @@ -495,79 +414,47 @@ namespace TrilinosWrappers //@{ /** - * Constructor for a square sparsity - * pattern using an Epetra_map for - * the description of the %parallel - * partitioning. Moreover, the number - * of nonzero entries in the rows of - * the sparsity pattern can be - * specified. Note that this number - * does not need to be exact, and it - * is allowed that the actual - * sparsity structure has more - * nonzero entries than specified in - * the constructor (the usual case - * when the function - * DoFTools::make_sparsity_pattern() - * is called). However it is still - * advantageous to provide good - * estimates here since a good value - * will avoid repeated allocation of - * memory, which considerably - * increases the performance when - * creating the sparsity pattern. + * Constructor for a square sparsity pattern using an Epetra_map for the + * description of the %parallel partitioning. Moreover, the number of + * nonzero entries in the rows of the sparsity pattern can be + * specified. 
Note that this number does not need to be exact, and it is + * allowed that the actual sparsity structure has more nonzero entries + * than specified in the constructor (the usual case when the function + * DoFTools::make_sparsity_pattern() is called). However it is still + * advantageous to provide good estimates here since a good value will + * avoid repeated allocation of memory, which considerably increases the + * performance when creating the sparsity pattern. */ SparsityPattern (const Epetra_Map ¶llel_partitioning, const size_type n_entries_per_row = 0); /** - * Same as before, but now use the - * exact number of nonzeros in each m - * row. Since we know the number of - * elements in the sparsity pattern - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes the - * creation process by the respective - * SparsityPattern::reinit call - * considerably faster. However, this - * is a rather unusual situation, - * since knowing the number of - * entries in each row is usually - * connected to knowing the indices - * of nonzero entries, which the - * sparsity pattern is designed to - * describe. + * Same as before, but now use the exact number of nonzeros in each m + * row. Since we know the number of elements in the sparsity pattern + * exactly in this case, we can already allocate the right amount of + * memory, which makes the creation process by the respective + * SparsityPattern::reinit call considerably faster. However, this is a + * rather unusual situation, since knowing the number of entries in each + * row is usually connected to knowing the indices of nonzero entries, + * which the sparsity pattern is designed to describe. */ SparsityPattern (const Epetra_Map ¶llel_partitioning, const std::vector &n_entries_per_row); /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant to - * be used for generating rectangular - * sparsity pattern, where one map - * describes the %parallel partitioning - * of the dofs associated with the - * sparsity pattern rows and the other - * one of the sparsity pattern - * columns. Note that there is no real - * parallelism along the columns - * – the processor that owns a - * certain row always owns all the - * column elements, no matter how far - * they might be spread out. The second - * Epetra_Map is only used to specify - * the number of columns and for - * specifying the correct domain space - * when performing matrix-vector - * products with vectors based on the - * same column map. + * This constructor is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular sparsity pattern, where one map + * describes the %parallel partitioning of the dofs associated with the + * sparsity pattern rows and the other one of the sparsity pattern + * columns. Note that there is no real parallelism along the columns + * – the processor that owns a certain row always owns all the + * column elements, no matter how far they might be spread out. The second + * Epetra_Map is only used to specify the number of columns and for + * specifying the correct domain space when performing matrix-vector + * products with vectors based on the same column map. * - * The number of columns entries per - * row is specified as the maximum + * The number of columns entries per row is specified as the maximum * number of entries argument. 
*/ SparsityPattern (const Epetra_Map &row_parallel_partitioning, @@ -575,74 +462,47 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant to - * be used for generating rectangular - * matrices, where one map specifies - * the %parallel distribution of rows - * and the second one specifies the - * distribution of degrees of freedom - * associated with matrix columns. This - * second map is however not used for - * the distribution of the columns - * themselves – rather, all - * column elements of a row are stored - * on the same processor. The vector - * n_entries_per_row specifies - * the number of entries in each row of - * the newly generated matrix. + * This constructor is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of rows and the second one specifies the + * distribution of degrees of freedom associated with matrix columns. This + * second map is however not used for the distribution of the columns + * themselves – rather, all column elements of a row are stored on + * the same processor. The vector n_entries_per_row specifies the + * number of entries in each row of the newly generated matrix. */ SparsityPattern (const Epetra_Map &row_parallel_partitioning, const Epetra_Map &col_parallel_partitioning, const std::vector &n_entries_per_row); /** - * Reinitialization function for - * generating a square sparsity pattern - * using an Epetra_Map for the - * description of the %parallel - * partitioning and the number of - * nonzero entries in the rows of the - * sparsity pattern. Note that this - * number does not need to be exact, - * and it is even allowed that the - * actual sparsity structure has more - * nonzero entries than specified in - * the constructor. However it is still - * advantageous to provide good - * estimates here since this will - * considerably increase the - * performance when creating the - * sparsity pattern. + * Reinitialization function for generating a square sparsity pattern + * using an Epetra_Map for the description of the %parallel partitioning + * and the number of nonzero entries in the rows of the sparsity + * pattern. Note that this number does not need to be exact, and it is + * even allowed that the actual sparsity structure has more nonzero + * entries than specified in the constructor. However it is still + * advantageous to provide good estimates here since this will + * considerably increase the performance when creating the sparsity + * pattern. * - * This function does not create any - * entries by itself, but provides - * the correct data structures that - * can be used by the respective - * add() function. + * This function does not create any entries by itself, but provides the + * correct data structures that can be used by the respective add() + * function. */ void reinit (const Epetra_Map ¶llel_partitioning, const size_type n_entries_per_row = 0); /** - * Same as before, but now use the - * exact number of nonzeros in each m - * row. Since we know the number of - * elements in the sparsity pattern - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes process of - * adding entries to the sparsity - * pattern considerably - * faster. 
However, this is a rather - * unusual situation, since knowing - * the number of entries in each row - * is usually connected to knowing - * the indices of nonzero entries, - * which the sparsity pattern is + * Same as before, but now use the exact number of nonzeros in each m + * row. Since we know the number of elements in the sparsity pattern + * exactly in this case, we can already allocate the right amount of + * memory, which makes process of adding entries to the sparsity pattern + * considerably faster. However, this is a rather unusual situation, since + * knowing the number of entries in each row is usually connected to + * knowing the indices of nonzero entries, which the sparsity pattern is * designed to describe. */ void @@ -650,31 +510,19 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * This reinit function is similar to - * the one above, but it now takes - * two different Epetra maps for rows - * and columns. This interface is - * meant to be used for generating - * rectangular sparsity pattern, - * where one map describes the - * %parallel partitioning of the dofs - * associated with the sparsity - * pattern rows and the other one of - * the sparsity pattern columns. Note - * that there is no real parallelism - * along the columns – the - * processor that owns a certain row - * always owns all the column - * elements, no matter how far they - * might be spread out. The second - * Epetra_Map is only used to specify - * the number of columns and for - * internal arragements when doing - * matrix-vector products with - * vectors based on that column map. + * This reinit function is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. This interface is meant to + * be used for generating rectangular sparsity pattern, where one map + * describes the %parallel partitioning of the dofs associated with the + * sparsity pattern rows and the other one of the sparsity pattern + * columns. Note that there is no real parallelism along the columns + * – the processor that owns a certain row always owns all the + * column elements, no matter how far they might be spread out. The second + * Epetra_Map is only used to specify the number of columns and for + * internal arragements when doing matrix-vector products with vectors + * based on that column map. * - * The number of columns entries per - * row is specified by the argument + * The number of columns entries per row is specified by the argument * n_entries_per_row. */ void @@ -683,27 +531,15 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * This reinit function is similar to - * the one above, but it now takes - * two different Epetra maps for rows - * and columns. This interface is - * meant to be used for generating - * rectangular matrices, where one - * map specifies the %parallel - * distribution of rows and the - * second one specifies the - * distribution of degrees of freedom - * associated with matrix - * columns. This second map is - * however not used for the - * distribution of the columns - * themselves – rather, all - * column elements of a row are - * stored on the same processor. The - * vector n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. + * This reinit function is similar to the one above, but it now takes two + * different Epetra maps for rows and columns. 
This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of rows and the second one specifies the + * distribution of degrees of freedom associated with matrix columns. This + * second map is however not used for the distribution of the columns + * themselves – rather, all column elements of a row are stored on + * the same processor. The vector n_entries_per_row specifies the + * number of entries in each row of the newly generated matrix. */ void reinit (const Epetra_Map &row_parallel_partitioning, @@ -711,19 +547,12 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * Reinit function. Takes one of the - * deal.II sparsity patterns and a - * %parallel partitioning of the rows - * and columns for initializing the - * current Trilinos sparsity - * pattern. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. + * Reinit function. Takes one of the deal.II sparsity patterns and a + * %parallel partitioning of the rows and columns for initializing the + * current Trilinos sparsity pattern. The optional argument @p + * exchange_data can be used for reinitialization with a sparsity pattern + * that is not fully constructed. This feature is only implemented for + * input sparsity patterns of type CompressedSimpleSparsityPattern. */ template void @@ -733,19 +562,12 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * Reinit function. Takes one of the - * deal.II sparsity patterns and a - * %parallel partitioning of the rows - * and columns for initializing the - * current Trilinos sparsity - * pattern. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. + * Reinit function. Takes one of the deal.II sparsity patterns and a + * %parallel partitioning of the rows and columns for initializing the + * current Trilinos sparsity pattern. The optional argument @p + * exchange_data can be used for reinitialization with a sparsity pattern + * that is not fully constructed. This feature is only implemented for + * input sparsity patterns of type CompressedSimpleSparsityPattern. */ template void @@ -759,74 +581,47 @@ namespace TrilinosWrappers //@{ /** - * Constructor for a square sparsity - * pattern using an IndexSet and an - * MPI communicator for the - * description of the %parallel - * partitioning. Moreover, the number - * of nonzero entries in the rows of - * the sparsity pattern can be - * specified. Note that this number - * does not need to be exact, and it - * is even allowed that the actual - * sparsity structure has more - * nonzero entries than specified in - * the constructor. However it is - * still advantageous to provide good - * estimates here since a good value - * will avoid repeated allocation of - * memory, which considerably - * increases the performance when - * creating the sparsity pattern. + * Constructor for a square sparsity pattern using an IndexSet and an MPI + * communicator for the description of the %parallel + * partitioning. Moreover, the number of nonzero entries in the rows of + * the sparsity pattern can be specified. 
Note that this number does not + * need to be exact, and it is even allowed that the actual sparsity + * structure has more nonzero entries than specified in the + * constructor. However it is still advantageous to provide good estimates + * here since a good value will avoid repeated allocation of memory, which + * considerably increases the performance when creating the sparsity + * pattern. */ SparsityPattern (const IndexSet ¶llel_partitioning, const MPI_Comm &communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** - * Same as before, but now use the - * exact number of nonzeros in each m - * row. Since we know the number of - * elements in the sparsity pattern - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes the - * creation process by the respective - * SparsityPattern::reinit call - * considerably faster. However, this - * is a rather unusual situation, - * since knowing the number of - * entries in each row is usually - * connected to knowing the indices - * of nonzero entries, which the - * sparsity pattern is designed to - * describe. + * Same as before, but now use the exact number of nonzeros in each m + * row. Since we know the number of elements in the sparsity pattern + * exactly in this case, we can already allocate the right amount of + * memory, which makes the creation process by the respective + * SparsityPattern::reinit call considerably faster. However, this is a + * rather unusual situation, since knowing the number of entries in each + * row is usually connected to knowing the indices of nonzero entries, + * which the sparsity pattern is designed to describe. */ SparsityPattern (const IndexSet ¶llel_partitioning, const MPI_Comm &communicator, const std::vector &n_entries_per_row); /** - * This constructor is similar to the - * one above, but it now takes two - * different index sets to describe the - * %parallel partitioning of rows and - * columns. This interface is meant to - * be used for generating rectangular - * sparsity pattern. Note that there is - * no real parallelism along the - * columns – the processor that - * owns a certain row always owns all - * the column elements, no matter how - * far they might be spread out. The - * second Epetra_Map is only used to - * specify the number of columns and - * for internal arragements when doing - * matrix-vector products with vectors + * This constructor is similar to the one above, but it now takes two + * different index sets to describe the %parallel partitioning of rows and + * columns. This interface is meant to be used for generating rectangular + * sparsity pattern. Note that there is no real parallelism along the + * columns – the processor that owns a certain row always owns all + * the column elements, no matter how far they might be spread out. The + * second Epetra_Map is only used to specify the number of columns and for + * internal arragements when doing matrix-vector products with vectors * based on that column map. * - * The number of columns entries per - * row is specified as the maximum + * The number of columns entries per row is specified as the maximum * number of entries argument. */ SparsityPattern (const IndexSet &row_parallel_partitioning, @@ -835,24 +630,15 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * This constructor is similar to the - * one above, but it now takes two - * different index sets for rows and - * columns. 
This interface is meant to - * be used for generating rectangular - * matrices, where one map specifies - * the %parallel distribution of rows - * and the second one specifies the - * distribution of degrees of freedom - * associated with matrix columns. This - * second map is however not used for - * the distribution of the columns - * themselves – rather, all - * column elements of a row are stored - * on the same processor. The vector - * n_entries_per_row specifies - * the number of entries in each row of - * the newly generated matrix. + * This constructor is similar to the one above, but it now takes two + * different index sets for rows and columns. This interface is meant to + * be used for generating rectangular matrices, where one map specifies + * the %parallel distribution of rows and the second one specifies the + * distribution of degrees of freedom associated with matrix columns. This + * second map is however not used for the distribution of the columns + * themselves – rather, all column elements of a row are stored on + * the same processor. The vector n_entries_per_row specifies the + * number of entries in each row of the newly generated matrix. */ SparsityPattern (const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, @@ -860,30 +646,51 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * Reinitialization function for - * generating a square sparsity - * pattern using an IndexSet and an - * MPI communicator for the - * description of the %parallel - * partitioning and the number of - * nonzero entries in the rows of the - * sparsity pattern. Note that this - * number does not need to be exact, - * and it is even allowed that the - * actual sparsity structure has more - * nonzero entries than specified in - * the constructor. However it is - * still advantageous to provide good - * estimates here since this will - * considerably increase the - * performance when creating the - * sparsity pattern. + * This constructor constructs general sparsity patterns, possible + * non-square ones. Constructing a sparsity pattern this way allows the + * user to explicitly specify the rows into which we are going to add + * elements. This set is required to be a superset of the first index set + * @p row_parallel_partitioning that includes also rows that are owned by + * another processor (ghost rows). Note that elements can only be added to + * rows specified by @p writable_rows. + * + * This method is beneficial when the rows to which a processor is going + * to write can be determined before actually inserting elements into the + * matrix. For the typical parallel::distributed::Triangulation class used + * in deal.II, we know that a processor only will add row elements for + * what we call the locally relevant dofs (see + * DoFTools::extract_locally_relevant_dofs). The other constructors + * methods use general Trilinos facilities that allow to add elements to + * arbitrary rows (as done by all the other reinit functions). However, + * this flexbility come at a cost, the most prominent being that adding + * elements into the same matrix from multiple threads in shared memory is + * not safe whenever MPI is used. 
For these settings, the current method + * is the one to choose: It will store the off-processor data as an + * additional sparsity pattern (that is then passed to the Trilinos matrix + * via the reinit mehtod) which can be organized in such a way that + * thread-safety can be ensured (as long as the user makes sure to never + * write into the same matrix row simultaneously, of course). + */ + SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Reinitialization function for generating a square sparsity pattern + * using an IndexSet and an MPI communicator for the description of the + * %parallel partitioning and the number of nonzero entries in the rows of + * the sparsity pattern. Note that this number does not need to be exact, + * and it is even allowed that the actual sparsity structure has more + * nonzero entries than specified in the constructor. However it is still + * advantageous to provide good estimates here since this will + * considerably increase the performance when creating the sparsity + * pattern. * - * This function does not create any - * entries by itself, but provides - * the correct data structures that - * can be used by the respective - * add() function. + * This function does not create any entries by itself, but provides the + * correct data structures that can be used by the respective add() + * function. */ void reinit (const IndexSet ¶llel_partitioning, @@ -891,21 +698,13 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * Same as before, but now use the - * exact number of nonzeros in each m - * row. Since we know the number of - * elements in the sparsity pattern - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes process of - * adding entries to the sparsity - * pattern considerably - * faster. However, this is a rather - * unusual situation, since knowing - * the number of entries in each row - * is usually connected to knowing - * the indices of nonzero entries, - * which the sparsity pattern is + * Same as before, but now use the exact number of nonzeros in each m + * row. Since we know the number of elements in the sparsity pattern + * exactly in this case, we can already allocate the right amount of + * memory, which makes process of adding entries to the sparsity pattern + * considerably faster. However, this is a rather unusual situation, since + * knowing the number of entries in each row is usually connected to + * knowing the indices of nonzero entries, which the sparsity pattern is * designed to describe. */ void @@ -914,32 +713,19 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * This reinit function is similar to - * the one above, but it now takes - * two different index sets for rows - * and columns. This interface is - * meant to be used for generating - * rectangular sparsity pattern, - * where one index set describes the - * %parallel partitioning of the dofs - * associated with the sparsity - * pattern rows and the other one of - * the sparsity pattern columns. Note - * that there is no real parallelism - * along the columns – the - * processor that owns a certain row - * always owns all the column - * elements, no matter how far they - * might be spread out. 
The second - * IndexSet is only used to specify - * the number of columns and for - * internal arragements when doing - * matrix-vector products with - * vectors based on an EpetraMap - * based on that IndexSet. + * This reinit function is similar to the one above, but it now takes two + * different index sets for rows and columns. This interface is meant to + * be used for generating rectangular sparsity pattern, where one index + * set describes the %parallel partitioning of the dofs associated with + * the sparsity pattern rows and the other one of the sparsity pattern + * columns. Note that there is no real parallelism along the columns + * – the processor that owns a certain row always owns all the + * column elements, no matter how far they might be spread out. The second + * IndexSet is only used to specify the number of columns and for internal + * arragements when doing matrix-vector products with vectors based on an + * EpetraMap based on that IndexSet. * - * The number of columns entries per - * row is specified by the argument + * The number of columns entries per row is specified by the argument * n_entries_per_row. */ void @@ -949,11 +735,40 @@ namespace TrilinosWrappers const size_type n_entries_per_row = 0); /** - * Same as before, but now using a - * vector n_entries_per_row - * for specifying the number of - * entries in each row of the - * sparsity pattern. + * This reinit function is used to specify general matrices, possibly + * non-square ones. In addition to the arguments of the other reinit + * method above, it allows the user to explicitly specify the rows into + * which we are going to add elements. This set is a superset of the first + * index set @p row_parallel_partitioning that includes also rows that are + * owned by another processor (ghost rows). + * + * This method is beneficial when the rows to which a processor is going + * to write can be determined before actually inserting elements into the + * matrix. For the typical parallel::distributed::Triangulation class used + * in deal.II, we know that a processor only will add row elements for + * what we call the locally relevant dofs (see + * DoFTools::extract_locally_relevant_dofs). Trilinos matrices allow to + * add elements to arbitrary rows (as done by all the other reinit + * functions) and this is what all the other reinit methods do, + * too. However, this flexbility come at a cost, the most prominent being + * that adding elements into the same matrix from multiple threads in + * shared memory is not safe whenever MPI is used. For these settings, the + * current method is the one to choose: It will store the off-processor + * data as an additional sparsity pattern (that is then passed to the + * Trilinos matrix via the reinit mehtod) which can be organized in such a + * way that thread-safety can be ensured (as long as the user makes sure + * to never write into the same matrix row simultaneously, of course). + */ + void + reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writeable_rows, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const size_type n_entries_per_row = 0); + + /** + * Same as before, but now using a vector n_entries_per_row for + * specifying the number of entries in each row of the sparsity pattern. */ void reinit (const IndexSet &row_parallel_partitioning, @@ -962,20 +777,13 @@ namespace TrilinosWrappers const std::vector &n_entries_per_row); /** - * Reinit function. 
Takes one of the - * deal.II sparsity patterns and the - * %parallel partitioning of the rows - * and columns specified by two index - * sets and a %parallel communicator - * for initializing the current - * Trilinos sparsity pattern. The - * optional argument @p exchange_data - * can be used for reinitialization - * with a sparsity pattern that is - * not fully constructed. This - * feature is only implemented for - * input sparsity patterns of type - * CompressedSimpleSparsityPattern. + * Reinit function. Takes one of the deal.II sparsity patterns and the + * %parallel partitioning of the rows and columns specified by two index + * sets and a %parallel communicator for initializing the current Trilinos + * sparsity pattern. The optional argument @p exchange_data can be used + * for reinitialization with a sparsity pattern that is not fully + * constructed. This feature is only implemented for input sparsity + * patterns of type CompressedSimpleSparsityPattern. */ template void @@ -986,19 +794,12 @@ namespace TrilinosWrappers const bool exchange_data = false); /** - * Reinit function. Takes one of the - * deal.II sparsity patterns and a - * %parallel partitioning of the rows - * and columns for initializing the - * current Trilinos sparsity - * pattern. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. + * Reinit function. Takes one of the deal.II sparsity patterns and a + * %parallel partitioning of the rows and columns for initializing the + * current Trilinos sparsity pattern. The optional argument @p + * exchange_data can be used for reinitialization with a sparsity pattern + * that is not fully constructed. This feature is only implemented for + * input sparsity patterns of type CompressedSimpleSparsityPattern. */ template void @@ -1013,119 +814,88 @@ namespace TrilinosWrappers //@{ /** - * Returns the state of the sparsity - * pattern, i.e., whether compress() - * needs to be called after an - * operation requiring data - * exchange. + * Returns the state of the sparsity pattern, i.e., whether compress() + * needs to be called after an operation requiring data exchange. */ bool is_compressed () const; /** - * Gives the maximum number of - * entries per row on the current - * processor. + * Gives the maximum number of entries per row on the current processor. */ unsigned int max_entries_per_row () const; /** - * Return the number of rows in this - * sparsity pattern. + * Return the number of rows in this sparsity pattern. */ size_type n_rows () const; /** - * Return the number of columns in - * this sparsity pattern. + * Return the number of columns in this sparsity pattern. */ size_type n_cols () const; /** - * Return the local dimension of the - * sparsity pattern, i.e. the number - * of rows stored on the present MPI - * process. In the sequential case, - * this number is the same as - * n_rows(), but for parallel - * matrices it may be smaller. + * Return the local dimension of the sparsity pattern, i.e. the number of + * rows stored on the present MPI process. In the sequential case, this + * number is the same as n_rows(), but for parallel matrices it may be + * smaller. * - * To figure out which elements - * exactly are stored locally, - * use local_range(). + * To figure out which elements exactly are stored locally, use + * local_range(). 
*/ unsigned int local_size () const; /** - * Return a pair of indices - * indicating which rows of this - * sparsity pattern are stored - * locally. The first number is the - * index of the first row stored, the - * second the index of the one past - * the last one that is stored - * locally. If this is a sequential - * matrix, then the result will be - * the pair (0,n_rows()), otherwise - * it will be a pair (i,i+n), where + * Return a pair of indices indicating which rows of this sparsity pattern + * are stored locally. The first number is the index of the first row + * stored, the second the index of the one past the last one that is + * stored locally. If this is a sequential matrix, then the result will be + * the pair (0,n_rows()), otherwise it will be a pair (i,i+n), where * n=local_size(). */ std::pair local_range () const; /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). + * Return whether @p index is in the local range or not, see also + * local_range(). */ bool in_local_range (const size_type index) const; /** - * Return the number of nonzero - * elements of this sparsity pattern. + * Return the number of nonzero elements of this sparsity pattern. */ size_type n_nonzero_elements () const; /** - * Number of entries in a - * specific row. + * Number of entries in a specific row. */ size_type row_length (const size_type row) const; /** - * Compute the bandwidth of the - * matrix represented by this - * structure. The bandwidth is the - * maximum of $|i-j|$ for which the - * index pair $(i,j)$ represents a - * nonzero entry of the - * matrix. Consequently, the maximum - * bandwidth a $n\times m$ matrix can - * have is $\max\{n-1,m-1\}$. + * Compute the bandwidth of the matrix represented by this structure. The + * bandwidth is the maximum of $|i-j|$ for which the index pair $(i,j)$ + * represents a nonzero entry of the matrix. Consequently, the maximum + * bandwidth a $n\times m$ matrix can have is $\max\{n-1,m-1\}$. */ size_type bandwidth () const; /** - * Return whether the object is - * empty. It is empty if no memory is - * allocated, which is the same as - * when both dimensions are zero. + * Return whether the object is empty. It is empty if no memory is + * allocated, which is the same as when both dimensions are zero. */ bool empty () const; /** - * Return whether the index - * (i,j) exists in the - * sparsity pattern (i.e., it may be - * non-zero) or not. + * Return whether the index (i,j) exists in the sparsity pattern + * (i.e., it may be non-zero) or not. */ bool exists (const size_type i, const size_type j) const; /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. Currently not - * implemented for this class. + * Determine an estimate for the memory consumption (in bytes) of this + * object. Currently not implemented for this class. */ std::size_t memory_consumption () const; @@ -1135,16 +905,14 @@ namespace TrilinosWrappers */ //@{ /** - * Add the element (i,j) to - * the sparsity pattern. + * Add the element (i,j) to the sparsity pattern. */ void add (const size_type i, const size_type j); /** - * Add several elements in one row to - * the sparsity pattern. + * Add several elements in one row to the sparsity pattern. */ template void add_entries (const size_type row, @@ -1158,61 +926,44 @@ namespace TrilinosWrappers //@{ /** - * Return a const reference to the - * underlying Trilinos - * Epetra_CrsGraph data that stores - * the sparsity pattern. 
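The query functions documented above can be exercised as in the following sketch; the pattern is fully local and its size and entries are invented for illustration.

```cpp
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <iostream>
#include <utility>

// Illustrative only: build a small tridiagonal-like pattern, compress it and
// print a few of the properties described above.
void inspect_pattern ()
{
  dealii::TrilinosWrappers::SparsityPattern sp (5, 5, 3);
  for (unsigned int i = 0; i < 5; ++i)
    {
      sp.add (i, i);
      if (i > 0)
        sp.add (i, i - 1);
    }
  sp.compress ();

  const std::pair<dealii::types::global_dof_index,
                  dealii::types::global_dof_index> range = sp.local_range ();

  std::cout << "rows stored locally: [" << range.first << ','
            << range.second << ')'                        << std::endl
            << "nonzeros:      " << sp.n_nonzero_elements () << std::endl
            << "bandwidth:     " << sp.bandwidth ()          << std::endl
            << "row_length(2): " << sp.row_length (2)        << std::endl
            << "exists(0,4):   " << sp.exists (0, 4)         << std::endl;
}
```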
+ * Return a const reference to the underlying Trilinos Epetra_CrsGraph + * data that stores the sparsity pattern. */ const Epetra_FECrsGraph &trilinos_sparsity_pattern () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the parallel - * partitioning of the domain space - * of this sparsity pattern, i.e., - * the partitioning of the vectors - * matrices based on this sparsity - * pattern are multiplied with. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the parallel partitioning of the domain space of this sparsity + * pattern, i.e., the partitioning of the vectors matrices based on this + * sparsity pattern are multiplied with. */ const Epetra_Map &domain_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * range space of this sparsity - * pattern, i.e., the partitioning of - * the vectors that are result from + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the range space of this sparsity pattern, + * i.e., the partitioning of the vectors that are result from * matrix-vector products. */ const Epetra_Map &range_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * sparsity pattern rows. Equal to - * the partitioning of the range. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the sparsity pattern rows. Equal to the + * partitioning of the range. */ const Epetra_Map &row_partitioner () const; /** - * Return a const reference to the - * underlying Trilinos Epetra_Map - * that sets the partitioning of the - * sparsity pattern columns. This is - * in general not equal to the - * partitioner Epetra_Map for the - * domain because of overlap in the - * matrix. + * Return a const reference to the underlying Trilinos Epetra_Map that + * sets the partitioning of the sparsity pattern columns. This is in + * general not equal to the partitioner Epetra_Map for the domain because + * of overlap in the matrix. */ const Epetra_Map &col_partitioner () const; /** - * Return a const reference to - * the communicator used for - * this object. + * Return a const reference to the communicator used for this object. */ const Epetra_Comm &trilinos_communicator () const; //@} @@ -1222,8 +973,7 @@ namespace TrilinosWrappers //@{ /** - * STL-like iterator with the - * first entry. + * STL-like iterator with the first entry. */ const_iterator begin () const; @@ -1233,34 +983,22 @@ namespace TrilinosWrappers const_iterator end () const; /** - * STL-like iterator with the - * first entry of row @p r. + * STL-like iterator with the first entry of row @p r. * - * Note that if the given row - * is empty, i.e. does not - * contain any nonzero entries, - * then the iterator returned - * by this function equals - * end(r). Note also - * that the iterator may not be - * dereferencable in that case. + * Note that if the given row is empty, i.e. does not contain any nonzero + * entries, then the iterator returned by this function equals + * end(r). Note also that the iterator may not be dereferencable + * in that case. */ const_iterator begin (const size_type r) const; /** - * Final iterator of row - * r. It points to the - * first element past the end - * of line @p r, or past the - * end of the entire sparsity - * pattern. + * Final iterator of row r. 
It points to the first element past + * the end of line @p r, or past the end of the entire sparsity pattern. * - * Note that the end iterator - * is not necessarily - * dereferencable. This is in - * particular the case if it is - * the end iterator for the - * last row of a matrix. + * Note that the end iterator is not necessarily dereferencable. This is + * in particular the case if it is the end iterator for the last row of a + * matrix. */ const_iterator end (const size_type r) const; @@ -1271,55 +1009,35 @@ namespace TrilinosWrappers //@{ /** - * Abstract Trilinos object - * that helps view in ASCII - * other Trilinos - * objects. Currently this - * function is not - * implemented. TODO: Not + * Abstract Trilinos object that helps view in ASCII other Trilinos + * objects. Currently this function is not implemented. TODO: Not * implemented. */ void write_ascii (); /** - * Print (the locally owned part of) - * the sparsity pattern to the given - * stream, using the format - * (line,col). The optional - * flag outputs the sparsity pattern - * in Trilinos style, where even the - * according processor number is - * printed to the stream, as well as - * a summary before actually writing - * the entries. + * Print (the locally owned part of) the sparsity pattern to the given + * stream, using the format (line,col). The optional flag outputs + * the sparsity pattern in Trilinos style, where even the according + * processor number is printed to the stream, as well as a summary before + * actually writing the entries. */ void print (std::ostream &out, const bool write_extended_trilinos_info = false) const; /** - * Print the sparsity of the matrix - * in a format that gnuplot - * understands and which can be used - * to plot the sparsity pattern in a - * graphical way. The format consists - * of pairs i j of nonzero - * elements, each representing one - * entry of this matrix, one per line - * of the output file. Indices are - * counted from zero on, as - * usual. Since sparsity patterns are - * printed in the same way as - * matrices are displayed, we print - * the negative of the column index, - * which means that the - * (0,0) element is in the - * top left rather than in the bottom + * Print the sparsity of the matrix in a format that gnuplot + * understands and which can be used to plot the sparsity pattern in a + * graphical way. The format consists of pairs i j of nonzero + * elements, each representing one entry of this matrix, one per line of + * the output file. Indices are counted from zero on, as usual. Since + * sparsity patterns are printed in the same way as matrices are + * displayed, we print the negative of the column index, which means that + * the (0,0) element is in the top left rather than in the bottom * left corner. * - * Print the sparsity pattern in - * gnuplot by setting the data style - * to dots or points and use the - * plot command. + * Print the sparsity pattern in gnuplot by setting the data style to dots + * or points and use the plot command. */ void print_gnuplot (std::ostream &out) const; @@ -1371,30 +1089,31 @@ namespace TrilinosWrappers private: /** - * Pointer to the user-supplied - * Epetra Trilinos mapping of - * the matrix columns that - * assigns parts of the matrix - * to the individual processes. + * Pointer to the user-supplied Epetra Trilinos mapping of the matrix + * columns that assigns parts of the matrix to the individual processes. 
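A short illustration of the row iterators and the gnuplot output described above (again only a sketch, not part of the patch): the pattern sp, the row index r, and the output file name are placeholders.

    #include <fstream>
    #include <ostream>
    #include <deal.II/lac/trilinos_sparsity_pattern.h>

    void dump_row_and_pattern (const dealii::TrilinosWrappers::SparsityPattern &sp,
                               const dealii::TrilinosWrappers::SparsityPattern::size_type r,
                               std::ostream &out)
    {
      // Walk over the entries of one locally stored row r; as noted above,
      // the end(r) iterator may not be dereferenceable, so it is only used
      // for comparison.
      for (dealii::TrilinosWrappers::SparsityPattern::const_iterator it = sp.begin (r);
           it != sp.end (r); ++it)
        out << '(' << it->row () << ',' << it->column () << ")\n";

      // Write the locally owned part in a gnuplot-friendly format; inside
      // gnuplot one would then say, e.g.:  set style data points; plot "pattern.gpl"
      std::ofstream gpl ("pattern.gpl");
      sp.print_gnuplot (gpl);
    }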
*/ std_cxx1x::shared_ptr column_space_map; /** - * A boolean variable to hold - * information on whether the - * vector is compressed or not. + * A boolean variable to hold information on whether the vector is + * compressed or not. */ bool compressed; /** - * A sparsity pattern object in - * Trilinos to be used for finite - * element based problems which - * allows for adding non-local - * elements to the pattern. + * A sparsity pattern object in Trilinos to be used for finite element + * based problems which allows for adding non-local elements to the + * pattern. */ std_cxx1x::shared_ptr graph; + /** + * A sparsity pattern object for the non-local part of the sparsity + * pattern that is going to be sent to the owning processor. Only used when the particular constructor or reinit method with writable_rows argument is set + */ + std_cxx1x::shared_ptr nonlocal_graph; + + friend class SparseMatrix; friend class SparsityPatternIterators::Accessor; friend class SparsityPatternIterators::Iterator; }; @@ -1696,9 +1415,23 @@ namespace TrilinosWrappers const int n_cols = static_cast(end - begin); compressed = false; - const int ierr = graph->InsertGlobalIndices (1, - (TrilinosWrappers::types::int_type *)&row, - n_cols, col_index_ptr); + int ierr; + if ( graph->RowMap().LID(static_cast(row)) != -1) + ierr = graph->InsertGlobalIndices (row, n_cols, col_index_ptr); + else if (nonlocal_graph.get() != 0) + { + // this is the case when we have explicitly set the off-processor rows + // and want to create a separate matrix object for them (to retain + // thread-safety) + Assert (nonlocal_graph->RowMap().LID(static_cast(row)) != -1, + ExcMessage("Attempted to write into off-processor matrix row " + "that has not be specified as being writable upon " + "initialization")); + ierr = nonlocal_graph->InsertGlobalIndices (row, n_cols, col_index_ptr); + } + else + ierr = graph->InsertGlobalIndices + (1, (TrilinosWrappers::types::int_type *)&row, n_cols, col_index_ptr); AssertThrow (ierr >= 0, ExcTrilinosError(ierr)); } @@ -1757,157 +1490,6 @@ namespace TrilinosWrappers return graph->RangeMap().Comm(); } - - - inline - SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator, - const size_type n_entries_per_row) - : - compressed (false) - { - Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, - false); - reinit (map, map, n_entries_per_row); - } - - - - inline - SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row) - : - compressed (false) - { - Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, - false); - reinit (map, map, n_entries_per_row); - } - - - - inline - SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, - const size_type n_entries_per_row) - : - compressed (false) - { - Epetra_Map row_map = - row_parallel_partitioning.make_trilinos_map (communicator, false); - Epetra_Map col_map = - col_parallel_partitioning.make_trilinos_map (communicator, false); - reinit (row_map, col_map, n_entries_per_row); - } - - - - inline - SparsityPattern:: - SparsityPattern (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row) - : - compressed (false) - { - Epetra_Map row_map = - row_parallel_partitioning.make_trilinos_map (communicator, false); - 
Epetra_Map col_map = - col_parallel_partitioning.make_trilinos_map (communicator, false); - reinit (row_map, col_map, n_entries_per_row); - } - - - - inline - void - SparsityPattern::reinit (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator, - const size_type n_entries_per_row) - { - Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, - false); - reinit (map, map, n_entries_per_row); - } - - - - inline - void SparsityPattern::reinit (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row) - { - Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, - false); - reinit (map, map, n_entries_per_row); - } - - - - inline - void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, - const size_type n_entries_per_row) - { - Epetra_Map row_map = - row_parallel_partitioning.make_trilinos_map (communicator, false); - Epetra_Map col_map = - col_parallel_partitioning.make_trilinos_map (communicator, false); - reinit (row_map, col_map, n_entries_per_row); - } - - - inline - void - SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row) - { - Epetra_Map row_map = - row_parallel_partitioning.make_trilinos_map (communicator, false); - Epetra_Map col_map = - col_parallel_partitioning.make_trilinos_map (communicator, false); - reinit (row_map, col_map, n_entries_per_row); - } - - - - template - inline - void - SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const SparsityType &nontrilinos_sparsity_pattern, - const MPI_Comm &communicator, - const bool exchange_data) - { - Epetra_Map row_map = - row_parallel_partitioning.make_trilinos_map (communicator, false); - Epetra_Map col_map = - col_parallel_partitioning.make_trilinos_map (communicator, false); - reinit (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data); - } - - - - template - inline - void - SparsityPattern::reinit (const IndexSet ¶llel_partitioning, - const SparsityType &nontrilinos_sparsity_pattern, - const MPI_Comm &communicator, - const bool exchange_data) - { - Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, - false); - reinit (map, map, nontrilinos_sparsity_pattern, exchange_data); - } - #endif // DOXYGEN } diff --git a/deal.II/include/deal.II/matrix_free/shape_info.h b/deal.II/include/deal.II/matrix_free/shape_info.h index e25b8bfb37..8bbae186b8 100644 --- a/deal.II/include/deal.II/matrix_free/shape_info.h +++ b/deal.II/include/deal.II/matrix_free/shape_info.h @@ -112,7 +112,7 @@ namespace internal * 2*dim faces, and the columns the * DoFs on the faces. */ - Table<2,unsigned int> face_indices; + dealii::Table<2,unsigned int> face_indices; /** * Stores one-dimensional values of shape functions evaluated in zero diff --git a/deal.II/include/deal.II/numerics/matrix_tools.h b/deal.II/include/deal.II/numerics/matrix_tools.h index 735834df76..27b47c9ba2 100644 --- a/deal.II/include/deal.II/numerics/matrix_tools.h +++ b/deal.II/include/deal.II/numerics/matrix_tools.h @@ -635,7 +635,7 @@ namespace MatrixCreator * solver which can handle nonsymmetric matrices in any case, so there * may be no need to do the Gauss elimination anyway. 
In fact, this is * the way the function works: it takes a parameter - * (@p elininate_columns) that specifies whether the sparsity pattern + * (@p eliminate_columns) that specifies whether the sparsity pattern * is symmetric; if so, then the column is eliminated and the right * hand side is also modified accordingly. If not, then only the row * is deleted and the column is not touched at all, and all right hand diff --git a/deal.II/include/deal.II/numerics/vector_tools.h b/deal.II/include/deal.II/numerics/vector_tools.h index 9d14cdcc10..9558de7a42 100644 --- a/deal.II/include/deal.II/numerics/vector_tools.h +++ b/deal.II/include/deal.II/numerics/vector_tools.h @@ -502,6 +502,73 @@ namespace VectorTools const InVector &data_1, OutVector &data_2); + /** + * This function is a kind of generalization or modification + * of the very first interpolate() + * function in the series. + * It interpolations a set of functions onto the finite element space + * given by the DoFHandler argument where the determination which function + * to use is made based on the material id (see @ref GlossMaterialId) of + * each cell. + * + * @param mapping - The mapping to use to determine the location + * of support points at which the functions + * are to be evaluated. + * @param dof - DoFHandler initialized with Triangulation + * and + * FiniteElement + * objects, + * @param function_map - std::map reflecting the correspondence + * between material ids and functions, + * @param dst - global FE vector at the support points, + * @param component_mask - mask of components that shall be interpolated + * + * @note If a material id of some group of cells + * is missed in @p function_map, then @p dst will + * not be updated in the respective degrees of freedom + * of the output vector + * For example, if @p dst was successfully + * initialized to capture the degrees of freedom of the @p dof_handler + * of the problem with all zeros in it, + * then those zeros which correspond to + * the missed material ids will still remain + * in @p dst even after calling this function. + * + * @note Degrees of freedom located on faces between cells of different + * material ids will get their value by that cell which was called + * last in the respective loop over cells implemented + * in this function. + * Since this process is kind of arbitrary, + * you cannot control it. + * However, if you want to have control over the order in which cells are visited, + * let us take a look at the following example: Let @p u be a variable of interest + * which is approximated by some CG finite element. + * Let @p 0, @p 1 and @p 2 be material ids + * of cells on the triangulation. + * Let 0: 0.0, 1: 1.0, 2: 2.0 + * be the whole @p function_map that you want to pass to + * this function, where @p key is a material id and + * @p value is a value of @p u. + * By using the whole @p function_map you do not really know + * which values will be assigned to the face DoFs. + * On the other hand, if you split the whole @p function_map + * into three smaller independent objects + * 0: 0.0 and 1: 1.0 and 2: 2.0 + * and make three distinct calls of this function passing each + * of these objects separately (the order depends on what you want + * to get between cells), then each subsequent call will rewrite + * the intercell @p dofs of the previous one. 
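For the material-id based interpolation just described, a typical call could look like the following sketch. It is purely illustrative and not part of the patch: the 2d DoFHandler, the constant functions, and the material ids 0 and 1 are assumptions; the function signature itself is the one declared in this commit.

    #include <deal.II/base/function.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/mapping_q1.h>
    #include <deal.II/lac/vector.h>
    #include <deal.II/numerics/vector_tools.h>
    #include <map>

    void fill_by_material (const dealii::DoFHandler<2> &dof_handler,
                           dealii::Vector<double>      &dst)
    {
      using namespace dealii;

      // One function per material id; cells whose material id does not
      // appear in the map leave the corresponding entries of dst untouched.
      ConstantFunction<2> f0 (0.0), f1 (1.0);
      std::map<types::material_id, const Function<2>*> function_map;
      function_map[0] = &f0;
      function_map[1] = &f1;

      VectorTools::interpolate_based_on_material_id (MappingQ1<2>(),
                                                     dof_handler,
                                                     function_map,
                                                     dst);
    }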
+ * + * @author Valentin Zingan, 2013 + */ + template + void + interpolate_based_on_material_id(const Mapping& mapping, + const DH& dof_handler, + const std::map< types::material_id, const Function* >& function_map, + VECTOR& dst, + const ComponentMask& component_mask = ComponentMask()); + /** * Gives the interpolation of a * @p dof1-function @p u1 to a @@ -2353,6 +2420,12 @@ namespace VectorTools const unsigned int component); //@} + + /** + * Exception. + */ + DeclException0(ExcInvalidMaterialIndicator); + /** * Exception */ diff --git a/deal.II/include/deal.II/numerics/vector_tools.templates.h b/deal.II/include/deal.II/numerics/vector_tools.templates.h index ba25203908..bc7ed01a80 100644 --- a/deal.II/include/deal.II/numerics/vector_tools.templates.h +++ b/deal.II/include/deal.II/numerics/vector_tools.templates.h @@ -331,6 +331,154 @@ namespace VectorTools } + template + void + interpolate_based_on_material_id(const Mapping& mapping, + const DH& dof, + const std::map< types::material_id, const Function* >& function_map, + VECTOR& dst, + const ComponentMask& component_mask) + { + const unsigned int dim = DH::dimension; + + Assert( component_mask.represents_n_components(dof.get_fe().n_components()), + ExcMessage("The number of components in the mask has to be either " + "zero or equal to the number of components in the finite " + "element.") ); + + if( function_map.size() == 0 ) + return; + + Assert( function_map.find(numbers::invalid_material_id) == function_map.end(), + ExcInvalidMaterialIndicator() ); + + for( typename std::map< types::material_id, const Function* >::const_iterator + iter = function_map.begin(); + iter != function_map.end(); + ++iter ) + { + Assert( dof.get_fe().n_components() == iter->second->n_components, + ExcDimensionMismatch(dof.get_fe().n_components(), iter->second->n_components) ); + } + + const hp::FECollection fe(dof.get_fe()); + const unsigned int n_components = fe.n_components(); + const bool fe_is_system = (n_components != 1); + + typename DH::active_cell_iterator cell = dof.begin_active(), + endc = dof.end(); + + std::vector< std::vector< Point > > unit_support_points(fe.size()); + for(unsigned int fe_index = 0; fe_index < fe.size(); ++fe_index) + { + unit_support_points[fe_index] = fe[fe_index].get_unit_support_points(); + Assert( unit_support_points[fe_index].size() != 0, + ExcNonInterpolatingFE() ); + } + + std::vector< std::vector > dofs_of_rep_points(fe.size()); + std::vector< std::vector > dof_to_rep_index_table(fe.size()); + std::vector n_rep_points(fe.size(), 0); + + for(unsigned int fe_index = 0; fe_index < fe.size(); ++fe_index) + { + for(unsigned int i = 0; i < fe[fe_index].dofs_per_cell; ++i) + { + bool representative = true; + + for(unsigned int j = dofs_of_rep_points[fe_index].size(); j > 0; --j) + if( unit_support_points[fe_index][i] == unit_support_points[fe_index][dofs_of_rep_points[fe_index][j-1]] ) + { + dof_to_rep_index_table[fe_index].push_back(j-1); + representative = false; + break; + } + + if(representative) + { + dof_to_rep_index_table[fe_index].push_back(dofs_of_rep_points[fe_index].size()); + dofs_of_rep_points[fe_index].push_back(i); + ++n_rep_points[fe_index]; + } + } + + Assert( dofs_of_rep_points[fe_index].size() == n_rep_points[fe_index], + ExcInternalError() ); + Assert( dof_to_rep_index_table[fe_index].size() == fe[fe_index].dofs_per_cell, + ExcInternalError() ); + } + + const unsigned int max_rep_points = *std::max_element(n_rep_points.begin(), + n_rep_points.end()); + std::vector dofs_on_cell(fe.max_dofs_per_cell()); + 
std::vector< Point > rep_points(max_rep_points); + + std::vector< std::vector > function_values_scalar(fe.size()); + std::vector< std::vector< Vector > > function_values_system(fe.size()); + + hp::QCollection support_quadrature; + for(unsigned int fe_index = 0; fe_index < fe.size(); ++fe_index) + support_quadrature.push_back( Quadrature(unit_support_points[fe_index]) ); + + hp::MappingCollection mapping_collection(mapping); + hp::FEValues fe_values(mapping_collection, + fe, + support_quadrature, + update_quadrature_points); + + for( ; cell != endc; ++cell) + if( cell->is_locally_owned() ) + if( function_map.find(cell->material_id()) != function_map.end() ) + { + const unsigned int fe_index = cell->active_fe_index(); + + fe_values.reinit(cell); + + const std::vector< Point >& support_points = fe_values.get_present_fe_values().get_quadrature_points(); + + rep_points.resize( dofs_of_rep_points[fe_index].size() ); + for(unsigned int i = 0; i < dofs_of_rep_points[fe_index].size(); ++i) + rep_points[i] = support_points[dofs_of_rep_points[fe_index][i]]; + + dofs_on_cell.resize( fe[fe_index].dofs_per_cell ); + cell->get_dof_indices(dofs_on_cell); + + if(fe_is_system) + { + function_values_system[fe_index].resize( n_rep_points[fe_index], + Vector(fe[fe_index].n_components()) ); + + function_map.find(cell->material_id())->second->vector_value_list(rep_points, + function_values_system[fe_index]); + + for(unsigned int i = 0; i < fe[fe_index].dofs_per_cell; ++i) + { + const unsigned int component = fe[fe_index].system_to_component_index(i).first; + + if( component_mask[component] ) + { + const unsigned int rep_dof = dof_to_rep_index_table[fe_index][i]; + dst(dofs_on_cell[i]) = function_values_system[fe_index][rep_dof](component); + } + } + } + else + { + function_values_scalar[fe_index].resize(n_rep_points[fe_index]); + + function_map.find(cell->material_id())->second->value_list(rep_points, + function_values_scalar[fe_index], + 0); + + for(unsigned int i = 0; i < fe[fe_index].dofs_per_cell; ++i) + dst(dofs_on_cell[i]) = function_values_scalar[fe_index][dof_to_rep_index_table[fe_index][i]]; + } + } + + dst.compress (VectorOperation::insert); + } + + namespace internal { /** diff --git a/deal.II/source/CMakeLists.txt b/deal.II/source/CMakeLists.txt index dd6de69d3e..0525ca1b93 100644 --- a/deal.II/source/CMakeLists.txt +++ b/deal.II/source/CMakeLists.txt @@ -14,7 +14,7 @@ ## ## --------------------------------------------------------------------- -MESSAGE(STATUS "Setup library") +MESSAGE(STATUS "Setting up library") # # Compile the deal.II library @@ -51,9 +51,6 @@ ADD_SUBDIRECTORY(integrators) ADD_SUBDIRECTORY(matrix_free) ADD_SUBDIRECTORY(meshworker) -# A custom target to build the library: -ADD_CUSTOM_TARGET(build_library) - FOREACH(build ${DEAL_II_BUILD_TYPES}) STRING(TOLOWER ${build} build_lowercase) @@ -68,9 +65,7 @@ FOREACH(build ${DEAL_II_BUILD_TYPES}) dummy.cc # Workaround for a bug in the Xcode generator ${deal_ii_objects_${build_lowercase}} ) - - ADD_DEPENDENCIES(build_library ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX}) - ADD_DEPENDENCIES(library ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX}) + ADD_DEPENDENCIES(library ${DEAL_II_BASE_NAME}${DEAL_II_${build}_SUFFIX}) SET_TARGET_PROPERTIES(${DEAL_II_BASE_NAME}${DEAL_II_${build}_SUFFIX} PROPERTIES @@ -104,11 +99,11 @@ FOREACH(build ${DEAL_II_BUILD_TYPES}) ) INSTALL(TARGETS ${DEAL_II_BASE_NAME}${DEAL_II_${build}_SUFFIX} + COMPONENT library EXPORT ${DEAL_II_PROJECT_CONFIG_NAME}Targets RUNTIME DESTINATION ${DEAL_II_EXECUTABLE_RELDIR} LIBRARY 
DESTINATION ${DEAL_II_LIBRARY_RELDIR} ARCHIVE DESTINATION ${DEAL_II_LIBRARY_RELDIR} - COMPONENT library ) ENDFOREACH() @@ -117,4 +112,4 @@ INSTALL(EXPORT ${DEAL_II_PROJECT_CONFIG_NAME}Targets COMPONENT library ) -MESSAGE(STATUS "Setup library - Done") +MESSAGE(STATUS "Setting up library - Done") diff --git a/deal.II/source/base/function_lib.cc b/deal.II/source/base/function_lib.cc index 92d7097165..8cdbc261f0 100644 --- a/deal.II/source/base/function_lib.cc +++ b/deal.II/source/base/function_lib.cc @@ -2265,6 +2265,194 @@ namespace Functions } + + namespace + { + // interpolate a data value from a table where ix denotes + // the (lower) left endpoint of the interval to interpolate + // in, and p_unit denotes the point in unit coordinates to do so. + double interpolate (const Table<1,double> &data_values, + const TableIndices<1> &ix, + const Point<1> &xi) + { + return ((1-xi[0])*data_values[ix[0]] + + + xi[0]*data_values[ix[0]+1]); + } + + double interpolate (const Table<2,double> &data_values, + const TableIndices<2> &ix, + const Point<2> &p_unit) + { + return (((1-p_unit[0])*data_values[ix[0]][ix[1]] + + + p_unit[0]*data_values[ix[0]+1][ix[1]])*(1-p_unit[1]) + + + ((1-p_unit[0])*data_values[ix[0]][ix[1]+1] + + + p_unit[0]*data_values[ix[0]+1][ix[1]+1])*p_unit[1]); + } + + double interpolate (const Table<3,double> &data_values, + const TableIndices<3> &ix, + const Point<3> &p_unit) + { + return ((((1-p_unit[0])*data_values[ix[0]][ix[1]][ix[2]] + + + p_unit[0]*data_values[ix[0]+1][ix[1]][ix[2]])*(1-p_unit[1]) + + + ((1-p_unit[0])*data_values[ix[0]][ix[1]+1][ix[2]] + + + p_unit[0]*data_values[ix[0]+1][ix[1]+1][ix[2]])*p_unit[1]) * (1-p_unit[2]) + + + (((1-p_unit[0])*data_values[ix[0]][ix[1]][ix[2]+1] + + + p_unit[0]*data_values[ix[0]+1][ix[1]][ix[2]+1])*(1-p_unit[1]) + + + ((1-p_unit[0])*data_values[ix[0]][ix[1]+1][ix[2]+1] + + + p_unit[0]*data_values[ix[0]+1][ix[1]+1][ix[2]+1])*p_unit[1]) * p_unit[2]); + } + } + + + template + InterpolatedTensorProductGridData:: + InterpolatedTensorProductGridData(const std_cxx1x::array,dim> &coordinate_values, + const Table &data_values) + : + coordinate_values (coordinate_values), + data_values (data_values) + { + for (unsigned int d=0; d= 2, + ExcMessage ("Coordinate arrays must have at least two coordinate values!")); + for (unsigned int i=0; i + double + InterpolatedTensorProductGridData::value(const Point &p, + const unsigned int component) const + { + Assert (component == 0, + ExcMessage ("This is a scalar function object, the component can only be zero.")); + + // find out where this data point lies, relative to the given + // points. if we run all the way to the end of the range, + // set the indices so that we will simply query the last of the + // intervals, starting at x.size()-2 and going to x.size()-1. + TableIndices ix; + for (unsigned int d=0; d 0) + --ix[d]; + } + + // now compute the relative point within the interval/rectangle/box + // defined by the point coordinates found above. 
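The bilinear interpolation helpers and the constructor assertions above are perhaps easiest to see in a small, purely illustrative sketch of the new Functions::InterpolatedTensorProductGridData class (not part of the patch; the grid, the data values, and the header paths are made up for the example):

    #include <deal.II/base/function_lib.h>
    #include <deal.II/base/point.h>
    #include <deal.II/base/table.h>
    #include <deal.II/base/std_cxx1x/array.h>
    #include <vector>

    double sample_interpolation ()
    {
      using namespace dealii;

      // Tensor product grid: x in {0,1,2}, y in {0,1}; each coordinate
      // array is ascending and has at least two entries, as required.
      std_cxx1x::array<std::vector<double>,2> coordinates;
      coordinates[0].push_back (0.); coordinates[0].push_back (1.); coordinates[0].push_back (2.);
      coordinates[1].push_back (0.); coordinates[1].push_back (1.);

      // Data values f(x,y) = x*y at the grid points; the table size must
      // match the coordinate arrays.
      Table<2,double> data (3, 2);
      for (unsigned int i=0; i<3; ++i)
        for (unsigned int j=0; j<2; ++j)
          data (i, j) = coordinates[0][i] * coordinates[1][j];

      Functions::InterpolatedTensorProductGridData<2> f (coordinates, data);

      // Bilinear interpolation inside the cell [1,2]x[0,1]; points outside
      // the covered range are clamped, as value() above does.
      return f.value (Point<2> (1.5, 0.5));
    }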
truncate below and + // above to accommodate points that may lie outside the range + Point p_unit; + for (unsigned int d=0; d + InterpolatedUniformGridData:: + InterpolatedUniformGridData(const std_cxx1x::array,dim> &interval_endpoints, + const std_cxx1x::array &n_subintervals, + const Table &data_values) + : + interval_endpoints (interval_endpoints), + n_subintervals (n_subintervals), + data_values (data_values) + { + for (unsigned int d=0; d= 1, + ExcMessage ("There needs to be at least one subinterval in each " + "coordinate direction.")); + Assert (interval_endpoints[d].first < interval_endpoints[d].second, + ExcMessage ("The interval in each coordinate direction needs " + "to have positive size")); + Assert (data_values.size()[d] == n_subintervals[d]+1, + ExcMessage ("The data table does not have the correct size.")); + } + } + + + template + double + InterpolatedUniformGridData::value(const Point &p, + const unsigned int component) const + { + Assert (component == 0, + ExcMessage ("This is a scalar function object, the component can only be zero.")); + + // find out where this data point lies, relative to the given + // subdivision points + TableIndices ix; + for (unsigned int d=0; d= interval_endpoints[d].second-delta_x) + ix[d] = n_subintervals[d]-1; + else + ix[d] = (unsigned int)((p[d]-interval_endpoints[d].first) / delta_x); + } + + // now compute the relative point within the interval/rectangle/box + // defined by the point coordinates found above. truncate below and + // above to accommodate points that may lie outside the range + Point p_unit; + for (unsigned int d=0; d; template class SquareFunction<2>; @@ -2306,6 +2494,12 @@ namespace Functions template class Monomial<3>; template class Bessel1<2>; template class Bessel1<3>; + template class InterpolatedTensorProductGridData<1>; + template class InterpolatedTensorProductGridData<2>; + template class InterpolatedTensorProductGridData<3>; + template class InterpolatedUniformGridData<1>; + template class InterpolatedUniformGridData<2>; + template class InterpolatedUniformGridData<3>; } DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/source/base/parameter_handler.cc b/deal.II/source/base/parameter_handler.cc index 58f27f55fb..8742ec4c08 100644 --- a/deal.II/source/base/parameter_handler.cc +++ b/deal.II/source/base/parameter_handler.cc @@ -1643,9 +1643,14 @@ long int ParameterHandler::get_integer (const std::string &entry_string) const { std::string s = get (entry_string); char *endptr; - long int i = std::strtol (s.c_str(), &endptr, 10); - // assert there was no error - AssertThrow (*endptr == '\0', ExcConversionError(s)); + const long int i = std::strtol (s.c_str(), &endptr, 10); + + // assert that there was no error. an error would be if + // either there was no string to begin with, or if + // strtol set the endptr to anything but the end of + // the string + AssertThrow ((s.size()>0) && (*endptr == '\0'), + ExcConversionError(s)); return i; } @@ -1657,9 +1662,13 @@ double ParameterHandler::get_double (const std::string &entry_string) const std::string s = get (entry_string); char *endptr; double d = std::strtod (s.c_str(), &endptr); - // assert there was no error - AssertThrow ((*s.c_str() != '\0') || (*endptr == '\0'), - ExcConversionError(s)); + + // assert that there was no error. 
an error would be if + // either there was no string to begin with, or if + // strtol set the endptr to anything but the end of + // the string + AssertThrow ((s.size()>0) && (*endptr == '\0'), + ExcConversionError(s)); return d; } diff --git a/deal.II/source/fe/fe_values.cc b/deal.II/source/fe/fe_values.cc index 639d937fd9..066c79c4c8 100644 --- a/deal.II/source/fe/fe_values.cc +++ b/deal.II/source/fe/fe_values.cc @@ -1000,7 +1000,7 @@ namespace FEValuesViews template void do_function_values (const ::dealii::Vector &dof_values, - const Table<2,double> &shape_values, + const dealii::Table<2,double> &shape_values, const std::vector::ShapeFunctionData> &shape_function_data, std::vector > &values) { @@ -1142,7 +1142,7 @@ namespace FEValuesViews template void do_function_values (const ::dealii::Vector &dof_values, - const Table<2,double> &shape_values, + const dealii::Table<2,double> &shape_values, const std::vector::ShapeFunctionData> &shape_function_data, std::vector > &values) { @@ -2201,7 +2201,7 @@ namespace internal template void do_function_values (const double *dof_values_ptr, - const Table<2,double> &shape_values, + const dealii::Table<2,double> &shape_values, std::vector &values) { // scalar finite elements, so shape_values.size() == dofs_per_cell @@ -2235,7 +2235,7 @@ namespace internal template void do_function_values (const double *dof_values_ptr, - const Table<2,double> &shape_values, + const dealii::Table<2,double> &shape_values, const FiniteElement &fe, const std::vector &shape_function_to_row_table, VectorSlice > &values, diff --git a/deal.II/source/lac/block_sparsity_pattern.cc b/deal.II/source/lac/block_sparsity_pattern.cc index 695ae57da6..0536414ef5 100644 --- a/deal.II/source/lac/block_sparsity_pattern.cc +++ b/deal.II/source/lac/block_sparsity_pattern.cc @@ -785,6 +785,45 @@ namespace TrilinosWrappers this->collect_sizes(); } + + + void + BlockSparsityPattern::reinit (const std::vector &row_parallel_partitioning, + const std::vector &col_parallel_partitioning, + const MPI_Comm &communicator) + { + dealii::BlockSparsityPatternBase:: + reinit(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + for (size_type i=0; iblock(i,j).reinit(row_parallel_partitioning[i], + col_parallel_partitioning[j], + communicator); + this->collect_sizes(); + } + + + + void + BlockSparsityPattern::reinit (const std::vector &row_parallel_partitioning, + const std::vector &col_parallel_partitioning, + const std::vector &writable_rows, + const MPI_Comm &communicator) + { + AssertDimension(writable_rows.size(), row_parallel_partitioning.size()); + dealii::BlockSparsityPatternBase:: + reinit(row_parallel_partitioning.size(), + col_parallel_partitioning.size()); + for (size_type i=0; iblock(i,j).reinit(row_parallel_partitioning[i], + col_parallel_partitioning[j], + writable_rows[i], + communicator); + this->collect_sizes(); + } + } #endif diff --git a/deal.II/source/lac/trilinos_sparse_matrix.cc b/deal.II/source/lac/trilinos_sparse_matrix.cc index b8f2956a74..665c7ba151 100644 --- a/deal.II/source/lac/trilinos_sparse_matrix.cc +++ b/deal.II/source/lac/trilinos_sparse_matrix.cc @@ -26,6 +26,7 @@ # include # include +# include # include # include # include @@ -574,13 +575,18 @@ namespace TrilinosWrappers { matrix.reset (); - // reinit with a (parallel) Trilinos - // sparsity pattern. + // reinit with a (parallel) Trilinos sparsity pattern. 
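The new TrilinosWrappers::BlockSparsityPattern::reinit overloads taking vectors of IndexSets, added in block_sparsity_pattern.cc above, might be used along the following lines. This is a hedged sketch only: the two IndexSets, the 2x2 block layout, and the header path are assumptions, not taken from the commit.

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/block_sparsity_pattern.h>
    #include <vector>

    void setup_block_pattern (const dealii::IndexSet &owned_u,
                              const dealii::IndexSet &owned_p,
                              const MPI_Comm          communicator)
    {
      using namespace dealii;

      // One IndexSet per block row/column; here a 2x2, Stokes-like layout
      // with the same partitioning for rows and columns.
      std::vector<IndexSet> partitioning;
      partitioning.push_back (owned_u);
      partitioning.push_back (owned_p);

      TrilinosWrappers::BlockSparsityPattern bsp;
      bsp.reinit (partitioning, partitioning, communicator);

      // ... add entries to the individual blocks (e.g. via
      // DoFTools::make_sparsity_pattern) before finalizing ...
      bsp.compress ();
    }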
column_space_map.reset (new Epetra_Map (sparsity_pattern.domain_partitioner())); matrix.reset (new Epetra_FECrsMatrix (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false)); + + if (sparsity_pattern.nonlocal_graph.get() != 0) + { + nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *sparsity_pattern.nonlocal_graph)); + } compress(); + last_action = Zero; } @@ -770,6 +776,56 @@ namespace TrilinosWrappers + inline + void + SparseMatrix::compress (::dealii::VectorOperation::values operation) + { + + Epetra_CombineMode mode = last_action; + if (last_action == Zero) + { + if ((operation==::dealii::VectorOperation::add) || + (operation==::dealii::VectorOperation::unknown)) + mode = Add; + else if (operation==::dealii::VectorOperation::insert) + mode = Insert; + } + else + { + Assert( + ((last_action == Add) && (operation!=::dealii::VectorOperation::insert)) + || + ((last_action == Insert) && (operation!=::dealii::VectorOperation::add)), + ExcMessage("operation and argument to compress() do not match")); + } + + // flush buffers + int ierr; + if (nonlocal_matrix.get() != 0) + { + nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap()); + Epetra_Export exporter(nonlocal_matrix->RowMap(), matrix->RowMap()); + ierr = matrix->Export(*nonlocal_matrix, exporter, mode); + AssertThrow(ierr == 0, ExcTrilinosError(ierr)); + ierr = matrix->FillComplete(*column_space_map, matrix->RowMap()); + nonlocal_matrix->PutScalar(0); + } + else + ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(), + true, mode); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + + ierr = matrix->OptimizeStorage (); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + + last_action = Zero; + + compressed = true; + } + + + void SparseMatrix::clear () { @@ -779,6 +835,7 @@ namespace TrilinosWrappers column_space_map.reset (new Epetra_Map (0, 0, Utilities::Trilinos::comm_self())); matrix.reset (new Epetra_FECrsMatrix(View, *column_space_map, 0)); + nonlocal_matrix.reset(); matrix->FillComplete(); diff --git a/deal.II/source/lac/trilinos_sparsity_pattern.cc b/deal.II/source/lac/trilinos_sparsity_pattern.cc index 24f4618092..85ca1111fc 100644 --- a/deal.II/source/lac/trilinos_sparsity_pattern.cc +++ b/deal.II/source/lac/trilinos_sparsity_pattern.cc @@ -24,6 +24,8 @@ # include # include +# include + DEAL_II_NAMESPACE_OPEN namespace TrilinosWrappers @@ -250,6 +252,80 @@ namespace TrilinosWrappers + SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const size_type n_entries_per_row) + : + compressed (false) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + SparsityPattern::SparsityPattern (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + : + compressed (false) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + SparsityPattern::SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const size_type n_entries_per_row) + : + compressed (false) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + SparsityPattern:: + SparsityPattern (const IndexSet 
&row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + : + compressed (false) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + SparsityPattern:: + SparsityPattern (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm &communicator, + const size_type n_max_entries_per_row) + : + compressed (false) + { + reinit (row_parallel_partitioning, col_parallel_partitioning, + writable_rows, communicator, n_max_entries_per_row); + } + + + SparsityPattern::~SparsityPattern () {} @@ -282,6 +358,12 @@ namespace TrilinosWrappers const Epetra_Map &input_col_map, const size_type n_entries_per_row) { + Assert(input_row_map.IsOneToOne(), + ExcMessage("Row map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); + Assert(input_col_map.IsOneToOne(), + ExcMessage("Column map must be 1-to-1, i.e., no overlap between " + "the maps of different processors.")); graph.reset (); column_space_map.reset (new Epetra_Map (input_col_map)); compressed = false; @@ -355,6 +437,122 @@ namespace TrilinosWrappers + void + SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const size_type n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + void SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, n_entries_per_row); + } + + + + void SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const size_type n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + void + SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, n_entries_per_row); + } + + + + template + void + SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map row_map = + row_parallel_partitioning.make_trilinos_map (communicator, false); + Epetra_Map col_map = + col_parallel_partitioning.make_trilinos_map (communicator, false); + reinit (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data); + } + + + + template + void + SparsityPattern::reinit (const IndexSet ¶llel_partitioning, + const SparsityType &nontrilinos_sparsity_pattern, + const MPI_Comm &communicator, + const bool exchange_data) + { + Epetra_Map map 
= parallel_partitioning.make_trilinos_map (communicator, + false); + reinit (map, map, nontrilinos_sparsity_pattern, exchange_data); + } + + + + void + SparsityPattern::reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const IndexSet &writable_rows, + const MPI_Comm &communicator, + const size_type n_entries_per_row) + { + reinit(row_parallel_partitioning, col_parallel_partitioning, + communicator,n_entries_per_row); + + IndexSet nonlocal_partitioner = writable_rows; + AssertDimension(nonlocal_partitioner.size(), row_parallel_partitioning.size()); +#ifdef DEBUG + { + IndexSet tmp = writable_rows & row_parallel_partitioning; + Assert (tmp == row_parallel_partitioning, + ExcMessage("The set of writable rows passed to this method does not " + "contain the locally owned rows, which is not allowed.")); + } +#endif + nonlocal_partitioner.subtract_set(row_parallel_partitioning); + if (Utilities::MPI::n_mpi_processes(communicator) > 1) + { + Epetra_Map nonlocal_map = + nonlocal_partitioner.make_trilinos_map(communicator, false); + nonlocal_graph.reset(new Epetra_CrsGraph(Copy, nonlocal_map, 0)); + } + else + Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError()); + } + + + template void SparsityPattern::reinit (const Epetra_Map &input_map, @@ -509,6 +707,8 @@ namespace TrilinosWrappers *column_space_map, 0)); graph->FillComplete(); + nonlocal_graph.reset(); + compressed = true; } @@ -519,9 +719,31 @@ namespace TrilinosWrappers { int ierr; Assert (column_space_map.get() != 0, ExcInternalError()); - ierr = graph->GlobalAssemble (*column_space_map, - static_cast(graph->RangeMap()), - true); + if (nonlocal_graph.get() != 0) + { + if (nonlocal_graph->IndicesAreGlobal() == false && + nonlocal_graph->RowMap().NumMyElements() > 0) + { + // insert dummy element + TrilinosWrappers::types::int_type row = nonlocal_graph->RowMap().MyGID(0); + nonlocal_graph->InsertGlobalIndices(row, 1, &row); + } + Assert(nonlocal_graph->IndicesAreGlobal() == true, + ExcInternalError()); + nonlocal_graph->FillComplete(*column_space_map, + static_cast(graph->RangeMap())); + nonlocal_graph->OptimizeStorage(); + Epetra_Export exporter(nonlocal_graph->RowMap(), graph->RowMap()); + ierr = graph->Export(*nonlocal_graph, exporter, Add); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + ierr = + graph->FillComplete(*column_space_map, + static_cast(graph->RangeMap())); + } + else + ierr = graph->GlobalAssemble (*column_space_map, + static_cast(graph->RangeMap()), + true); AssertThrow (ierr == 0, ExcTrilinosError(ierr)); @@ -849,6 +1071,55 @@ namespace TrilinosWrappers const dealii::CompressedSimpleSparsityPattern &, bool); + + + template void + SparsityPattern::reinit (const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm &, + bool); + template void + SparsityPattern::reinit (const IndexSet &, + const dealii::CompressedSparsityPattern &, + const MPI_Comm &, + bool); + template void + SparsityPattern::reinit (const IndexSet &, + const dealii::CompressedSetSparsityPattern &, + const MPI_Comm &, + bool); + template void + SparsityPattern::reinit (const IndexSet &, + const dealii::CompressedSimpleSparsityPattern &, + const MPI_Comm &, + bool); + + + template void + SparsityPattern::reinit (const IndexSet &, + const IndexSet &, + const dealii::SparsityPattern &, + const MPI_Comm &, + bool); + template void + SparsityPattern::reinit (const IndexSet &, + const IndexSet &, + const dealii::CompressedSparsityPattern &, + const MPI_Comm &, + bool); + template void + 
SparsityPattern::reinit (const IndexSet &, + const IndexSet &, + const dealii::CompressedSetSparsityPattern &, + const MPI_Comm &, + bool); + template void + SparsityPattern::reinit (const IndexSet &, + const IndexSet &, + const dealii::CompressedSimpleSparsityPattern &, + const MPI_Comm &, + bool); + } DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/source/numerics/matrix_tools.cc b/deal.II/source/numerics/matrix_tools.cc index 74f5dcbba1..6a726729c1 100644 --- a/deal.II/source/numerics/matrix_tools.cc +++ b/deal.II/source/numerics/matrix_tools.cc @@ -2033,7 +2033,11 @@ namespace MatrixTools Assert ((p != matrix.end(row)) && (p->column() == dof_number), - ExcInternalError()); + ExcMessage("This function is trying to access an element of the " + "matrix that doesn't seem to exist. Are you using a " + "nonsymmetric sparsity pattern? If so, you are not " + "allowed to set the eliminate_column argument of this " + "function, see the documentation.")); // correct right hand side right_hand_side(row) -= p->value() / diff --git a/deal.II/source/numerics/solution_transfer.cc b/deal.II/source/numerics/solution_transfer.cc index 5e0ac217ed..8a3d59e895 100644 --- a/deal.II/source/numerics/solution_transfer.cc +++ b/deal.II/source/numerics/solution_transfer.cc @@ -173,12 +173,12 @@ namespace internal */ template void extract_interpolation_matrices (const DH &, - Table<2,FullMatrix > &) + dealii::Table<2,FullMatrix > &) {} template void extract_interpolation_matrices (const dealii::hp::DoFHandler &dof, - Table<2,FullMatrix > &matrices) + dealii::Table<2,FullMatrix > &matrices) { const dealii::hp::FECollection &fe = dof.get_fe(); matrices.reinit (fe.size(), fe.size()); diff --git a/deal.II/source/numerics/vector_tools_interpolate.inst.in b/deal.II/source/numerics/vector_tools_interpolate.inst.in index bbc0d6b237..9889393677 100644 --- a/deal.II/source/numerics/vector_tools_interpolate.inst.in +++ b/deal.II/source/numerics/vector_tools_interpolate.inst.in @@ -65,6 +65,21 @@ for (VEC : SERIAL_VECTORS ; deal_II_dimension : DIMENSIONS; deal_II_space_dimens const FullMatrix&, const VEC&, VEC&); + + template + void interpolate_based_on_material_id(const Mapping&, + const DoFHandler&, + const std::map< types::material_id, const Function* >&, + VEC&, + const ComponentMask&); + + template + void interpolate_based_on_material_id(const Mapping&, + const hp::DoFHandler&, + const std::map< types::material_id, const Function* >&, + VEC&, + const ComponentMask&); + \} #endif } diff --git a/deal.II/tests/quick_tests/CMakeLists.txt b/deal.II/tests/quick_tests/CMakeLists.txt index 232036aae0..e8d639f481 100644 --- a/deal.II/tests/quick_tests/CMakeLists.txt +++ b/deal.II/tests/quick_tests/CMakeLists.txt @@ -21,7 +21,7 @@ ENABLE_TESTING() # Use the first available build type (this prefers debug mode if available): LIST(GET DEAL_II_BUILD_TYPES 0 _mybuild) -MESSAGE(STATUS "Setup quick_tests in ${_mybuild} mode") +MESSAGE(STATUS "Setting up quick_tests in ${_mybuild} mode") SET(ALL_TESTS) # clean variable @@ -108,6 +108,5 @@ ADD_CUSTOM_TARGET(test COMMAND ${CMAKE_COMMAND} -D ALL_TESTS="${ALL_TESTS}" -P ${CMAKE_CURRENT_SOURCE_DIR}/run.cmake WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} ) -ADD_DEPENDENCIES(test build_library) -MESSAGE(STATUS "Setup quick_tests in ${_mybuild} mode - Done") +MESSAGE(STATUS "Setting up quick_tests in ${_mybuild} mode - Done")
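As a closing illustration of the central new feature of this patch, the writable_rows/nonlocal graph machinery: the sketch below shows how a sparsity pattern (and a matrix built from it) might be set up so that selected off-processor rows can be written into directly and are exported to their owners during compress(). The reinit signature is the one added above; everything else (the IndexSets, passing 0 as the entries-per-row hint, omitting the actual entry insertion) is an assumption for illustration.

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/trilinos_sparsity_pattern.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>

    void setup_writable_pattern (const dealii::IndexSet &locally_owned,
                                 const dealii::IndexSet &locally_relevant,
                                 const MPI_Comm          communicator)
    {
      using namespace dealii;

      // writable_rows must contain the locally owned rows; here we allow
      // writing into all locally relevant rows (owned rows plus ghost rows),
      // e.g. rows touched through hanging-node constraints.
      TrilinosWrappers::SparsityPattern sp;
      sp.reinit (locally_owned, locally_owned, locally_relevant,
                 communicator, /*n_entries_per_row=*/0);

      // Entries added to off-processor rows listed in writable_rows go into
      // the separate nonlocal graph and are shipped to their owners here.
      // (In a real program one would first add entries, e.g. via
      // DoFTools::make_sparsity_pattern.)
      sp.compress ();

      // A matrix initialized from this pattern inherits the nonlocal part
      // and can assemble into the same writable off-processor rows; its
      // compress() exports them in the same way.
      TrilinosWrappers::SparseMatrix matrix;
      matrix.reinit (sp);
    }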