From 6462b87e2e2bf691f04c89190c4ba1e72dac0f90 Mon Sep 17 00:00:00 2001
From: Jean-Paul Pelteret
Date: Thu, 6 Dec 2018 14:37:14 +0100
Subject: [PATCH] Refactor the AD driver classes.

This commit significantly reworks the implementation of the AD driver classes.
Firstly, all class methods are moved to the associated source file. The
(logically) static class methods have been removed, so all methods must be
called through an instance of a driver class. The previously implemented logic
for existing ADOL-C tapes has been superseded by low-level calls to the
library itself.

The ADHelper classes are updated to accommodate the changes to the driver
classes.

Fixes #7335
Fixes #7372
---
 .../deal.II/differentiation/ad/ad_drivers.h | 1577 +++---
 .../deal.II/differentiation/ad/ad_helpers.h | 3 +
 source/differentiation/ad/CMakeLists.txt | 5 +-
 source/differentiation/ad/ad_drivers.cc | 2258 +++++++++++++++++
 source/differentiation/ad/ad_drivers.inst1.in | 73 +
 source/differentiation/ad/ad_drivers.inst2.in | 83 +
 source/differentiation/ad/ad_helpers.cc | 61 +-
 7 files changed, 2808 insertions(+), 1252 deletions(-)
 create mode 100644 source/differentiation/ad/ad_drivers.cc
 create mode 100644 source/differentiation/ad/ad_drivers.inst1.in
 create mode 100644 source/differentiation/ad/ad_drivers.inst2.in

diff --git a/include/deal.II/differentiation/ad/ad_drivers.h b/include/deal.II/differentiation/ad/ad_drivers.h
index 05ec558406..5af046665b 100644
--- a/include/deal.II/differentiation/ad/ad_drivers.h
+++ b/include/deal.II/differentiation/ad/ad_drivers.h
@@ -20,12 +20,9 @@
 #include
 #include
-#include
 #include
 #include
-#include
-#include
 #include
 #include
@@ -33,10 +30,7 @@
 #ifdef DEAL_II_WITH_ADOLC
 DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
-# include
-# include
 # include
-# include
 DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
 #endif // DEAL_II_WITH_ADOLC
@@ -136,7 +130,7 @@ namespace Differentiation
 /**
- * A driver class for taped auto-differentiable numbers.
+ * A prototype driver class for taped auto-differentiable numbers.
 *
 * It is intended that this class be specialized for the valid
 * combinations of auto-differentiable numbers and output scalar
@@ -258,6 +252,12 @@ namespace Differentiation
 const typename Types::tape_index active_tape_index,
 const bool write_tapes_to_file);
+ /**
+ * Return a list of registered tape indices.
+ */
+ std::vector::tape_index>
+ get_registered_tape_indices() const;
+
 /**
 * Select a tape to record to or read from.
 *
@@ -275,6 +275,39 @@ namespace Differentiation
 void
 activate_tape(const typename Types::tape_index tape_index);
+ /**
+ * Return a flag that, when true, indicates that the retaping
+ * of the dependent function for the chosen @p tape_index is necessary for
+ * a reliable computation to be performed.
+ * This may be necessary if a sign comparison within branched operations
+ * yields different results to those computed at the original tape
+ * evaluation point.
+ *
+ * @note The chosen tape index must be greater than
+ * Numbers::invalid_tape_index and less than
+ * Numbers::max_tape_index.
+ */
+ bool
+ requires_retaping(
+ const typename Types::tape_index tape_index) const;
+
+ /**
+ * Return a flag that, when true, indicates that the retaping
+ * of the dependent function is necessary for a reliable computation to be
+ * performed on the active tape.
+ * This may be necessary if a sign comparison within branched operations
+ * yields different results to those computed at the original tape
+ * evaluation point.
+ */ + bool + last_action_requires_retaping() const; + + /** + * Completely erases the tape with the given @p tape_index. + */ + void + remove_tape(const typename Types::tape_index tape_index); + /** * Reset the state of the class. * @@ -302,10 +335,10 @@ namespace Differentiation * @param[out] stream The output stream to which the values are to be * written. */ - static void + void print_tape_stats( const typename Types::tape_index tape_index, - std::ostream & stream); + std::ostream & stream) const; //@} @@ -324,9 +357,9 @@ namespace Differentiation * * @return The scalar value of the function. */ - static ScalarType + ScalarType value(const typename Types::tape_index active_tape_index, - const std::vector &independent_variables); + const std::vector &independent_variables) const; /** * Compute the gradient of the scalar field with respect to all @@ -341,10 +374,10 @@ namespace Differentiation * correct size (with length * n_independent_variables). */ - static void + void gradient(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - Vector & gradient); + Vector & gradient) const; /** * Compute the Hessian of the scalar field with respect to all @@ -359,10 +392,10 @@ namespace Differentiation * size (with dimensions * n_independent_variables$\times$n_independent_variables). */ - static void + void hessian(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - FullMatrix & hessian); + FullMatrix & hessian) const; //@} @@ -383,11 +416,11 @@ namespace Differentiation * It is expected that this vector be of the correct size * (with length n_dependent_variables). */ - static void + void values(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - Vector & values); + Vector & values) const; /** * Compute the Jacobian of the vector field. @@ -407,11 +440,11 @@ namespace Differentiation * size (with dimensions * n_dependent_variables$\times$n_independent_variables). */ - static void + void jacobian(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - FullMatrix & jacobian); + FullMatrix & jacobian) const; //@} }; @@ -510,8 +543,8 @@ namespace Differentiation * * @return The scalar value of the function. */ - static ScalarType - value(const std::vector &dependent_variables); + ScalarType + value(const std::vector &dependent_variables) const; /** * Compute the gradient of the scalar field with respect to all @@ -526,10 +559,10 @@ namespace Differentiation * correct size (with length * n_independent_variables). */ - static void + void gradient(const std::vector &independent_variables, const std::vector &dependent_variables, - Vector & gradient); + Vector & gradient) const; /** * Compute the Hessian of the scalar field with respect to all @@ -544,10 +577,10 @@ namespace Differentiation * size (with dimensions * n_independent_variables$\times$n_independent_variables). */ - static void + void hessian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & hessian); + FullMatrix & hessian) const; //@} @@ -565,9 +598,9 @@ namespace Differentiation * It is expected that this vector be of the correct size * (with length n_dependent_variables). */ - static void + void values(const std::vector &dependent_variables, - Vector & values); + Vector & values) const; /** * Compute the Jacobian of the vector field. 
@@ -586,10 +619,10 @@ namespace Differentiation * size (with dimensions * n_dependent_variables$\times$n_independent_variables). */ - static void + void jacobian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & jacobian); + FullMatrix & jacobian) const; //@} }; @@ -608,170 +641,6 @@ namespace Differentiation { namespace AD { - // ------------- TapedDrivers ------------- - - - template - bool - TapedDrivers::is_recording() const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return false; - } - - - template - typename Types::tape_index - TapedDrivers::active_tape_index() const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return Numbers::invalid_tape_index; - } - - - template - bool - TapedDrivers::is_registered_tape( - const typename Types::tape_index) const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return false; - } - - - template - bool - TapedDrivers::keep_independent_values() const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return false; - } - - - template - void - TapedDrivers::set_tape_buffer_sizes( - const typename Types::tape_buffer_sizes, - const typename Types::tape_buffer_sizes, - const typename Types::tape_buffer_sizes, - const typename Types::tape_buffer_sizes) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::start_taping( - const typename Types::tape_index, - const bool) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::stop_taping( - const typename Types::tape_index, - const bool) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::activate_tape( - const typename Types::tape_index) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::reset(const bool) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::print(std::ostream &) const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::print_tape_stats( - const typename Types::tape_index, - std::ostream &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - ScalarType - TapedDrivers::value( - const typename Types::tape_index, - const std::vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return ScalarType(0.0); - } - - - template - void - TapedDrivers::gradient( - const typename Types::tape_index, - const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::hessian( - const typename Types::tape_index, - const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::values( - const typename Types::tape_index, - const unsigned int, - const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapedDrivers::jacobian( - const typename Types::tape_index, - const unsigned int, - const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - # ifdef DEAL_II_WITH_ADOLC /** @@ -846,309 +715,114 @@ namespace Differentiation { using scalar_type = double; - TapedDrivers() - : active_tape(Numbers::invalid_tape_index) - , keep_values(true) - , is_recording_flag(false) - , 
use_stored_taped_buffer_sizes(false) - , obufsize(0u) - , lbufsize(0u) - , vbufsize(0u) - , tbufsize(0u) - {} + /** + * Constructor + */ + TapedDrivers(); + - // === Taping === + /** + * @name Taping + */ + //@{ bool - is_recording() const - { - return is_recording_flag; - } + is_recording() const; typename Types::tape_index - active_tape_index() const - { - return active_tape; - } + active_tape_index() const; bool - keep_independent_values() const - { - return keep_values; - } + keep_independent_values() const; bool is_registered_tape( - const typename Types::tape_index tape_index) const - { - // See https://gitlab.com/adol-c/adol-c/issues/11#note_108341333 - try - { - std::vector counts(STAT_SIZE); - ::tapestats(tape_index, counts.data()); - return true; - } - catch (const ::FatalError &exc) - { - return false; - } - } + const typename Types::tape_index tape_index) const; void set_tape_buffer_sizes( const typename Types::tape_buffer_sizes in_obufsize, const typename Types::tape_buffer_sizes in_lbufsize, const typename Types::tape_buffer_sizes in_vbufsize, - const typename Types::tape_buffer_sizes in_tbufsize) - { - // When valid for the chosen AD number type, these values will be used - // the next time start_recording_operations() is called. - obufsize = in_obufsize; - lbufsize = in_lbufsize; - vbufsize = in_vbufsize; - tbufsize = in_tbufsize; - use_stored_taped_buffer_sizes = true; - } + const typename Types::tape_buffer_sizes in_tbufsize); void start_taping(const typename Types::tape_index tape_index, - const bool keep_independent_values) - { - if (use_stored_taped_buffer_sizes) - trace_on(tape_index, - keep_independent_values, - obufsize, - lbufsize, - vbufsize, - tbufsize); - else - trace_on(tape_index, keep_independent_values); - - // Set some other flags to their indicated / required values - keep_values = keep_independent_values; - is_recording_flag = true; - } + const bool keep_independent_values); void stop_taping( const typename Types::tape_index active_tape_index, - const bool write_tapes_to_file) - { - if (write_tapes_to_file) - trace_off(active_tape_index); // Slow - else - trace_off(); // Fast(er) - - // Now that we've turned tracing off, we've definitely - // stopped all tape recording. - is_recording_flag = false; - - // If the keep_values flag is set, then we expect the user to use this - // tape immediately after recording it. There is therefore no need to - // invalidate it. However, there is now also no way to double-check - // that the newly recorded tape is indeed the active tape. - if (keep_independent_values() == false) - active_tape = Numbers::invalid_tape_index; - } - - void - activate_tape(const typename Types::tape_index tape_index) - { - active_tape = tape_index; - } - - void - reset(const bool clear_registered_tapes) - { - active_tape = Numbers::invalid_tape_index; - is_recording_flag = false; - if (clear_registered_tapes) - { - const std::vector::tape_index> - registered_tape_indices = get_registered_tape_indices(); - for (const auto &tape_index : registered_tape_indices) - removeTape(tape_index, TapeRemovalType::ADOLC_REMOVE_COMPLETELY); - } - } - - void - print(std::ostream &stream) const - { - const std::vector::tape_index> - registered_tape_indices = get_registered_tape_indices(); - stream << "Registered tapes: "; - auto it_registered_tape = registered_tape_indices.begin(); - for (unsigned int i = 0; i < registered_tape_indices.size(); - ++i, ++it_registered_tape) - stream << *it_registered_tape - << (i < (registered_tape_indices.size() - 1) ? 
"," : ""); - stream << "\n"; - - stream << "Keep values? " << keep_independent_values() << "\n"; - stream << "Use stored tape buffer sizes? " - << use_stored_taped_buffer_sizes << "\n"; - } + const bool write_tapes_to_file); - static void + std::vector::tape_index> + get_registered_tape_indices() const; + + void + activate_tape(const typename Types::tape_index tape_index); + + bool + requires_retaping( + const typename Types::tape_index tape_index) const; + + bool + last_action_requires_retaping() const; + + void + remove_tape(const typename Types::tape_index tape_index); + + void + reset(const bool clear_registered_tapes); + + void + print(std::ostream &stream) const; + + void print_tape_stats( const typename Types::tape_index tape_index, - std::ostream & stream) - { - // See ADOL-C manual section 2.1 - // and adolc/taping.h - std::vector counts(STAT_SIZE); - ::tapestats(tape_index, counts.data()); - Assert(counts.size() >= 18, ExcInternalError()); - stream - << "Tape index: " << tape_index << "\n" - << "Number of independent variables: " << counts[0] << "\n" - << "Number of dependent variables: " << counts[1] << "\n" - << "Max number of live, active variables: " << counts[2] << "\n" - << "Size of taylor stack (number of overwrites): " << counts[3] - << "\n" - << "Operations buffer size: " << counts[4] << "\n" - << "Total number of recorded operations: " << counts[5] << "\n" - << "Operations file written or not: " << counts[6] << "\n" - << "Overall number of locations: " << counts[7] << "\n" - << "Locations file written or not: " << counts[8] << "\n" - << "Overall number of values: " << counts[9] << "\n" - << "Values file written or not: " << counts[10] << "\n" - << "Locations buffer size: " << counts[11] << "\n" - << "Values buffer size: " << counts[12] << "\n" - << "Taylor buffer size: " << counts[13] << "\n" - << "Number of eq_*_prod for sparsity pattern: " << counts[14] << "\n" - << "Use of 'min_op', deferred to 'abs_op' for piecewise calculations: " - << counts[15] << "\n" - << "Number of 'abs' calls that can switch branch: " << counts[16] - << "\n" - << "Number of parameters (doubles) interchangeable without retaping: " - << counts[17] << "\n" - << std::flush; - } - - - // === Scalar drivers === - - static scalar_type - value(const typename Types::tape_index active_tape_index, - const std::vector &independent_variables) - { - scalar_type value = 0.0; + std::ostream & stream) const; + + //@} - ::function(active_tape_index, - 1, // Only one dependent variable - independent_variables.size(), - const_cast(independent_variables.data()), - &value); + /** + * @name Drivers for scalar functions (one dependent variable) + */ + //@{ - return value; - } + scalar_type + value(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables) const; - static void + void gradient(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - Vector & gradient) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(gradient.size() == independent_variables.size(), - ExcDimensionMismatch(gradient.size(), - independent_variables.size())); - - // Note: ADOL-C's ::gradient function expects a *double as the last - // parameter. Here we take advantage of the fact that the data in the - // Vector class is aligned (e.g. 
stored as an Array) - ::gradient(active_tape_index, - independent_variables.size(), - const_cast(independent_variables.data()), - gradient.data()); - } + Vector & gradient) const; - static void + void hessian(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - FullMatrix & hessian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 2, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 2)); - Assert(hessian.m() == independent_variables.size(), - ExcDimensionMismatch(hessian.m(), independent_variables.size())); - Assert(hessian.n() == independent_variables.size(), - ExcDimensionMismatch(hessian.n(), independent_variables.size())); - - const unsigned int n_independent_variables = - independent_variables.size(); - std::vector H(n_independent_variables); - for (unsigned int i = 0; i < n_independent_variables; ++i) - H[i] = &hessian[i][0]; - - ::hessian(active_tape_index, - n_independent_variables, - const_cast(independent_variables.data()), - H.data()); - - // ADOL-C builds only the lower-triangular part of the - // symmetric Hessian, so we should copy the relevant - // entries into the upper triangular part. - for (unsigned int i = 0; i < n_independent_variables; i++) - for (unsigned int j = 0; j < i; j++) - hessian[j][i] = hessian[i][j]; // Symmetry - } - - // === Vector drivers === + FullMatrix & hessian) const; - static void + //@} + + /** + * @name Drivers for vector functions (multiple dependent variables) + */ + //@{ + + void values(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - Vector & values) - { - Assert(values.size() == n_dependent_variables, - ExcDimensionMismatch(values.size(), n_dependent_variables)); - - // Note: ADOL-C's ::function function expects a *double as the last - // parameter. Here we take advantage of the fact that the data in the - // Vector class is aligned (e.g. stored as an Array) - ::function(active_tape_index, - n_dependent_variables, - independent_variables.size(), - const_cast(independent_variables.data()), - values.data()); - } + Vector & values) const; - static void + void jacobian(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - FullMatrix & jacobian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(jacobian.m() == n_dependent_variables, - ExcDimensionMismatch(jacobian.m(), n_dependent_variables)); - Assert(jacobian.n() == independent_variables.size(), - ExcDimensionMismatch(jacobian.n(), - independent_variables.size())); - - std::vector J(n_dependent_variables); - for (unsigned int i = 0; i < n_dependent_variables; ++i) - J[i] = &jacobian[i][0]; - - ::jacobian(active_tape_index, - n_dependent_variables, - independent_variables.size(), - independent_variables.data(), - J.data()); - } + FullMatrix & jacobian) const; + + //@} protected: /** @@ -1171,6 +845,33 @@ namespace Differentiation */ bool is_recording_flag; + /** + * The status of the last function or derivative evaluation performed on + * the selected tape. As quoted from the ADOL-C manual, this can take + * on one of six different values with the following interpretation: + * + * - +3: The function is locally analytic. 
+ * - +2: The function is locally analytic but the sparsity + * structure (compared to the situation at the taping point) may have + * changed, e.g. while at taping arguments fmax(a,b) + * returned a, we get b at the argument + * currently used. + * - +1: At least one of the functions fmin, + * fmax or fabs is evaluated at a tie or + * zero, respectively. Hence, the function to be differentiated is + * Lipschitz-continuous but possibly non-differentiable. + * - 0: Some arithmetic comparison involving adoubles yields a + * tie. Hence, the function to be differentiated may be discontinuous. + * - -1: An adouble comparison yields different + * results from the evaluation point at which the tape was generated. + * - -2: The argument of a user-defined quadrature has changed from + * the evaluation point at which the tape was generated. + * + * When the @p status variable takes a negative value, retaping of the dependent + * function is necessary for a reliable computation to be performed. + */ + mutable std::map::tape_index, int> status; + /** * A flag indicating that we should preferentially use the user-defined * taped buffer sizes as opposed to either the default values selected @@ -1198,30 +899,9 @@ namespace Differentiation * ADOL-C Taylor buffer size. */ typename Types::tape_buffer_sizes tbufsize; - - /** - * Return a list of registered tape indices - */ - std::vector::tape_index> - get_registered_tape_indices() const - { - // We've chosen to use unsigned shorts for the tape - // index type (a safety precaution) so we need to - // perform a conversion betwwen ADOL-C's native tape - // index type and that chosen by us. - std::vector registered_tape_indices_s; - cachedTraceTags(registered_tape_indices_s); - - std::vector::tape_index> - registered_tape_indices(registered_tape_indices_s.size()); - std::copy(registered_tape_indices_s.begin(), - registered_tape_indices_s.end(), - registered_tape_indices.begin()); - return registered_tape_indices; - } }; -# else // DEAL_II_WITH_ADOLC +# else /** * Specialization for taped ADOL-C auto-differentiable numbers. @@ -1229,7 +909,7 @@ namespace Differentiation * Although we could revert to the default definition for the * unspecialized TapedDrivers class, we add this specialization * to provide a more descriptive error message if any of its - * static member functions are called. + * member functions are called. 
*/ template struct TapedDrivers< @@ -1240,134 +920,107 @@ namespace Differentiation { using scalar_type = double; - // === Taping === + /** + * @name Taping + */ + //@{ bool - is_recording() const - { - AssertThrow(false, ExcRequiresADOLC()); - return false; - } + is_recording() const; typename Types::tape_index - active_tape_index() const - { - AssertThrow(false, ExcRequiresADOLC()); - return Numbers::invalid_tape_index; - } + active_tape_index() const; bool - keep_independent_values() const - { - AssertThrow(false, ExcRequiresADOLC()); - return false; - } + keep_independent_values() const; bool - is_registered_tape(const typename Types::tape_index) const - { - AssertThrow(false, ExcRequiresADOLC()); - return false; - } + is_registered_tape( + const typename Types::tape_index tape_index) const; void set_tape_buffer_sizes( const typename Types::tape_buffer_sizes, const typename Types::tape_buffer_sizes, const typename Types::tape_buffer_sizes, - const typename Types::tape_buffer_sizes) - { - AssertThrow(false, ExcRequiresADOLC()); - } + const typename Types::tape_buffer_sizes); void - start_taping(const typename Types::tape_index, const bool) - { - AssertThrow(false, ExcRequiresADOLC()); - } + start_taping(const typename Types::tape_index, const bool); void - stop_taping(const typename Types::tape_index, const bool) - { - AssertThrow(false, ExcRequiresADOLC()); - } + stop_taping(const typename Types::tape_index, const bool); + + std::vector::tape_index> + get_registered_tape_indices() const; void - activate_tape(const typename Types::tape_index) - { - AssertThrow(false, ExcRequiresADOLC()); - } + activate_tape(const typename Types::tape_index); + + bool + requires_retaping(const typename Types::tape_index) const; + + bool + last_action_requires_retaping() const; void - reset(const bool) - { - AssertThrow(false, ExcRequiresADOLC()); - } + remove_tape(const typename Types::tape_index); void - print(std::ostream &) const - { - AssertThrow(false, ExcRequiresADOLC()); - } + reset(const bool); - static void + void + print(std::ostream &stream) const; + + void print_tape_stats(const typename Types::tape_index, - std::ostream &) - { - AssertThrow(false, ExcRequiresADOLC()); - } + std::ostream &) const; + //@} - // === Scalar drivers === + /** + * @name Drivers for scalar functions (one dependent variable) + */ + //@{ - static scalar_type + scalar_type value(const typename Types::tape_index, - const std::vector &) - { - AssertThrow(false, ExcRequiresADOLC()); - return 0.0; - } + const std::vector &) const; - static void + void gradient(const typename Types::tape_index, const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADOLC()); - } + Vector &) const; - static void + void hessian(const typename Types::tape_index, const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADOLC()); - } + FullMatrix &) const; - // === Vector drivers === + //@} - static void + /** + * @name Drivers for vector functions (multiple dependent variables) + */ + //@{ + + void values(const typename Types::tape_index, const unsigned int, const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADOLC()); - } + Vector &) const; - static void + void jacobian(const typename Types::tape_index, const unsigned int, const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADOLC()); - } + FullMatrix &) const; + + //@} }; # endif // DEAL_II_WITH_ADOLC - /** * Specialization for ADOL-C taped numbers. It is expected that the * scalar return type for this class is a float. 
@@ -1385,191 +1038,115 @@ namespace Differentiation { using scalar_type = float; - // === Taping === + /** + * @name Taping + */ + //@{ bool - is_recording() const - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - return taped_driver.is_recording(); - } + is_recording() const; typename Types::tape_index - active_tape_index() const - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - return taped_driver.active_tape_index(); - } + active_tape_index() const; bool - keep_independent_values() const - { - return taped_driver.keep_independent_values(); - } + keep_independent_values() const; bool is_registered_tape( - const typename Types::tape_index tape_index) const - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - return taped_driver.is_registered_tape(tape_index); - } + const typename Types::tape_index tape_index) const; void set_tape_buffer_sizes( const typename Types::tape_buffer_sizes obufsize, const typename Types::tape_buffer_sizes lbufsize, const typename Types::tape_buffer_sizes vbufsize, - const typename Types::tape_buffer_sizes tbufsize) - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - taped_driver.set_tape_buffer_sizes(obufsize, - lbufsize, - vbufsize, - tbufsize); - } + const typename Types::tape_buffer_sizes tbufsize); void start_taping(const typename Types::tape_index tape_index, - const bool keep_independent_values) - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - taped_driver.start_taping(tape_index, keep_independent_values); - } + const bool keep_independent_values); void stop_taping( const typename Types::tape_index active_tape_index, - const bool write_tapes_to_file) - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - taped_driver.stop_taping(active_tape_index, write_tapes_to_file); - } + const bool write_tapes_to_file); + + std::vector::tape_index> + get_registered_tape_indices() const; + + void + activate_tape(const typename Types::tape_index tape_index); + + bool + requires_retaping( + const typename Types::tape_index tape_index) const; + + bool + last_action_requires_retaping() const; void - activate_tape(const typename Types::tape_index tape_index) - { - taped_driver.activate_tape(tape_index); - } + remove_tape(const typename Types::tape_index tape_index); void - reset(const bool clear_registered_tapes) - { - taped_driver.reset(clear_registered_tapes); - } + reset(const bool clear_registered_tapes); void - print(std::ostream &stream) const - { - taped_driver.print(stream); - } + print(std::ostream &stream) const; - static void + void print_tape_stats( const typename Types::tape_index tape_index, - std::ostream & stream) - { - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - TapedDrivers::print_tape_stats(tape_index, - stream); - } + std::ostream & stream) const; + + //@} - // === Scalar drivers === + /** + * @name Drivers for scalar functions (one dependent variable) + */ + //@{ - static scalar_type + scalar_type value(const typename Types::tape_index active_tape_index, - const std::vector &independent_variables) - { - // ADOL-C only supports 'double', not 
'float', so we can forward to - // the 'double' implementation of this function - return TapedDrivers::value( - active_tape_index, vector_float_to_double(independent_variables)); - } + const std::vector &independent_variables) const; - static void + void gradient(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - Vector & gradient) - { - Vector gradient_double(gradient.size()); - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - TapedDrivers::gradient(active_tape_index, - vector_float_to_double( - independent_variables), - gradient_double); - gradient = gradient_double; - } + Vector & gradient) const; - static void + void hessian(const typename Types::tape_index active_tape_index, const std::vector &independent_variables, - FullMatrix & hessian) - { - FullMatrix hessian_double(hessian.m(), hessian.n()); - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - TapedDrivers::hessian(active_tape_index, - vector_float_to_double( - independent_variables), - hessian_double); - hessian = hessian_double; - } - - // === Vector drivers === + FullMatrix & hessian) const; - static void + //@} + + /** + * @name Drivers for vector functions (multiple dependent variables) + */ + //@{ + + void values(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - Vector & values) - { - Vector values_double(values.size()); - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - TapedDrivers::values(active_tape_index, - n_dependent_variables, - vector_float_to_double( - independent_variables), - values_double); - values = values_double; - } + Vector & values) const; - static void + void jacobian(const typename Types::tape_index active_tape_index, const unsigned int n_dependent_variables, const std::vector &independent_variables, - FullMatrix & jacobian) - { - FullMatrix jacobian_double(jacobian.m(), jacobian.n()); - // ADOL-C only supports 'double', not 'float', so we can forward to - // the 'double' implementation of this function - TapedDrivers::jacobian(active_tape_index, - n_dependent_variables, - vector_float_to_double( - independent_variables), - jacobian_double); - jacobian = jacobian_double; - } + FullMatrix & jacobian) const; + + //@} private: /** * Copy a vector of floats into a vector of doubles */ - static std::vector - vector_float_to_double(const std::vector &in) - { - std::vector out(in.size()); - std::copy(in.begin(), in.end(), out.begin()); - return out; - } + std::vector + vector_float_to_double(const std::vector &in) const; /** * The object that actually takes care of the taping @@ -1581,235 +1158,6 @@ namespace Differentiation // ------------- TapelessDrivers ------------- - template - void - TapelessDrivers::initialize_global_environment( - const unsigned int) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - template - void - TapelessDrivers:: - allow_dependent_variable_marking() - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - template - void - TapelessDrivers:: - prevent_dependent_variable_marking() - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - template - bool - TapelessDrivers:: - is_dependent_variable_marking_allowed() const - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return 
false; - } - - - template - ScalarType - TapelessDrivers::value( - const std::vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - return ScalarType(0.0); - } - - - template - void - TapelessDrivers::gradient( - const std::vector &, - const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapelessDrivers::hessian( - const std::vector &, - const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapelessDrivers::values( - const std::vector &, - Vector &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - template - void - TapelessDrivers::jacobian( - const std::vector &, - const std::vector &, - FullMatrix &) - { - AssertThrow(false, ExcRequiresADNumberSpecialization()); - } - - - namespace internal - { - /** - * A dummy function to define the active dependent variable when using - * reverse-mode AD. - */ - template - typename std::enable_if::type_code == - NumberTypes::sacado_rad || - ADNumberTraits::type_code == - NumberTypes::sacado_rad_dfad)>::type - reverse_mode_dependent_variable_activation(ADNumberType &) - {} - -# ifdef DEAL_II_TRILINOS_WITH_SACADO - - - /** - * Define the active dependent variable when using reverse-mode AD. - * - * If there are multiple dependent variables then it is necessary to - * inform the independent variables, from which the adjoints are computed, - * which dependent variable they are computing the gradients with respect - * to. This function broadcasts this information. - */ - template - typename std::enable_if::type_code == - NumberTypes::sacado_rad || - ADNumberTraits::type_code == - NumberTypes::sacado_rad_dfad>::type - reverse_mode_dependent_variable_activation( - ADNumberType &dependent_variable) - { - // Compute all gradients (adjoints) for this - // reverse-mode Sacado dependent variable. - // For reverse-mode Sacado numbers it is necessary to broadcast to - // all independent variables that it is time to compute gradients. - // For one dependent variable one would just need to call - // ADNumberType::Gradcomp(), but since we have a more - // generic implementation for vectors of dependent variables - // (vector mode) we default to the complex case. - ADNumberType::Outvar_Gradcomp(dependent_variable); - } - -# endif - - - /** - * A dummy function to enable vector mode for tapeless - * auto-differentiable numbers. - */ - template - typename std::enable_if::type_code == - NumberTypes::adolc_tapeless)>::type - configure_tapeless_mode(const unsigned int) - {} - - -# ifdef DEAL_II_WITH_ADOLC - - - /** - * Enable vector mode for ADOL-C tapeless numbers. - * - * This function checks to see if its legal to increase the maximum - * number of directional derivatives to be considered during calculations. - * If not then it throws an error. - */ - template - typename std::enable_if::type_code == - NumberTypes::adolc_tapeless>::type - configure_tapeless_mode(const unsigned int n_directional_derivatives) - { -# ifdef DEAL_II_ADOLC_WITH_TAPELESS_REFCOUNTING - // See ADOL-C manual section 7.1 - // - // NOTE: It is critical that this is done for tapeless mode BEFORE - // any adtl::adouble are created. 
If this is not done, then we see - // this scary warning: - // - // " - // ADOL-C Warning: Tapeless: Setting numDir could change memory - // allocation of derivatives in existing adoubles and may lead to - // erroneous results or memory corruption - // " - // - // So we use this dummy function to configure this setting before - // we create and initialize our class data - const std::size_t n_live_variables = adtl::refcounter::getNumLiveVar(); - if (n_live_variables == 0) - { - adtl::setNumDir(n_directional_derivatives); - } - else - { - // So there are some live active variables floating around. Here we - // check if we ask to increase the number of computable - // directional derivatives. If this really is necessary then it's - // absolutely vital that there exist no live variables before doing - // so. - const std::size_t n_set_directional_derivatives = adtl::getNumDir(); - if (n_directional_derivatives > n_set_directional_derivatives) - AssertThrow( - n_live_variables == 0, - ExcMessage( - "There are currently " + - Utilities::to_string(n_live_variables) + - " live " - "adtl::adouble variables in existence. They currently " - "assume " + - Utilities::to_string(n_set_directional_derivatives) + - " directional derivatives " - "but you wish to increase this to " + - Utilities::to_string(n_directional_derivatives) + - ". \n" - "To safely change (or more specifically in this case, " - "increase) the number of directional derivatives, there " - "must be no tapeless doubles in local/global scope.")); - } -# else - // If ADOL-C is not configured with tapeless number reference counting - // then there is no way that we can guarantee that the following call - // is safe. No comment... :-/ - adtl::setNumDir(n_directional_derivatives); -# endif - } - -# else // DEAL_II_WITH_ADOLC - - template - typename std::enable_if::type_code == - NumberTypes::adolc_tapeless>::type - configure_tapeless_mode(const unsigned int /*n_directional_derivatives*/) - { - AssertThrow(false, ExcRequiresADOLC()); - } - -# endif - - } // namespace internal - - /** * Specialization for auto-differentiable numbers that use * reverse mode to compute the first derivatives (and, if supported, @@ -1824,192 +1172,72 @@ namespace Differentiation ADNumberTraits::type_code == NumberTypes::sacado_rad_dfad>::type> { - TapelessDrivers() - : dependent_variable_marking_safe(false) - {} + /** + * Constructor + */ + TapelessDrivers(); - // === Configuration === + /** + * @name Configuration + */ + //@{ static void - initialize_global_environment(const unsigned int n_independent_variables) - { - internal::configure_tapeless_mode( - n_independent_variables); - } + initialize_global_environment(const unsigned int n_independent_variables); + + //@} + + /** + * Operation status + */ + //@{ - // === Operation status === void - allow_dependent_variable_marking() - { - dependent_variable_marking_safe = true; - } + allow_dependent_variable_marking(); void - prevent_dependent_variable_marking() - { - dependent_variable_marking_safe = false; - } + prevent_dependent_variable_marking(); bool - is_dependent_variable_marking_allowed() const - { - return dependent_variable_marking_safe; - } - - // === Scalar drivers === - - static ScalarType - value(const std::vector &dependent_variables) - { - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - return ADNumberTraits::get_scalar_value( - dependent_variables[0]); - } + is_dependent_variable_marking_allowed() const; - static void + //@} + + /** + * @name Drivers 
for scalar functions + */ + //@{ + + ScalarType + value(const std::vector &dependent_variables) const; + + void gradient(const std::vector &independent_variables, const std::vector &dependent_variables, - Vector & gradient) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - Assert(gradient.size() == independent_variables.size(), - ExcDimensionMismatch(gradient.size(), - independent_variables.size())); - - // In reverse mode, the gradients are computed from the - // independent variables (i.e. the adjoint) - internal::reverse_mode_dependent_variable_activation( - const_cast(dependent_variables[0])); - const std::size_t n_independent_variables = - independent_variables.size(); - for (unsigned int i = 0; i < n_independent_variables; i++) - gradient[i] = internal::NumberType::value( - ADNumberTraits::get_directional_derivative( - independent_variables[i], - 0 /*This number doesn't really matter*/)); - } + Vector & gradient) const; - static void + void hessian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & hessian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 2, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 2)); - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - Assert(hessian.m() == independent_variables.size(), - ExcDimensionMismatch(hessian.m(), independent_variables.size())); - Assert(hessian.n() == independent_variables.size(), - ExcDimensionMismatch(hessian.n(), independent_variables.size())); - - // In reverse mode, the gradients are computed from the - // independent variables (i.e. the adjoint) - internal::reverse_mode_dependent_variable_activation( - const_cast(dependent_variables[0])); - const std::size_t n_independent_variables = - independent_variables.size(); - for (unsigned int i = 0; i < n_independent_variables; i++) - { - using derivative_type = - typename ADNumberTraits::derivative_type; - const derivative_type gradient_i = - ADNumberTraits::get_directional_derivative( - independent_variables[i], i); - - for (unsigned int j = 0; j <= i; ++j) // Symmetry - { - // Extract higher-order directional derivatives. Depending on - // the AD number type, the result may be another AD number or a - // floating point value. 
- const ScalarType hessian_ij = - internal::NumberType::value( - ADNumberTraits::get_directional_derivative( - gradient_i, j)); - hessian[i][j] = hessian_ij; - if (i != j) - hessian[j][i] = hessian_ij; // Symmetry - } - } - } - - // === Vector drivers === + FullMatrix & hessian) const; - static void - values(const std::vector &dependent_variables, - Vector & values) - { - Assert(values.size() == dependent_variables.size(), - ExcDimensionMismatch(values.size(), dependent_variables.size())); + //@} - const std::size_t n_dependent_variables = dependent_variables.size(); - for (unsigned int i = 0; i < n_dependent_variables; i++) - values[i] = ADNumberTraits::get_scalar_value( - dependent_variables[i]); - } + /** + * @name Drivers for vector functions + */ + //@{ - static void + void + values(const std::vector &dependent_variables, + Vector & values) const; + + void jacobian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & jacobian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(jacobian.m() == dependent_variables.size(), - ExcDimensionMismatch(jacobian.m(), dependent_variables.size())); - Assert(jacobian.n() == independent_variables.size(), - ExcDimensionMismatch(jacobian.n(), - independent_variables.size())); - - const std::size_t n_independent_variables = - independent_variables.size(); - const std::size_t n_dependent_variables = dependent_variables.size(); - - // In reverse mode, the gradients are computed from the - // independent variables (i.e. the adjoint). - // For a demonstration of why this accumulation process is - // required, see the unit tests - // sacado/basic_01b.cc and sacado/basic_02b.cc - // Here we also take into consideration the derivative type: - // The Sacado number may be of the nested variety, in which - // case the effect of the accumulation process on the - // sensitivities of the nested number need to be accounted for. 
- using accumulation_type = - typename ADNumberTraits::derivative_type; - std::vector rad_accumulation( - n_independent_variables, - dealii::internal::NumberType::value(0.0)); - - for (unsigned int i = 0; i < n_dependent_variables; i++) - { - internal::reverse_mode_dependent_variable_activation( - const_cast(dependent_variables[i])); - for (unsigned int j = 0; j < n_independent_variables; j++) - { - const accumulation_type df_i_dx_j = - ADNumberTraits::get_directional_derivative( - independent_variables[j], - i /*This number doesn't really matter*/) - - rad_accumulation[j]; - jacobian[i][j] = - internal::NumberType::value(df_i_dx_j); - rad_accumulation[j] += df_i_dx_j; - } - } - } + FullMatrix & jacobian) const; + + //@} private: /** @@ -2036,163 +1264,72 @@ namespace Differentiation ADNumberTraits::type_code == NumberTypes::sacado_dfad_dfad>::type> { - TapelessDrivers() - : dependent_variable_marking_safe(false) - {} + /** + * Constructor + */ + TapelessDrivers(); - // === Configuration === + /** + * @name Configuration + */ + //@{ static void - initialize_global_environment(const unsigned int n_independent_variables) - { - internal::configure_tapeless_mode( - n_independent_variables); - } + initialize_global_environment(const unsigned int n_independent_variables); + + //@} + + /** + * Operation status + */ + //@{ - // === Operation status === void - allow_dependent_variable_marking() - { - dependent_variable_marking_safe = true; - } + allow_dependent_variable_marking(); void - prevent_dependent_variable_marking() - { - dependent_variable_marking_safe = false; - } + prevent_dependent_variable_marking(); bool - is_dependent_variable_marking_allowed() const - { - return dependent_variable_marking_safe; - } - - // === Scalar drivers === - - static ScalarType - value(const std::vector &dependent_variables) - { - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - return ADNumberTraits::get_scalar_value( - dependent_variables[0]); - } + is_dependent_variable_marking_allowed() const; - static void + //@} + + /** + * @name Drivers for scalar functions + */ + //@{ + + ScalarType + value(const std::vector &dependent_variables) const; + + void gradient(const std::vector &independent_variables, const std::vector &dependent_variables, - Vector & gradient) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - Assert(gradient.size() == independent_variables.size(), - ExcDimensionMismatch(gradient.size(), - independent_variables.size())); - - // In forward mode, the gradients are computed from the - // dependent variables - const std::size_t n_independent_variables = - independent_variables.size(); - for (unsigned int i = 0; i < n_independent_variables; i++) - gradient[i] = internal::NumberType::value( - ADNumberTraits::get_directional_derivative( - dependent_variables[0], i)); - } + Vector & gradient) const; - static void + void hessian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & hessian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 2, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 2)); - Assert(dependent_variables.size() == 1, - ExcDimensionMismatch(dependent_variables.size(), 1)); - Assert(hessian.m() == independent_variables.size(), 
- ExcDimensionMismatch(hessian.m(), independent_variables.size())); - Assert(hessian.n() == independent_variables.size(), - ExcDimensionMismatch(hessian.n(), independent_variables.size())); - - // In forward mode, the gradients are computed from the - // dependent variables - const std::size_t n_independent_variables = - independent_variables.size(); - for (unsigned int i = 0; i < n_independent_variables; i++) - { - using derivative_type = - typename ADNumberTraits::derivative_type; - const derivative_type gradient_i = - ADNumberTraits::get_directional_derivative( - dependent_variables[0], i); - - for (unsigned int j = 0; j <= i; ++j) // Symmetry - { - // Extract higher-order directional derivatives. Depending on - // the AD number type, the result may be another AD number or a - // floating point value. - const ScalarType hessian_ij = - internal::NumberType::value( - ADNumberTraits::get_directional_derivative( - gradient_i, j)); - hessian[i][j] = hessian_ij; - if (i != j) - hessian[j][i] = hessian_ij; // Symmetry - } - } - } - - // === Vector drivers === + FullMatrix & hessian) const; - static void - values(const std::vector &dependent_variables, - Vector & values) - { - Assert(values.size() == dependent_variables.size(), - ExcDimensionMismatch(values.size(), dependent_variables.size())); + //@} - const std::size_t n_dependent_variables = dependent_variables.size(); - for (unsigned int i = 0; i < n_dependent_variables; i++) - values[i] = ADNumberTraits::get_scalar_value( - dependent_variables[i]); - } + /** + * @name Drivers for vector functions + */ + //@{ - static void + void + values(const std::vector &dependent_variables, + Vector & values) const; + + void jacobian(const std::vector &independent_variables, const std::vector &dependent_variables, - FullMatrix & jacobian) - { - Assert( - AD::ADNumberTraits::n_supported_derivative_levels >= 1, - ExcSupportedDerivativeLevels( - AD::ADNumberTraits::n_supported_derivative_levels, - 1)); - Assert(jacobian.m() == dependent_variables.size(), - ExcDimensionMismatch(jacobian.m(), dependent_variables.size())); - Assert(jacobian.n() == independent_variables.size(), - ExcDimensionMismatch(jacobian.n(), - independent_variables.size())); - - const std::size_t n_independent_variables = - independent_variables.size(); - const std::size_t n_dependent_variables = dependent_variables.size(); - - // In forward mode, the gradients are computed from the - // dependent variables - for (unsigned int i = 0; i < n_dependent_variables; i++) - for (unsigned int j = 0; j < n_independent_variables; j++) - jacobian[i][j] = internal::NumberType::value( - ADNumberTraits::get_directional_derivative( - dependent_variables[i], j)); - } + FullMatrix & jacobian) const; + + //@} private: /** diff --git a/include/deal.II/differentiation/ad/ad_helpers.h b/include/deal.II/differentiation/ad/ad_helpers.h index 83e8a899c5..7aad5de7ed 100644 --- a/include/deal.II/differentiation/ad/ad_helpers.h +++ b/include/deal.II/differentiation/ad/ad_helpers.h @@ -328,6 +328,9 @@ namespace Differentiation * image space. * @param[in] clear_registered_tapes A flag that indicates the that * list of @p registered_tapes must be cleared. + * If set to true then the data structure that tracks which + * tapes have been recorded is cleared as well. It is then expected that + * any preexisting tapes be re-recorded. * * @note This also resets the active tape number to an invalid number, and * deactivates the recording mode for taped variables. 
diff --git a/source/differentiation/ad/CMakeLists.txt b/source/differentiation/ad/CMakeLists.txt index 81e83bb633..9a2f31e4dc 100644 --- a/source/differentiation/ad/CMakeLists.txt +++ b/source/differentiation/ad/CMakeLists.txt @@ -1,6 +1,6 @@ ## --------------------------------------------------------------------- ## -## Copyright (C) 2017 by the deal.II authors +## Copyright (C) 2017 - 2018 by the deal.II authors ## ## This file is part of the deal.II library. ## @@ -16,12 +16,15 @@ INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}) SET(_src + ad_drivers.cc ad_helpers.cc adolc_number_types.cc sacado_number_types.cc ) SET(_inst + ad_drivers.inst1.in + ad_drivers.inst2.in ad_helpers.inst1.in ad_helpers.inst2.in adolc_number_types.inst.in diff --git a/source/differentiation/ad/ad_drivers.cc b/source/differentiation/ad/ad_drivers.cc new file mode 100644 index 0000000000..a0447c8d77 --- /dev/null +++ b/source/differentiation/ad/ad_drivers.cc @@ -0,0 +1,2258 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#include + +#if defined(DEAL_II_WITH_ADOLC) || defined(DEAL_II_TRILINOS_WITH_SACADO) + +# include +# include +# include + +# include +# include +# include +# include +# include + +# include +# include + +# ifdef DEAL_II_WITH_ADOLC + +DEAL_II_DISABLE_EXTRA_DIAGNOSTICS +# include +# include +# include +DEAL_II_ENABLE_EXTRA_DIAGNOSTICS + +# endif // DEAL_II_WITH_ADOLC + +# include + + +DEAL_II_NAMESPACE_OPEN + + +namespace Differentiation +{ + namespace AD + { + // ------------- TapedDrivers ------------- + + + template + bool + TapedDrivers::is_recording() const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + typename Types::tape_index + TapedDrivers::active_tape_index() const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return Numbers::invalid_tape_index; + } + + + template + bool + TapedDrivers::is_registered_tape( + const typename Types::tape_index) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + bool + TapedDrivers::keep_independent_values() const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + void + TapedDrivers::set_tape_buffer_sizes( + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::start_taping( + const typename Types::tape_index, + const bool) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::stop_taping( + const typename Types::tape_index, + const bool) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + std::vector::tape_index> + TapedDrivers::get_registered_tape_indices() + const + { + 
AssertThrow(false, ExcRequiresADNumberSpecialization()); + return std::vector::tape_index>(); + } + + + template + void + TapedDrivers::activate_tape( + const typename Types::tape_index) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + bool + TapedDrivers::requires_retaping( + const typename Types::tape_index) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + bool + TapedDrivers::last_action_requires_retaping() + const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + void + TapedDrivers::remove_tape( + const typename Types::tape_index) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::reset(const bool) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::print(std::ostream &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::print_tape_stats( + const typename Types::tape_index, + std::ostream &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + ScalarType + TapedDrivers::value( + const typename Types::tape_index, + const std::vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return ScalarType(0.0); + } + + + template + void + TapedDrivers::gradient( + const typename Types::tape_index, + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::hessian( + const typename Types::tape_index, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::values( + const typename Types::tape_index, + const unsigned int, + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapedDrivers::jacobian( + const typename Types::tape_index, + const unsigned int, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + +# ifdef DEAL_II_WITH_ADOLC + + // Specialization for taped ADOL-C auto-differentiable numbers. + + template + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::TapedDrivers() + : active_tape(Numbers::invalid_tape_index) + , keep_values(true) + , is_recording_flag(false) + , use_stored_taped_buffer_sizes(false) + , obufsize(0u) + , lbufsize(0u) + , vbufsize(0u) + , tbufsize(0u) + {} + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::is_recording() + const + { + return is_recording_flag; + } + + + template + typename Types::tape_index + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::active_tape_index() const + { + return active_tape; + } + + + template + bool + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::keep_independent_values() + const + { + return keep_values; + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + is_registered_tape( + const typename Types::tape_index tape_index) const + { + // Sigh... 
This is a mess :-/ + // The most succinct way to get this piece of information would be to + // use the getTapeInfos() function, but this would come at the expense of + // creating an inactive tape data object within ADOL-C's global store. For + // hints as to why this is the way it is, see the getTapeInfos() + // function in + // https://gitlab.com/adol-c/adol-c/blob/master/ADOL-C/src/tape_handling.cpp + // An alternative solution would be to manually access the tape data; + // see the removeTape() function in + // https://gitlab.com/adol-c/adol-c/blob/master/ADOL-C/src/tape_handling.cpp + // with the consideration of the #defines in + // https://gitlab.com/adol-c/adol-c/blob/master/ADOL-C/src/taping_p.h + // as to how this would be performed. + // Doing things "manually" (the second way) without creating the + // additional data object would be a lot more work... + // + // Both of the above solutions would be possible IF ADOL-C exposed + // this data object or a method to access it to the outside world. + // But they don't :-( + // Instead, what we'll have to do is get the statistics for this tape + // and make our own determination as to whether or not this tape exists. + // This effectively executes the first solution, with even more + // overhead! If the tape is in existence, we can assume that it should have a + // non-zero number of dependent and independent variables. Those are + // stored in the zeroth and first entries of the statistics vector. + // + // But, oh wait... Surprise! This will trigger an error if the tape + // doesn't exist at all! So let's first check their tape info cache to + // see if the tape REALLY exists (i.e. has been touched, even if nothing + // has been written to it) before trying to access it. It'll only take + // an O(n) search, but at this point who really cares about efficiency? + // + // It has been suggested in + // https://gitlab.com/adol-c/adol-c/issues/11 + // that a simple try-catch block around ::tapestats is the solution that + // we want here. Unfortunately this results in unwanted pollution of + // the terminal, of the form + // ADOL-C error: reading integer tape number 4! + // >>> File or directory not found! <<< + // , every time a query is made about a non-existent tape. + // So either way we have to guard that check with something more + // conservative so that we don't output useless messages for our users. + const std::vector::tape_index> + registered_tape_indices = get_registered_tape_indices(); + const auto it = std::find(registered_tape_indices.begin(), + registered_tape_indices.end(), + tape_index); + if (it == registered_tape_indices.end()) + return false; + + // See https://gitlab.com/adol-c/adol-c/issues/11#note_108341333 + try + { + std::vector counts(STAT_SIZE); + ::tapestats(tape_index, counts.data()); + return true; + } + catch (const ::FatalError &exc) + { + return false; + } + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + set_tape_buffer_sizes( + const typename Types::tape_buffer_sizes in_obufsize, + const typename Types::tape_buffer_sizes in_lbufsize, + const typename Types::tape_buffer_sizes in_vbufsize, + const typename Types::tape_buffer_sizes in_tbufsize) + { + // When valid for the chosen AD number type, these values will be used + // the next time start_recording_operations() is called.
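+      //
+      // [Editor's illustration -- not part of the original patch.] A minimal
+      // sketch of the intended call sequence, assuming an ADOL-C taped number
+      // type and purely illustrative buffer sizes and tape index:
+      //
+      //   TapedDrivers<adouble, double> driver;
+      //   driver.set_tape_buffer_sizes(4096, 4096, 4096, 4096);
+      //   driver.start_taping(1 /*tape_index*/, true /*keep_independent_values*/);
+      //   // ... record operations that depend on the independent variables ...
+      //   driver.stop_taping(1 /*tape_index*/, false /*write_tapes_to_file*/);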
+ obufsize = in_obufsize; + lbufsize = in_lbufsize; + vbufsize = in_vbufsize; + tbufsize = in_tbufsize; + use_stored_taped_buffer_sizes = true; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + start_taping(const typename Types::tape_index tape_index, + const bool keep_independent_values) + { + if (use_stored_taped_buffer_sizes) + trace_on(tape_index, + keep_independent_values, + obufsize, + lbufsize, + vbufsize, + tbufsize); + else + trace_on(tape_index, keep_independent_values); + + // Set some other flags to their indicated / required values + keep_values = keep_independent_values; + is_recording_flag = true; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + stop_taping( + const typename Types::tape_index active_tape_index, + const bool write_tapes_to_file) + { + if (write_tapes_to_file) + trace_off(active_tape_index); // Slow + else + trace_off(); // Fast(er) + + // Now that we've turned tracing off, we've definitely + // stopped all tape recording. + is_recording_flag = false; + + // If the keep_values flag is set, then we expect the user to use this + // tape immediately after recording it. There is therefore no need to + // invalidate it. However, there is now also no way to double-check + // that the newly recorded tape is indeed the active tape. + if (keep_independent_values() == false) + active_tape = Numbers::invalid_tape_index; + } + + + template + std::vector::tape_index> + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + get_registered_tape_indices() const + { + // We've chosen to use unsigned shorts for the tape + // index type (a safety precaution) so we need to + // perform a conversion betwwen ADOL-C's native tape + // index type and that chosen by us. + std::vector registered_tape_indices_s; + cachedTraceTags(registered_tape_indices_s); + + std::vector::tape_index> + registered_tape_indices(registered_tape_indices_s.size()); + std::copy(registered_tape_indices_s.begin(), + registered_tape_indices_s.end(), + registered_tape_indices.begin()); + return registered_tape_indices; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + activate_tape(const typename Types::tape_index tape_index) + { + active_tape = tape_index; + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + requires_retaping( + const typename Types::tape_index tape_index) const + { + Assert( + is_registered_tape(tape_index) == true, + ExcMessage( + "Cannot ask for the status of a tape that has not yet been recorded and used.")); + + const auto it_status_tape = status.find(tape_index); + + // This tape's status has not been found in the map. This could be + // because a non-existant tape_index has been used as an argument, or + // because the tape exists but has not been used (in a way that + // initiated a status update). For example, on can create a tape in one + // section of code and then query the status of all existing tapes in a + // completely different section of code that knows nothing about the + // first tape. There is no prerequisite that the first tape is ever + // used, and it can therefore not have a status. 
So, in this case + // there's not much we can do other than to report that the tape does + // not require retaping. + if (it_status_tape == status.end()) + return false; + + const auto status_tape = it_status_tape->second; + + // See ADOL-C manual section 1.7 and comments in last paragraph of + // section 3.1 + Assert(status_tape < 4 && status_tape >= -2, + ExcIndexRange(status_tape, -2, 4)); + return (status_tape < 0); + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + last_action_requires_retaping() const + { + return requires_retaping(active_tape); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + remove_tape(const typename Types::tape_index tape_index) + { + Assert(is_registered_tape(tape_index), + ExcMessage( + "This tape does not exist, and therefore cannot be cleared.")); + removeTape(tape_index, TapeRemovalType::ADOLC_REMOVE_COMPLETELY); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + reset(const bool clear_registered_tapes) + { + active_tape = Numbers::invalid_tape_index; + is_recording_flag = false; + status.clear(); + if (clear_registered_tapes) + { + const std::vector::tape_index> + registered_tape_indices = get_registered_tape_indices(); + for (const auto &tape_index : registered_tape_indices) + remove_tape(tape_index); + } + } + + + template + void + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::print(std::ostream &stream) + const + { + const std::vector::tape_index> + registered_tape_indices = get_registered_tape_indices(); + stream << "Registered tapes and their status: "; + auto it_registered_tape = registered_tape_indices.begin(); + for (unsigned int i = 0; i < registered_tape_indices.size(); + ++i, ++it_registered_tape) + { + const auto tape_index = *it_registered_tape; + const auto it_status_tape = status.find(tape_index); + Assert(it_status_tape != status.end(), + ExcMessage( + "This tape's status has not been found in the map.")); + const auto status_tape = it_status_tape->second; + + stream << tape_index << "->" << status_tape + << (i < (registered_tape_indices.size() - 1) ? "," : ""); + } + stream << "\n"; + + stream << "Keep values? " << keep_independent_values() << "\n"; + stream << "Use stored tape buffer sizes? 
" + << use_stored_taped_buffer_sizes << "\n"; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + print_tape_stats( + const typename Types::tape_index tape_index, + std::ostream & stream) const + { + // See ADOL-C manual section 2.1 + // and adolc/taping.h + std::vector counts(STAT_SIZE); + ::tapestats(tape_index, counts.data()); + Assert(counts.size() >= 18, ExcInternalError()); + stream + << "Tape index: " << tape_index << "\n" + << "Number of independent variables: " << counts[0] << "\n" + << "Number of dependent variables: " << counts[1] << "\n" + << "Max number of live, active variables: " << counts[2] << "\n" + << "Size of taylor stack (number of overwrites): " << counts[3] << "\n" + << "Operations buffer size: " << counts[4] << "\n" + << "Total number of recorded operations: " << counts[5] << "\n" + << "Operations file written or not: " << counts[6] << "\n" + << "Overall number of locations: " << counts[7] << "\n" + << "Locations file written or not: " << counts[8] << "\n" + << "Overall number of values: " << counts[9] << "\n" + << "Values file written or not: " << counts[10] << "\n" + << "Locations buffer size: " << counts[11] << "\n" + << "Values buffer size: " << counts[12] << "\n" + << "Taylor buffer size: " << counts[13] << "\n" + << "Number of eq_*_prod for sparsity pattern: " << counts[14] << "\n" + << "Use of 'min_op', deferred to 'abs_op' for piecewise calculations: " + << counts[15] << "\n" + << "Number of 'abs' calls that can switch branch: " << counts[16] + << "\n" + << "Number of parameters (doubles) interchangeable without retaping: " + << counts[17] << "\n" + << std::flush; + } + + + template + typename TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::scalar_type + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + value(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables) const + { + Assert(is_registered_tape(active_tape_index), + ExcMessage("This tape has not yet been recorded.")); + + scalar_type value = 0.0; + + status[active_tape_index] = + ::function(active_tape_index, + 1, // Only one dependent variable + independent_variables.size(), + const_cast(independent_variables.data()), + &value); + + return value; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + gradient(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables, + Vector & gradient) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(gradient.size() == independent_variables.size(), + ExcDimensionMismatch(gradient.size(), + independent_variables.size())); + Assert(is_registered_tape(active_tape_index), + ExcMessage("This tape has not yet been recorded.")); + + // Note: ADOL-C's ::gradient function expects a *double as the last + // parameter. Here we take advantage of the fact that the data in the + // Vector class is aligned (e.g. 
stored as an Array) + status[active_tape_index] = + ::gradient(active_tape_index, + independent_variables.size(), + const_cast(independent_variables.data()), + gradient.data()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + hessian(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables, + FullMatrix & hessian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 2, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 2)); + Assert(hessian.m() == independent_variables.size(), + ExcDimensionMismatch(hessian.m(), independent_variables.size())); + Assert(hessian.n() == independent_variables.size(), + ExcDimensionMismatch(hessian.n(), independent_variables.size())); + Assert(is_registered_tape(active_tape_index), + ExcMessage("This tape has not yet been recorded.")); + + const unsigned int n_independent_variables = independent_variables.size(); + std::vector H(n_independent_variables); + for (unsigned int i = 0; i < n_independent_variables; ++i) + H[i] = &hessian[i][0]; + + status[active_tape_index] = + ::hessian(active_tape_index, + n_independent_variables, + const_cast(independent_variables.data()), + H.data()); + + // ADOL-C builds only the lower-triangular part of the + // symmetric Hessian, so we should copy the relevant + // entries into the upper triangular part. + for (unsigned int i = 0; i < n_independent_variables; i++) + for (unsigned int j = 0; j < i; j++) + hessian[j][i] = hessian[i][j]; // Symmetry + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + values(const typename Types::tape_index active_tape_index, + const unsigned int n_dependent_variables, + const std::vector &independent_variables, + Vector & values) const + { + Assert(values.size() == n_dependent_variables, + ExcDimensionMismatch(values.size(), n_dependent_variables)); + Assert(is_registered_tape(active_tape_index), + ExcMessage("This tape has not yet been recorded.")); + + // Note: ADOL-C's ::function function expects a *double as the last + // parameter. Here we take advantage of the fact that the data in the + // Vector class is aligned (e.g. 
stored as an Array) + status[active_tape_index] = + ::function(active_tape_index, + n_dependent_variables, + independent_variables.size(), + const_cast(independent_variables.data()), + values.data()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + jacobian(const typename Types::tape_index active_tape_index, + const unsigned int n_dependent_variables, + const std::vector &independent_variables, + FullMatrix & jacobian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(jacobian.m() == n_dependent_variables, + ExcDimensionMismatch(jacobian.m(), n_dependent_variables)); + Assert(jacobian.n() == independent_variables.size(), + ExcDimensionMismatch(jacobian.n(), independent_variables.size())); + Assert(is_registered_tape(active_tape_index), + ExcMessage("This tape has not yet been recorded.")); + + std::vector J(n_dependent_variables); + for (unsigned int i = 0; i < n_dependent_variables; ++i) + J[i] = &jacobian[i][0]; + + status[active_tape_index] = ::jacobian(active_tape_index, + n_dependent_variables, + independent_variables.size(), + independent_variables.data(), + J.data()); + } + +# else + + // Specialization for taped ADOL-C auto-differentiable numbers. + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::is_recording() + const + { + AssertThrow(false, ExcRequiresADOLC()); + return false; + } + + + template + typename Types::tape_index + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::active_tape_index() const + { + AssertThrow(false, ExcRequiresADOLC()); + return Numbers::invalid_tape_index; + } + + + template + bool + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::keep_independent_values() + const + { + AssertThrow(false, ExcRequiresADOLC()); + return false; + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + is_registered_tape( + const typename Types::tape_index tape_index) const + { + AssertThrow(false, ExcRequiresADOLC()); + return false; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + set_tape_buffer_sizes( + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes, + const typename Types::tape_buffer_sizes) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + start_taping(const typename Types::tape_index, const bool) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + stop_taping(const typename Types::tape_index, const bool) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + std::vector::tape_index> + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + get_registered_tape_indices() const + { + AssertThrow(false, ExcRequiresADOLC()); + return std::vector::tape_index>(); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + 
typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + activate_tape(const typename Types::tape_index) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + requires_retaping(const typename Types::tape_index) const + { + AssertThrow(false, ExcRequiresADOLC()); + return false; + } + + + template + bool + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + last_action_requires_retaping() const + { + AssertThrow(false, ExcRequiresADOLC()); + return false; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + remove_tape(const typename Types::tape_index) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::reset(const bool) + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::print(std::ostream &stream) + const + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + print_tape_stats(const typename Types::tape_index, + std::ostream &) const + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::scalar_type + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + value(const typename Types::tape_index, + const std::vector &) const + { + AssertThrow(false, ExcRequiresADOLC()); + return 0.0; + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + gradient(const typename Types::tape_index, + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADOLC()); + } + + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + hessian(const typename Types::tape_index, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + values(const typename Types::tape_index, + const unsigned int, + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADOLC()); + } + + + template + void + TapedDrivers< + ADNumberType, + double, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + jacobian(const typename Types::tape_index, + const unsigned int, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADOLC()); + } + +# endif // DEAL_II_WITH_ADOLC + + + // Specialization for ADOL-C taped numbers. It is expected that the + // scalar return type for this class is a float. 
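+   // [Editor's illustration -- not part of the original patch.] This
+   // specialization simply stores a TapedDrivers<ADNumberType, double> as the
+   // member 'taped_driver' and forwards every call to it, converting the
+   // float arguments on the way in and the results on the way out; a sketch
+   // of the pattern implemented below:
+   //
+   //   float value(const tape_index_type tape, const std::vector<float> &x) const
+   //   {
+   //     return taped_driver.value(tape, vector_float_to_double(x));
+   //   }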
+ + template + bool + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::is_recording() + const + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + return taped_driver.is_recording(); + } + + + template + typename Types::tape_index + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::active_tape_index() const + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + return taped_driver.active_tape_index(); + } + + + template + bool + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::keep_independent_values() + const + { + return taped_driver.keep_independent_values(); + } + + + template + bool + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + is_registered_tape( + const typename Types::tape_index tape_index) const + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + return taped_driver.is_registered_tape(tape_index); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + set_tape_buffer_sizes( + const typename Types::tape_buffer_sizes obufsize, + const typename Types::tape_buffer_sizes lbufsize, + const typename Types::tape_buffer_sizes vbufsize, + const typename Types::tape_buffer_sizes tbufsize) + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.set_tape_buffer_sizes(obufsize, + lbufsize, + vbufsize, + tbufsize); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + start_taping(const typename Types::tape_index tape_index, + const bool keep_independent_values) + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.start_taping(tape_index, keep_independent_values); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + stop_taping( + const typename Types::tape_index active_tape_index, + const bool write_tapes_to_file) + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.stop_taping(active_tape_index, write_tapes_to_file); + } + + + template + std::vector::tape_index> + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + get_registered_tape_indices() const + { + return taped_driver.get_registered_tape_indices(); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + activate_tape(const typename Types::tape_index tape_index) + { + taped_driver.activate_tape(tape_index); + } + + + template + bool + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + requires_retaping( + const typename Types::tape_index tape_index) const + { + return taped_driver.requires_retaping(tape_index); + } + + + template + bool + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + 
NumberTypes::adolc_taped>::type>:: + last_action_requires_retaping() const + { + return taped_driver.last_action_requires_retaping(); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + remove_tape(const typename Types::tape_index tape_index) + { + taped_driver.remove_tape(tape_index); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + reset(const bool clear_registered_tapes) + { + taped_driver.reset(clear_registered_tapes); + } + + + template + void + TapedDrivers::type_code == + NumberTypes::adolc_taped>::type>::print(std::ostream &stream) + const + { + taped_driver.print(stream); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + print_tape_stats( + const typename Types::tape_index tape_index, + std::ostream & stream) const + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.print_tape_stats(tape_index, stream); + } + + + template + typename TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>::scalar_type + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + value(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables) const + { + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + return taped_driver.value(active_tape_index, + vector_float_to_double(independent_variables)); + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + gradient(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables, + Vector & gradient) const + { + Vector gradient_double(gradient.size()); + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.gradient(active_tape_index, + vector_float_to_double(independent_variables), + gradient_double); + gradient = gradient_double; + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + hessian(const typename Types::tape_index active_tape_index, + const std::vector &independent_variables, + FullMatrix & hessian) const + { + FullMatrix hessian_double(hessian.m(), hessian.n()); + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.hessian(active_tape_index, + vector_float_to_double(independent_variables), + hessian_double); + hessian = hessian_double; + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + values(const typename Types::tape_index active_tape_index, + const unsigned int n_dependent_variables, + const std::vector &independent_variables, + Vector & values) const + { + Vector values_double(values.size()); + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.values(active_tape_index, + n_dependent_variables, + 
vector_float_to_double(independent_variables), + values_double); + values = values_double; + } + + + template + void + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + jacobian(const typename Types::tape_index active_tape_index, + const unsigned int n_dependent_variables, + const std::vector &independent_variables, + FullMatrix & jacobian) const + { + FullMatrix jacobian_double(jacobian.m(), jacobian.n()); + // ADOL-C only supports 'double', not 'float', so we can forward to + // the 'double' implementation of this function + taped_driver.jacobian(active_tape_index, + n_dependent_variables, + vector_float_to_double(independent_variables), + jacobian_double); + jacobian = jacobian_double; + } + + + template + std::vector + TapedDrivers< + ADNumberType, + float, + typename std::enable_if::type_code == + NumberTypes::adolc_taped>::type>:: + vector_float_to_double(const std::vector &in) const + { + std::vector out(in.size()); + std::copy(in.begin(), in.end(), out.begin()); + return out; + } + + + // ------------- TapelessDrivers ------------- + + + template + void + TapelessDrivers::initialize_global_environment( + const unsigned int) + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + template + void + TapelessDrivers:: + allow_dependent_variable_marking() + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + template + void + TapelessDrivers:: + prevent_dependent_variable_marking() + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + template + bool + TapelessDrivers:: + is_dependent_variable_marking_allowed() const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return false; + } + + + template + ScalarType + TapelessDrivers::value( + const std::vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + return ScalarType(0.0); + } + + + template + void + TapelessDrivers::gradient( + const std::vector &, + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapelessDrivers::hessian( + const std::vector &, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapelessDrivers::values( + const std::vector &, + Vector &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + template + void + TapelessDrivers::jacobian( + const std::vector &, + const std::vector &, + FullMatrix &) const + { + AssertThrow(false, ExcRequiresADNumberSpecialization()); + } + + + namespace internal + { + /** + * A dummy function to define the active dependent variable when using + * reverse-mode AD. + */ + template + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad)>::type + reverse_mode_dependent_variable_activation(ADNumberType &) + {} + +# ifdef DEAL_II_TRILINOS_WITH_SACADO + + + /** + * Define the active dependent variable when using reverse-mode AD. + * + * If there are multiple dependent variables then it is necessary to + * inform the independent variables, from which the adjoints are computed, + * which dependent variable they are computing the gradients with respect + * to. This function broadcasts this information. 
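+     *
+     * As an illustration (editor-added sketch, mirroring the reverse-mode
+     * gradient() driver further down in this file): after the broadcast the
+     * adjoints are read off the independent variables, e.g.
+     * @code
+     *   reverse_mode_dependent_variable_activation(f); // f: dependent variable
+     *   for (unsigned int i = 0; i < x.size(); ++i)    // x: independent variables
+     *     gradient[i] = ADNumberTraits<ADNumberType>::get_directional_derivative(x[i], 0);
+     * @endcode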
+ */ + template + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type + reverse_mode_dependent_variable_activation( + ADNumberType &dependent_variable) + { + // Compute all gradients (adjoints) for this + // reverse-mode Sacado dependent variable. + // For reverse-mode Sacado numbers it is necessary to broadcast to + // all independent variables that it is time to compute gradients. + // For one dependent variable one would just need to call + // ADNumberType::Gradcomp(), but since we have a more + // generic implementation for vectors of dependent variables + // (vector mode) we default to the complex case. + ADNumberType::Outvar_Gradcomp(dependent_variable); + } + +# endif + + + /** + * A dummy function to enable vector mode for tapeless + * auto-differentiable numbers. + */ + template + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless)>::type + configure_tapeless_mode(const unsigned int) + {} + + +# ifdef DEAL_II_WITH_ADOLC + + + /** + * Enable vector mode for ADOL-C tapeless numbers. + * + * This function checks to see if its legal to increase the maximum + * number of directional derivatives to be considered during calculations. + * If not then it throws an error. + */ + template + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless>::type + configure_tapeless_mode(const unsigned int n_directional_derivatives) + { +# ifdef DEAL_II_ADOLC_WITH_TAPELESS_REFCOUNTING + // See ADOL-C manual section 7.1 + // + // NOTE: It is critical that this is done for tapeless mode BEFORE + // any adtl::adouble are created. If this is not done, then we see + // this scary warning: + // + // " + // ADOL-C Warning: Tapeless: Setting numDir could change memory + // allocation of derivatives in existing adoubles and may lead to + // erroneous results or memory corruption + // " + // + // So we use this dummy function to configure this setting before + // we create and initialize our class data + const std::size_t n_live_variables = adtl::refcounter::getNumLiveVar(); + if (n_live_variables == 0) + { + adtl::setNumDir(n_directional_derivatives); + } + else + { + // So there are some live active variables floating around. Here we + // check if we ask to increase the number of computable + // directional derivatives. If this really is necessary then it's + // absolutely vital that there exist no live variables before doing + // so. + const std::size_t n_set_directional_derivatives = adtl::getNumDir(); + if (n_directional_derivatives > n_set_directional_derivatives) + AssertThrow( + n_live_variables == 0, + ExcMessage( + "There are currently " + + Utilities::to_string(n_live_variables) + + " live " + "adtl::adouble variables in existence. They currently " + "assume " + + Utilities::to_string(n_set_directional_derivatives) + + " directional derivatives " + "but you wish to increase this to " + + Utilities::to_string(n_directional_derivatives) + + ". \n" + "To safely change (or more specifically in this case, " + "increase) the number of directional derivatives, there " + "must be no tapeless doubles in local/global scope.")); + } +# else + // If ADOL-C is not configured with tapeless number reference counting + // then there is no way that we can guarantee that the following call + // is safe. No comment... 
:-/ + adtl::setNumDir(n_directional_derivatives); +# endif + } + +# else // DEAL_II_WITH_ADOLC + + template + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless>::type + configure_tapeless_mode(const unsigned int /*n_directional_derivatives*/) + { + AssertThrow(false, ExcRequiresADOLC()); + } + +# endif + + } // namespace internal + + + + // Specialization for auto-differentiable numbers that use + // reverse mode to compute the first derivatives (and, if supported, + // forward mode for the second). + + template + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if< + ADNumberTraits::type_code == NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>::TapelessDrivers() + : dependent_variable_marking_safe(false) + {} + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + initialize_global_environment(const unsigned int n_independent_variables) + { + internal::configure_tapeless_mode(n_independent_variables); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + allow_dependent_variable_marking() + { + dependent_variable_marking_safe = true; + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + prevent_dependent_variable_marking() + { + dependent_variable_marking_safe = false; + } + + + template + bool + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + is_dependent_variable_marking_allowed() const + { + return dependent_variable_marking_safe; + } + + + template + ScalarType + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + value(const std::vector &dependent_variables) const + { + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + return ADNumberTraits::get_scalar_value( + dependent_variables[0]); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + gradient(const std::vector &independent_variables, + const std::vector &dependent_variables, + Vector & gradient) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + Assert(gradient.size() == independent_variables.size(), + ExcDimensionMismatch(gradient.size(), + independent_variables.size())); + + // In reverse mode, the gradients are computed from the + // independent variables (i.e. 
the adjoint) + internal::reverse_mode_dependent_variable_activation( + const_cast(dependent_variables[0])); + const std::size_t n_independent_variables = independent_variables.size(); + for (unsigned int i = 0; i < n_independent_variables; i++) + gradient[i] = internal::NumberType::value( + ADNumberTraits::get_directional_derivative( + independent_variables[i], 0 /*This number doesn't really matter*/)); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + hessian(const std::vector &independent_variables, + const std::vector &dependent_variables, + FullMatrix & hessian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 2, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 2)); + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + Assert(hessian.m() == independent_variables.size(), + ExcDimensionMismatch(hessian.m(), independent_variables.size())); + Assert(hessian.n() == independent_variables.size(), + ExcDimensionMismatch(hessian.n(), independent_variables.size())); + + // In reverse mode, the gradients are computed from the + // independent variables (i.e. the adjoint) + internal::reverse_mode_dependent_variable_activation( + const_cast(dependent_variables[0])); + const std::size_t n_independent_variables = independent_variables.size(); + for (unsigned int i = 0; i < n_independent_variables; i++) + { + using derivative_type = + typename ADNumberTraits::derivative_type; + const derivative_type gradient_i = + ADNumberTraits::get_directional_derivative( + independent_variables[i], i); + + for (unsigned int j = 0; j <= i; ++j) // Symmetry + { + // Extract higher-order directional derivatives. Depending on + // the AD number type, the result may be another AD number or a + // floating point value. 
+ const ScalarType hessian_ij = + internal::NumberType::value( + ADNumberTraits::get_directional_derivative( + gradient_i, j)); + hessian[i][j] = hessian_ij; + if (i != j) + hessian[j][i] = hessian_ij; // Symmetry + } + } + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + values(const std::vector &dependent_variables, + Vector & values) const + { + Assert(values.size() == dependent_variables.size(), + ExcDimensionMismatch(values.size(), dependent_variables.size())); + + const std::size_t n_dependent_variables = dependent_variables.size(); + for (unsigned int i = 0; i < n_dependent_variables; i++) + values[i] = ADNumberTraits::get_scalar_value( + dependent_variables[i]); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::sacado_rad || + ADNumberTraits::type_code == + NumberTypes::sacado_rad_dfad>::type>:: + jacobian(const std::vector &independent_variables, + const std::vector &dependent_variables, + FullMatrix & jacobian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(jacobian.m() == dependent_variables.size(), + ExcDimensionMismatch(jacobian.m(), dependent_variables.size())); + Assert(jacobian.n() == independent_variables.size(), + ExcDimensionMismatch(jacobian.n(), independent_variables.size())); + + const std::size_t n_independent_variables = independent_variables.size(); + const std::size_t n_dependent_variables = dependent_variables.size(); + + // In reverse mode, the gradients are computed from the + // independent variables (i.e. the adjoint). + // For a demonstration of why this accumulation process is + // required, see the unit tests + // sacado/basic_01b.cc and sacado/basic_02b.cc + // Here we also take into consideration the derivative type: + // The Sacado number may be of the nested variety, in which + // case the effect of the accumulation process on the + // sensitivities of the nested number need to be accounted for. + using accumulation_type = + typename ADNumberTraits::derivative_type; + std::vector rad_accumulation( + n_independent_variables, + dealii::internal::NumberType::value(0.0)); + + for (unsigned int i = 0; i < n_dependent_variables; i++) + { + internal::reverse_mode_dependent_variable_activation( + const_cast(dependent_variables[i])); + for (unsigned int j = 0; j < n_independent_variables; j++) + { + const accumulation_type df_i_dx_j = + ADNumberTraits::get_directional_derivative( + independent_variables[j], + i /*This number doesn't really matter*/) - + rad_accumulation[j]; + jacobian[i][j] = + internal::NumberType::value(df_i_dx_j); + rad_accumulation[j] += df_i_dx_j; + } + } + } + + + + // Specialization for auto-differentiable numbers that use + // forward mode to compute the first (and, if supported, second) + // derivatives. 
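+   // [Editor's illustration -- not part of the original patch.] For these
+   // forward-mode number types the derivatives are carried by the dependent
+   // variables themselves, so the drivers below reduce to reading them off
+   // directly. For a single dependent variable f and independent variables x,
+   // the gradient extraction implemented below amounts to:
+   //
+   //   for (unsigned int i = 0; i < x.size(); ++i)
+   //     gradient[i] = internal::NumberType<ScalarType>::value(
+   //       ADNumberTraits<ADNumberType>::get_directional_derivative(f, i));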
+ + template + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if< + ADNumberTraits::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>::TapelessDrivers() + : dependent_variable_marking_safe(false) + {} + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + initialize_global_environment(const unsigned int n_independent_variables) + { + internal::configure_tapeless_mode(n_independent_variables); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + allow_dependent_variable_marking() + { + dependent_variable_marking_safe = true; + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + prevent_dependent_variable_marking() + { + dependent_variable_marking_safe = false; + } + + + template + bool + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + is_dependent_variable_marking_allowed() const + { + return dependent_variable_marking_safe; + } + + + template + ScalarType + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + value(const std::vector &dependent_variables) const + { + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + return ADNumberTraits::get_scalar_value( + dependent_variables[0]); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + gradient(const std::vector &independent_variables, + const std::vector &dependent_variables, + Vector & gradient) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + Assert(gradient.size() == independent_variables.size(), + ExcDimensionMismatch(gradient.size(), + independent_variables.size())); + + // In forward mode, the gradients are computed from the + // dependent variables + const std::size_t n_independent_variables = independent_variables.size(); + for (unsigned int i = 0; i < n_independent_variables; i++) + gradient[i] = internal::NumberType::value( + ADNumberTraits::get_directional_derivative( + dependent_variables[0], i)); + } + + + template + 
void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + hessian(const std::vector &independent_variables, + const std::vector &dependent_variables, + FullMatrix & hessian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 2, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 2)); + Assert(dependent_variables.size() == 1, + ExcDimensionMismatch(dependent_variables.size(), 1)); + Assert(hessian.m() == independent_variables.size(), + ExcDimensionMismatch(hessian.m(), independent_variables.size())); + Assert(hessian.n() == independent_variables.size(), + ExcDimensionMismatch(hessian.n(), independent_variables.size())); + + // In forward mode, the gradients are computed from the + // dependent variables + const std::size_t n_independent_variables = independent_variables.size(); + for (unsigned int i = 0; i < n_independent_variables; i++) + { + using derivative_type = + typename ADNumberTraits::derivative_type; + const derivative_type gradient_i = + ADNumberTraits::get_directional_derivative( + dependent_variables[0], i); + + for (unsigned int j = 0; j <= i; ++j) // Symmetry + { + // Extract higher-order directional derivatives. Depending on + // the AD number type, the result may be another AD number or a + // floating point value. + const ScalarType hessian_ij = + internal::NumberType::value( + ADNumberTraits::get_directional_derivative( + gradient_i, j)); + hessian[i][j] = hessian_ij; + if (i != j) + hessian[j][i] = hessian_ij; // Symmetry + } + } + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + values(const std::vector &dependent_variables, + Vector & values) const + { + Assert(values.size() == dependent_variables.size(), + ExcDimensionMismatch(values.size(), dependent_variables.size())); + + const std::size_t n_dependent_variables = dependent_variables.size(); + for (unsigned int i = 0; i < n_dependent_variables; i++) + values[i] = ADNumberTraits::get_scalar_value( + dependent_variables[i]); + } + + + template + void + TapelessDrivers< + ADNumberType, + ScalarType, + typename std::enable_if::type_code == + NumberTypes::adolc_tapeless || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad || + ADNumberTraits::type_code == + NumberTypes::sacado_dfad_dfad>::type>:: + jacobian(const std::vector &independent_variables, + const std::vector &dependent_variables, + FullMatrix & jacobian) const + { + Assert(AD::ADNumberTraits::n_supported_derivative_levels >= + 1, + ExcSupportedDerivativeLevels( + AD::ADNumberTraits::n_supported_derivative_levels, + 1)); + Assert(jacobian.m() == dependent_variables.size(), + ExcDimensionMismatch(jacobian.m(), dependent_variables.size())); + Assert(jacobian.n() == independent_variables.size(), + ExcDimensionMismatch(jacobian.n(), independent_variables.size())); + + const std::size_t n_independent_variables = independent_variables.size(); + const std::size_t n_dependent_variables = dependent_variables.size(); + + // In forward mode, the gradients are computed from the + // dependent variables + for (unsigned int i = 0; i < n_dependent_variables; i++) + for (unsigned int 
j = 0; j < n_independent_variables; j++) + jacobian[i][j] = internal::NumberType::value( + ADNumberTraits::get_directional_derivative( + dependent_variables[i], j)); + } + + + } // namespace AD +} // namespace Differentiation + + +/* --- Explicit instantiations --- */ +# ifdef DEAL_II_WITH_ADOLC +# include "ad_drivers.inst1" +# endif +# ifdef DEAL_II_TRILINOS_WITH_SACADO +# include "ad_drivers.inst2" +# endif + + +DEAL_II_NAMESPACE_CLOSE + + +#endif // defined(DEAL_II_WITH_ADOLC) || defined(DEAL_II_TRILINOS_WITH_SACADO) diff --git a/source/differentiation/ad/ad_drivers.inst1.in b/source/differentiation/ad/ad_drivers.inst1.in new file mode 100644 index 0000000000..f0829398fb --- /dev/null +++ b/source/differentiation/ad/ad_drivers.inst1.in @@ -0,0 +1,73 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +// TODO: Include complex types + +for (number : REAL_SCALARS) +{ + namespace Differentiation + \{ + namespace AD + \{ + + // -------------------------- TapedDrivers ---------------------- + + template + struct TapedDrivers::ad_type,number>; + + template + struct TapedDrivers::ad_type,number>; + + // -------------------------- TapelessDrivers ---------------------- + + template + struct TapelessDrivers::ad_type,number>; + + template + struct TapelessDrivers::ad_type,number>; + + \} + \} +} + +// Instantiations for ADHelpers for which the underlying number type is fixed +for () +{ + namespace Differentiation + \{ + namespace AD + \{ + + + + // -------------------------- Types ---------------------- + + template + struct Types::ad_type>; + + template + struct Types::ad_type>; + + // -------------------------- Numbers ---------------------- + + template + struct Numbers::ad_type>; + + template + struct Numbers::ad_type>; + + \} + \} +} \ No newline at end of file diff --git a/source/differentiation/ad/ad_drivers.inst2.in b/source/differentiation/ad/ad_drivers.inst2.in new file mode 100644 index 0000000000..f8aa39e134 --- /dev/null +++ b/source/differentiation/ad/ad_drivers.inst2.in @@ -0,0 +1,83 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + +// TODO: Include complex types + +for (number : REAL_SCALARS) +{ + namespace Differentiation + \{ + namespace AD + \{ + + // -------------------------- Types ---------------------- + + template + struct Types::ad_type>; + + template + struct Types::ad_type>; + + template + struct Types::ad_type>; + + template + struct Types::ad_type>; + + // -------------------------- Numbers ---------------------- + + template + struct Numbers::ad_type>; + + template + struct Numbers::ad_type>; + + template + struct Numbers::ad_type>; + + template + struct Numbers::ad_type>; + + // -------------------------- TapedDrivers ---------------------- + + template + struct TapedDrivers::ad_type,number>; + + template + struct TapedDrivers::ad_type,number>; + + template + struct TapedDrivers::ad_type,number>; + + template + struct TapedDrivers::ad_type,number>; + + // -------------------------- TapelessDrivers ---------------------- + + template + struct TapelessDrivers::ad_type,number>; + + template + struct TapelessDrivers::ad_type,number>; + + template + struct TapelessDrivers::ad_type,number>; + + template + struct TapelessDrivers::ad_type,number>; + + \} + \} +} diff --git a/source/differentiation/ad/ad_helpers.cc b/source/differentiation/ad/ad_helpers.cc index 941f6c830f..c1a29cb1f3 100644 --- a/source/differentiation/ad/ad_helpers.cc +++ b/source/differentiation/ad/ad_helpers.cc @@ -377,7 +377,7 @@ namespace Differentiation Assert(is_registered_tape(tape_index), ExcMessage("Tape number not registered")); - TapedDrivers::print_tape_stats(tape_index, stream); + this->taped_driver.print_tape_stats(tape_index, stream); } @@ -829,8 +829,8 @@ namespace Differentiation ExcDimensionMismatch(this->independent_variable_values.size(), this->n_independent_variables())); - return TapedDrivers::value( - this->active_tape_index(), this->independent_variable_values); + return this->taped_driver.value(this->active_tape_index(), + this->independent_variable_values); } else { @@ -840,8 +840,7 @@ namespace Differentiation this->n_independent_variables(), ExcInternalError()); - return TapelessDrivers::value( - this->dependent_variables); + return this->tapeless_driver.value(this->dependent_variables); } } @@ -891,10 +890,9 @@ namespace Differentiation ExcDimensionMismatch(this->independent_variable_values.size(), this->n_independent_variables())); - TapedDrivers::gradient( - this->active_tape_index(), - this->independent_variable_values, - gradient); + this->taped_driver.gradient(this->active_tape_index(), + this->independent_variable_values, + gradient); } else { @@ -904,8 +902,9 @@ namespace Differentiation this->n_independent_variables(), ExcInternalError()); - TapelessDrivers::gradient( - this->independent_variables, this->dependent_variables, gradient); + this->tapeless_driver.gradient(this->independent_variables, + this->dependent_variables, + gradient); } } @@ -961,10 +960,9 @@ namespace Differentiation ExcDimensionMismatch(this->independent_variable_values.size(), this->n_independent_variables())); - TapedDrivers::hessian( - this->active_tape_index(), - this->independent_variable_values, - hessian); + this->taped_driver.hessian(this->active_tape_index(), + this->independent_variable_values, + hessian); } else { @@ -973,8 +971,10 @@ namespace Differentiation Assert(this->independent_variables.size() == this->n_independent_variables(), ExcInternalError()); - TapelessDrivers::hessian( - this->independent_variables, this->dependent_variables, 
hessian); + + this->tapeless_driver.hessian(this->independent_variables, + this->dependent_variables, + hessian); } } @@ -1048,18 +1048,16 @@ namespace Differentiation ExcDimensionMismatch(this->independent_variable_values.size(), this->n_independent_variables())); - TapedDrivers::values( - this->active_tape_index(), - this->n_dependent_variables(), - this->independent_variable_values, - values); + this->taped_driver.values(this->active_tape_index(), + this->n_dependent_variables(), + this->independent_variable_values, + values); } else { Assert(ADNumberTraits::is_tapeless == true, ExcInternalError()); - TapelessDrivers::values( - this->dependent_variables, values); + this->tapeless_driver.values(this->dependent_variables, values); } } @@ -1106,11 +1104,10 @@ namespace Differentiation ExcDimensionMismatch(this->independent_variable_values.size(), this->n_independent_variables())); - TapedDrivers::jacobian( - this->active_tape_index(), - this->n_dependent_variables(), - this->independent_variable_values, - jacobian); + this->taped_driver.jacobian(this->active_tape_index(), + this->n_dependent_variables(), + this->independent_variable_values, + jacobian); } else { @@ -1119,8 +1116,10 @@ namespace Differentiation Assert(this->independent_variables.size() == this->n_independent_variables(), ExcInternalError()); - TapelessDrivers::jacobian( - this->independent_variables, this->dependent_variables, jacobian); + + this->tapeless_driver.jacobian(this->independent_variables, + this->dependent_variables, + jacobian); } } -- 2.39.5
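Editor's note (illustrative addendum, not part of the patch): the ad_helpers.cc hunks above replace static calls to the driver classes with calls through the taped_driver / tapeless_driver members. A minimal sketch of the resulting call pattern, with the member name taken from those hunks and the number and scalar types assumed purely for illustration:

    // Held by the helper class (previously, calls went through static methods):
    TapedDrivers<adouble, double> taped_driver;

    // Scalar value and gradient of a taped dependent function:
    const double psi =
      taped_driver.value(active_tape_index, independent_variable_values);

    Vector<double> dpsi_dx(independent_variable_values.size());
    taped_driver.gradient(active_tape_index,
                          independent_variable_values,
                          dpsi_dx);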