From: Jean-Paul Pelteret
Date: Sat, 18 Nov 2017 11:13:50 +0000 (+0100)
Subject: Implement driver classes for internally supported AD libraries.
X-Git-Tag: v9.1.0-rc1~885^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=e7bd4998fcdb7442cd1b72d1d51af0d930d87cfd;p=dealii.git

Implement driver classes for internally supported AD libraries.
---

diff --git a/doc/doxygen/headers/automatic_and_symbolic_differentiation.h b/doc/doxygen/headers/automatic_and_symbolic_differentiation.h
index 2e8b6f701c..15ea93c26e 100644
--- a/doc/doxygen/headers/automatic_and_symbolic_differentiation.h
+++ b/doc/doxygen/headers/automatic_and_symbolic_differentiation.h
@@ -408,6 +408,9 @@
  * A summary of the files that implement the interface to the supported auto-differentiable
  * numbers is as follows:
  *
+ * - ad_drivers.h: Provides classes that act as drivers to the interface of internally supported
+ *   automatic differentiation libraries. These are used internally as an intermediary to the
+ *   helper classes that we provide.
  * - ad_number_types.h: Introduces an enumeration (called a type code) for the
  *   auto-differentiable number combinations that will be supported by the driver classes.
  *   The rationale behind the use of this somewhat restrictive mechanism is discussed below.
diff --git a/doc/news/changes/minor/20171118Jean-PaulPelteret b/doc/news/changes/minor/20171118Jean-PaulPelteret
new file mode 100644
index 0000000000..49bc0b2ad9
--- /dev/null
+++ b/doc/news/changes/minor/20171118Jean-PaulPelteret
@@ -0,0 +1,5 @@
+New: Classes that act as an interface to the drivers of internally supported automatic
+differentiation libraries (ADOL-C and Sacado) have been implemented. A brief summary
+of what these classes are used for is given in the @ref auto_symb_diff module.
+(Jean-Paul Pelteret, 2017/11/18)
diff --git a/include/deal.II/differentiation/ad/ad_drivers.h b/include/deal.II/differentiation/ad/ad_drivers.h
new file mode 100644
index 0000000000..32f24d5ec6
--- /dev/null
+++ b/include/deal.II/differentiation/ad/ad_drivers.h
@@ -0,0 +1,1337 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_differentiation_ad_ad_drivers_h
+#define dealii_differentiation_ad_ad_drivers_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/exceptions.h>
+#include <deal.II/base/numbers.h>
+#include <deal.II/base/types.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/differentiation/ad/ad_number_types.h>
+#include <deal.II/differentiation/ad/ad_number_traits.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/vector.h>
+
+#ifdef DEAL_II_WITH_ADOLC
+
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <adolc/drivers/drivers.h>
+#include <adolc/taping.h>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
+#endif // DEAL_II_WITH_ADOLC
+
+#include <vector>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Differentiation
+{
+  namespace AD
+  {
+
+    /**
+     * @addtogroup Exceptions
+     */
+    //@{
+
+    /**
+     * Exception denoting that a class requires some specialization
+     * in order to be used.
+     */
+    DeclExceptionMsg (ExcRequiresADNumberSpecialization,
+                      "This function is called in a class that is expected to be specialized "
+                      "for auto-differentiable numbers.");
+
+    /**
+     * Exception denoting that ADOL-C is a required feature.
+     */
+    DeclExceptionMsg (ExcRequiresAdolC,
+                      "This function is only available if deal.II is compiled with ADOL-C.");
+
+    /**
+     * This exception is raised whenever an auto-differentiable number does not
+     * support the required number of derivative operations.
+     *
+     * The first parameter to the constructor is the number of derivative operations
+     * that it provides, and the second is the minimum number that are required.
+     * Both parameters are of type std::size_t.
+     */
+    DeclException2 (ExcSupportedDerivativeLevels,
+                    std::size_t, std::size_t,
+                    << "The number of derivative levels that this auto-differentiable number supports is "
+                    << arg1 << ", but it is required that it supports at least " << arg2 << " levels.");
+
+    //@}
+
+    /**
+     * A driver class for taped auto-differentiable numbers.
+     *
+     * It is intended that this class be specialized for the valid
+     * combinations of auto-differentiable numbers and output scalar
+     * number types.
+     *
+     * @tparam ADNumberType A type corresponding to a supported
+     *         auto-differentiable number.
+     * @tparam ScalarType A real or complex floating point number.
+     * @tparam T An arbitrary type resulting from the application of
+     *         the SFINAE idiom to selectively specialize this class.
+     *
+     * @author Jean-Paul Pelteret, 2017
+     */
+    template <typename ADNumberType, typename ScalarType, typename T = void>
+    struct TapedDrivers
+    {
+      // This dummy class definition safely supports compilation
+      // against tapeless numbers or taped number types that have
+      // not yet been implemented.
+
+      /**
+       * @name Taping
+       */
+      //@{
+
+      /**
+       * Enable the recording mode for a given tape.
+       *
+       * @param[in] tape_index The index of the tape to be written.
+       * @param[in] keep_independent_values Determines whether the numerical
+       *            values of all independent variables are recorded in the
+       *            tape buffer.
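+       *
+       * @note As a minimal sketch only (the tape index is arbitrary, and any
+       * supported taped number type could stand in for the hypothetical
+       * @p ad_type), a taping session driven through this class might look
+       * as follows:
+       * @code
+       *   // ad_type: a hypothetical taped auto-differentiable number type
+       *   TapedDrivers<ad_type, double>::enable_taping(0, true);
+       *   // ... register independent variables, then evaluate and thereby
+       *   //     record the dependent function(s) on tape 0 ...
+       *   TapedDrivers<ad_type, double>::disable_taping(0, false);
+       * @endcode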
+       */
+      static void
+      enable_taping(const unsigned int &tape_index,
+                    const bool         &keep_independent_values);
+
+      /**
+       * Disable the recording mode for a given tape.
+       *
+       * @param[in] active_tape_index The index of the (currently active) tape
+       *            to be finalized and potentially written to file.
+       * @param[in] write_tapes_to_file A flag that specifies whether the tape
+       *            should be written to file or kept in memory.
+       */
+      static void
+      disable_taping(const unsigned int &active_tape_index,
+                     const bool         &write_tapes_to_file);
+
+      /**
+       * Prints the statistics regarding the usage of the tapes.
+       *
+       * @param[in] stream The output stream to which the values are to be written.
+       * @param[in] tape_index The index of the tape to get the statistics of.
+       */
+      template <typename Stream>
+      static void
+      print_tape_stats(Stream             &stream,
+                       const unsigned int &tape_index);
+
+      //@}
+
+      /**
+       * @name Drivers for scalar functions (one dependent variable)
+       */
+      //@{
+
+      /**
+       * Computes the value of the scalar field.
+       *
+       * @param[in] active_tape_index The index of the tape on which the dependent
+       *            function is recorded.
+       * @param[in] n_independent_variables The number of independent variables
+       *            whose sensitivities were tracked.
+       * @param[in] independent_variables The scalar values of the independent
+       *            variables whose sensitivities were tracked.
+       *
+       * @return The scalar value of the function.
+       */
+      static ScalarType
+      value (const unsigned int            &active_tape_index,
+             const unsigned int            &n_independent_variables,
+             const std::vector<ScalarType> &independent_variables);
+
+      /**
+       * Computes the gradient of the scalar field with respect to all independent
+       * variables.
+       *
+       * @param[in] active_tape_index The index of the tape on which the dependent
+       *            function is recorded.
+       * @param[in] n_independent_variables The number of independent variables
+       *            whose sensitivities were tracked.
+       * @param[in] independent_variables The scalar values of the independent
+       *            variables whose sensitivities were tracked.
+       * @param[out] gradient The scalar values of the dependent function's gradients.
+       */
+      static void
+      gradient (const unsigned int            &active_tape_index,
+                const unsigned int            &n_independent_variables,
+                const std::vector<ScalarType> &independent_variables,
+                Vector<ScalarType>            &gradient);
+
+      /**
+       * Computes the hessian of the scalar field with respect to all independent
+       * variables.
+       *
+       * @param[in] active_tape_index The index of the tape on which the dependent
+       *            function is recorded.
+       * @param[in] n_independent_variables The number of independent variables
+       *            whose sensitivities were tracked.
+       * @param[in] independent_variables The scalar values of the independent
+       *            variables whose sensitivities were tracked.
+       * @param[out] hessian The scalar values of the dependent function's hessian.
+       */
+      static void
+      hessian (const unsigned int            &active_tape_index,
+               const unsigned int            &n_independent_variables,
+               const std::vector<ScalarType> &independent_variables,
+               FullMatrix<ScalarType>        &hessian);
+
+      //@}
+
+      /**
+       * @name Drivers for vector functions (multiple dependent variables)
+       */
+      //@{
+
+      /**
+       * Computes the values of the vector field.
+       *
+       * @param[in] active_tape_index The index of the tape on which the dependent
+       *            function is recorded.
+       * @param[in] n_dependent_variables The number of dependent variables.
+       * @param[in] n_independent_variables The number of independent variables
+       *            whose sensitivities were tracked.
+       * @param[in] independent_variables The scalar values of the independent
+       *            variables whose sensitivities were tracked.
+       * @param[out] values The scalar values of the dependent functions.
+       */
+      static void
+      values (const unsigned int            &active_tape_index,
+              const unsigned int            &n_dependent_variables,
+              const unsigned int            &n_independent_variables,
+              const std::vector<ScalarType> &independent_variables,
+              Vector<ScalarType>            &values);
+
+      /**
+       * Computes the gradient of the vector field.
+       *
+       * @param[in] active_tape_index The index of the tape on which the dependent
+       *            function is recorded.
+       * @param[in] n_dependent_variables The number of dependent variables.
+       * @param[in] n_independent_variables The number of independent variables
+       *            whose sensitivities were tracked.
+       * @param[in] independent_variables The scalar values of the independent
+       *            variables whose sensitivities were tracked.
+       * @param[out] jacobian The scalar values of the dependent function's jacobian.
+       */
+      static void
+      jacobian (const unsigned int            &active_tape_index,
+                const unsigned int            &n_dependent_variables,
+                const unsigned int            &n_independent_variables,
+                const std::vector<ScalarType> &independent_variables,
+                FullMatrix<ScalarType>        &jacobian);
+
+      //@}
+
+    };
+
+
+
+    /**
+     * A prototype driver class for tapeless auto-differentiable numbers.
+     *
+     * It is intended that this class be specialized for the valid
+     * combinations of auto-differentiable numbers and output scalar
+     * number types.
+     *
+     * @tparam ADNumberType A type corresponding to a supported
+     *         auto-differentiable number.
+     * @tparam ScalarType A real or complex floating point number.
+     * @tparam T An arbitrary type resulting from the application of
+     *         the SFINAE idiom to selectively specialize this class.
+     *
+     * @author Jean-Paul Pelteret, 2017
+     */
+    template <typename ADNumberType, typename ScalarType, typename T = void>
+    struct TapelessDrivers
+    {
+      // This dummy class definition safely supports compilation
+      // against taped numbers or tapeless number types that have
+      // not yet been implemented.
+
+      /**
+       * @name Configuration
+       */
+      //@{
+
+      /**
+       * In the event that the tapeless mode requires a priori knowledge
+       * of how many directional derivatives might need to be computed, this function
+       * informs the auto-differentiation library of what this number is.
+       *
+       * @param[in] n_independent_variables The number of independent variables
+       *            that will be used in the entire duration of the
+       *            simulation.
+       *
+       * @warning For Adol-C tapeless numbers, the value given to @p n_independent_variables
+       * should be the maximum number of independent variables that will be
+       * used in the entire duration of the simulation. This is important in the
+       * context of, for example, hp-FEM and for multiple constitutive models with
+       * different numbers of fields from which a linearization must be computed.
+       */
+      static void
+      initialize (const unsigned int &n_independent_variables);
+
+      //@}
+
+      /**
+       * @name Drivers for scalar functions
+       */
+      //@{
+
+      /**
+       * Computes the value of the scalar field.
+       *
+       * @param[in] dependent_variables The dependent variables whose values are to
+       *            be extracted.
+       *
+       * @return The scalar value of the function.
+       */
+      static ScalarType
+      value (const std::vector<ADNumberType> &dependent_variables);
+
+      /**
+       * Computes the gradient of the scalar field with respect to all independent
+       * variables.
+       *
+       * @param[in] independent_variables The independent variables whose sensitivities
+       *            were tracked.
+       * @param[in] dependent_variables The (single) dependent variable whose gradients
+       *            are to be extracted.
+       * @param[out] gradient The scalar values of the dependent function's gradients.
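+       *
+       * @note As an illustrative sketch only (@p ad_type is a hypothetical
+       * tapeless auto-differentiable number type, and the recorded function
+       * is arbitrary), this driver could be invoked as:
+       * @code
+       *   std::vector<ad_type> x = ...; // independent variables, with
+       *                                 // sensitivities marked
+       *   const std::vector<ad_type> f = {x[0] * x[1]}; // one dependent variable
+       *   Vector<double> df_dx (x.size());
+       *   TapelessDrivers<ad_type, double>::gradient(x, f, df_dx);
+       * @endcode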
+       */
+      static void
+      gradient (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                Vector<ScalarType>              &gradient);
+
+      /**
+       * Computes the hessian of the scalar field with respect to all independent
+       * variables.
+       *
+       * @param[in] independent_variables The independent variables whose sensitivities
+       *            were tracked.
+       * @param[in] dependent_variables The (single) dependent variable whose hessians
+       *            are to be extracted.
+       * @param[out] hessian The scalar values of the function's hessian.
+       */
+      static void
+      hessian (const std::vector<ADNumberType> &independent_variables,
+               const std::vector<ADNumberType> &dependent_variables,
+               FullMatrix<ScalarType>          &hessian);
+
+      //@}
+
+      /**
+       * @name Drivers for vector functions
+       */
+      //@{
+
+      /**
+       * Computes the values of the vector field.
+       *
+       * @param[in] dependent_variables The dependent variables whose values
+       *            are to be extracted.
+       * @param[out] values The scalar values of the dependent functions.
+       */
+      static void
+      values (const std::vector<ADNumberType> &dependent_variables,
+              Vector<ScalarType>              &values);
+
+      /**
+       * Computes the gradient of the vector field.
+       *
+       * @param[in] independent_variables The independent variables whose sensitivities
+       *            were tracked.
+       * @param[in] dependent_variables The dependent variables whose jacobian
+       *            is to be extracted.
+       * @param[out] jacobian The scalar values of the function's jacobian.
+       */
+      static void
+      jacobian (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                FullMatrix<ScalarType>          &jacobian);
+
+      //@}
+
+    };
+
+  }
+}
+
+
+
+/* --------------------------- inline and template functions ------------------------- */
+
+
+#ifndef DOXYGEN
+
+namespace Differentiation
+{
+  namespace AD
+  {
+
+    // ------------- TapedDrivers -------------
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::enable_taping(
+      const unsigned int &,
+      const bool &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::disable_taping(
+      const unsigned int &,
+      const bool &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    template <typename Stream>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::print_tape_stats(
+      Stream &,
+      const unsigned int &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    ScalarType
+    TapedDrivers<ADNumberType, ScalarType, T>::value (
+      const unsigned int &,
+      const unsigned int &,
+      const std::vector<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+      return ScalarType(0.0);
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::gradient (
+      const unsigned int &,
+      const unsigned int &,
+      const std::vector<ScalarType> &,
+      Vector<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::hessian (
+      const unsigned int &,
+      const unsigned int &,
+      const std::vector<ScalarType> &,
+      FullMatrix<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::values (
+      const unsigned int &,
+      const unsigned int &,
+      const unsigned int &,
+      const std::vector<ScalarType> &,
+      Vector<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapedDrivers<ADNumberType, ScalarType, T>::jacobian (
+      const unsigned int &,
+      const unsigned int &,
+      const unsigned int &,
+      const std::vector<ScalarType> &,
+      FullMatrix<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    // Specialization for taped Adol-C auto-differentiable numbers.
+    //
+    // Note: In the case of Adol-C taped numbers, the associated scalar
+    // type is always expected to be a double.
+    // So we need to make a further
+    // specialization when ScalarType is a float.
+    template <typename ADNumberType>
+    struct TapedDrivers<
+      ADNumberType, double,
+      typename std::enable_if<
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::adolc_taped
+        >::type>
+    {
+      typedef double scalar_type;
+
+      // === Taping ===
+
+      static void
+      enable_taping(const unsigned int &tape_index,
+                    const bool         &keep)
+      {
+        trace_on(tape_index, keep);
+      }
+
+      static void
+      disable_taping(const unsigned int &active_tape_index,
+                     const bool         &write_tapes_to_file)
+      {
+#ifdef DEAL_II_WITH_ADOLC
+        if (write_tapes_to_file)
+          {
+            trace_off(active_tape_index); // Slow
+            std::vector<std::size_t> counts (STAT_SIZE);
+            ::tapestats(active_tape_index, counts.data());
+          }
+        else
+          trace_off(); // Fast(er)
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+      }
+
+      template <typename Stream>
+      static void
+      print_tape_stats(Stream             &stream,
+                       const unsigned int &tape_index)
+      {
+        // See Adol-C manual section 2.1
+        // and adolc/taping.h
+        std::vector<std::size_t> counts (STAT_SIZE);
+        ::tapestats(tape_index, counts.data());
+        Assert(counts.size() >= 18, ExcInternalError());
+        stream
+            << "Tape index: " << tape_index << "\n"
+            << "Number of independent variables: " << counts[0] << "\n"
+            << "Number of dependent variables: " << counts[1] << "\n"
+            << "Max number of live, active variables: " << counts[2] << "\n"
+            << "Size of taylor stack (number of overwrites): " << counts[3] << "\n"
+            << "Operations buffer size: " << counts[4] << "\n"
+            << "Total number of recorded operations: " << counts[5] << "\n"
+            << "Operations file written or not: " << counts[6] << "\n"
+            << "Overall number of locations: " << counts[7] << "\n"
+            << "Locations file written or not: " << counts[8] << "\n"
+            << "Overall number of values: " << counts[9] << "\n"
+            << "Values file written or not: " << counts[10] << "\n"
+            << "Locations buffer size: " << counts[11] << "\n"
+            << "Values buffer size: " << counts[12] << "\n"
+            << "Taylor buffer size: " << counts[13] << "\n"
+            << "Number of eq_*_prod for sparsity pattern: " << counts[14] << "\n"
+            << "Use of 'min_op', deferred to 'abs_op' for piecewise calculations: " << counts[15] << "\n"
+            << "Number of 'abs' calls that can switch branch: " << counts[16] << "\n"
+            << "Number of parameters (doubles) interchangeable without retaping: " << counts[17] << "\n"
+            << std::flush;
+      }
+
+      // === Scalar drivers ===
+
+      static scalar_type
+      value (const unsigned int             &active_tape_index,
+             const unsigned int             &n_independent_variables,
+             const std::vector<scalar_type> &independent_variables)
+      {
+        scalar_type *f = new scalar_type();
+
+#ifdef DEAL_II_WITH_ADOLC
+        ::function(active_tape_index,
+                   1, // Only one dependent variable
+                   n_independent_variables,
+                   const_cast<double *>(independent_variables.data()),
+                   f);
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+
+        const scalar_type value = f[0];
+
+        // Cleanup :-/
+        delete f;
+        f = nullptr;
+
+        return value;
+      }
+
+      static void
+      gradient (const unsigned int             &active_tape_index,
+                const unsigned int             &n_independent_variables,
+                const std::vector<scalar_type> &independent_variables,
+                Vector<scalar_type>            &gradient)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(gradient.size() == independent_variables.size(),
+               ExcDimensionMismatch(gradient.size(), independent_variables.size()));
+
+        scalar_type *g = new scalar_type[n_independent_variables];
+
+#ifdef DEAL_II_WITH_ADOLC
+        ::gradient(active_tape_index,
+                   n_independent_variables,
+                   const_cast<double *>(independent_variables.data()),
+                   g);
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          gradient[i] = g[i];
+
+        delete[] g;
+        g = nullptr;
+      }
+
+      static void
+      hessian (const unsigned int             &active_tape_index,
+               const unsigned int             &n_independent_variables,
+               const std::vector<scalar_type> &independent_variables,
+               FullMatrix<scalar_type>        &hessian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 2,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 2));
+        Assert(hessian.m() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.m(), independent_variables.size()));
+        Assert(hessian.n() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.n(), independent_variables.size()));
+
+        // Adol-C's ::hessian driver fills in only the lower triangle,
+        // so a ragged lower-triangular array is allocated here.
+        scalar_type **H = new scalar_type *[n_independent_variables];
+        for (unsigned int i = 0; i < n_independent_variables; ++i)
+          H[i] = new scalar_type[i + 1];
+
+#ifdef DEAL_II_WITH_ADOLC
+        ::hessian(active_tape_index,
+                  n_independent_variables,
+                  const_cast<double *>(independent_variables.data()),
+                  H);
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          {
+            for (unsigned int j = 0; j <= i; ++j)
+              {
+                hessian[i][j] = H[i][j];
+                if (i != j)
+                  hessian[j][i] = H[i][j]; // Symmetry
+              }
+            delete[] H[i];
+          }
+        delete[] H;
+      }
+
+      // === Vector drivers ===
+
+      static void
+      values (const unsigned int             &active_tape_index,
+              const unsigned int             &n_dependent_variables,
+              const unsigned int             &n_independent_variables,
+              const std::vector<scalar_type> &independent_variables,
+              Vector<scalar_type>            &values)
+      {
+        Assert(values.size() == n_dependent_variables,
+               ExcDimensionMismatch(values.size(), n_dependent_variables));
+
+        scalar_type *f = new scalar_type[n_dependent_variables];
+
+#ifdef DEAL_II_WITH_ADOLC
+        ::function(active_tape_index,
+                   n_dependent_variables,
+                   n_independent_variables,
+                   const_cast<double *>(independent_variables.data()),
+                   f);
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          values[i] = f[i];
+
+        delete[] f;
+        f = nullptr;
+      }
+
+      static void
+      jacobian (const unsigned int             &active_tape_index,
+                const unsigned int             &n_dependent_variables,
+                const unsigned int             &n_independent_variables,
+                const std::vector<scalar_type> &independent_variables,
+                FullMatrix<scalar_type>        &jacobian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(jacobian.m() == n_dependent_variables,
+               ExcDimensionMismatch(jacobian.m(), n_dependent_variables));
+        Assert(jacobian.n() == independent_variables.size(),
+               ExcDimensionMismatch(jacobian.n(), independent_variables.size()));
+
+        scalar_type **J = new scalar_type *[n_dependent_variables];
+        for (unsigned int i = 0; i < n_dependent_variables; ++i)
+          J[i] = new scalar_type[n_independent_variables];
+
+#ifdef DEAL_II_WITH_ADOLC
+        ::jacobian(active_tape_index,
+                   n_dependent_variables,
+                   n_independent_variables,
+                   const_cast<double *>(independent_variables.data()),
+                   J);
+#else
+        AssertThrow(false, ExcRequiresAdolC());
+#endif
+
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          {
+            for (unsigned int j = 0; j < n_independent_variables; j++)
+              jacobian[i][j] = J[i][j];
+            delete[] J[i];
+          }
+        delete[] J;
+      }
+
+    };
+
+
+    // Specialization for taped Adol-C auto-differentiable numbers whose
+    // output scalar type is a float: all computations are performed with
+    // the double specialization above, and the results are then down-cast.
+    template <typename ADNumberType>
+    struct TapedDrivers<
+      ADNumberType, float,
+      typename std::enable_if<
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::adolc_taped
+        >::type>
+    {
+      typedef float scalar_type;
+
+      static std::vector<double>
+      vector_float_to_double (const std::vector<float> &in)
+      {
+        std::vector<double> out (in.size());
+        std::copy(in.begin(), in.end(), out.begin());
+        return out;
+      }
+
+      // === Taping ===
+
+      static void
+      enable_taping(const unsigned int &tape_index,
+                    const bool         &keep)
+      {
+        TapedDrivers<ADNumberType, double>::enable_taping(tape_index, keep);
+      }
+
+      static void
+      disable_taping(const unsigned int &active_tape_index,
+                     const bool         &write_tapes_to_file)
+      {
+        TapedDrivers<ADNumberType, double>::disable_taping(active_tape_index, write_tapes_to_file);
+      }
+
+      template <typename Stream>
+      static void
+      print_tape_stats(Stream             &stream,
+                       const unsigned int &tape_index)
+      {
+        TapedDrivers<ADNumberType, double>::print_tape_stats(stream, tape_index);
+      }
+
+      // === Scalar drivers ===
+
+      static scalar_type
+      value (const unsigned int             &active_tape_index,
+             const unsigned int             &n_independent_variables,
+             const std::vector<scalar_type> &independent_variables)
+      {
+        return TapedDrivers<ADNumberType, double>::value(
+                 active_tape_index,
+                 n_independent_variables,
+                 vector_float_to_double(independent_variables));
+      }
+
+      static void
+      gradient (const unsigned int             &active_tape_index,
+                const unsigned int             &n_independent_variables,
+                const std::vector<scalar_type> &independent_variables,
+                Vector<scalar_type>            &gradient)
+      {
+        Vector<double> gradient_double (gradient.size());
+        TapedDrivers<ADNumberType, double>::gradient(
+          active_tape_index,
+          n_independent_variables,
+          vector_float_to_double(independent_variables),
+          gradient_double);
+        gradient = gradient_double;
+      }
+
+      static void
+      hessian (const unsigned int             &active_tape_index,
+               const unsigned int             &n_independent_variables,
+               const std::vector<scalar_type> &independent_variables,
+               FullMatrix<scalar_type>        &hessian)
+      {
+        FullMatrix<double> hessian_double (hessian.m(), hessian.n());
+        TapedDrivers<ADNumberType, double>::hessian(
+          active_tape_index,
+          n_independent_variables,
+          vector_float_to_double(independent_variables),
+          hessian_double);
+        hessian = hessian_double;
+      }
+
+      // === Vector drivers ===
+
+      static void
+      values (const unsigned int             &active_tape_index,
+              const unsigned int             &n_dependent_variables,
+              const unsigned int             &n_independent_variables,
+              const std::vector<scalar_type> &independent_variables,
+              Vector<scalar_type>            &values)
+      {
+        Vector<double> values_double (values.size());
+        TapedDrivers<ADNumberType, double>::values(
+          active_tape_index,
+          n_dependent_variables,
+          n_independent_variables,
+          vector_float_to_double(independent_variables),
+          values_double);
+        values = values_double;
+      }
+
+      static void
+      jacobian (const unsigned int             &active_tape_index,
+                const unsigned int             &n_dependent_variables,
+                const unsigned int             &n_independent_variables,
+                const std::vector<scalar_type> &independent_variables,
+                FullMatrix<scalar_type>        &jacobian)
+      {
+        FullMatrix<double> jacobian_double (jacobian.m(), jacobian.n());
+        TapedDrivers<ADNumberType, double>::jacobian(
+          active_tape_index,
+          n_dependent_variables,
+          n_independent_variables,
+          vector_float_to_double(independent_variables),
+          jacobian_double);
+        jacobian = jacobian_double;
+      }
+    };
+
+
+    // ------------- TapelessDrivers -------------
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapelessDrivers<ADNumberType, ScalarType, T>::initialize (
+      const unsigned int &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    ScalarType
+    TapelessDrivers<ADNumberType, ScalarType, T>::value (
+      const std::vector<ADNumberType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+      return ScalarType(0.0);
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapelessDrivers<ADNumberType, ScalarType, T>::gradient (
+      const std::vector<ADNumberType> &,
+      const std::vector<ADNumberType> &,
+      Vector<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapelessDrivers<ADNumberType, ScalarType, T>::hessian (
+      const std::vector<ADNumberType> &,
+      const std::vector<ADNumberType> &,
+      FullMatrix<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapelessDrivers<ADNumberType, ScalarType, T>::values (
+      const std::vector<ADNumberType> &,
+      Vector<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    template <typename ADNumberType, typename ScalarType, typename T>
+    void
+    TapelessDrivers<ADNumberType, ScalarType, T>::jacobian (
+      const std::vector<ADNumberType> &,
+      const std::vector<ADNumberType> &,
+      FullMatrix<ScalarType> &)
+    {
+      AssertThrow(false, ExcRequiresADNumberSpecialization());
+    }
+
+
+    namespace internal
+    {
+
+      // A dummy function to define the active dependent variable when using
+      // reverse-mode AD.
+      template <typename ADNumberType>
+      static typename std::enable_if<
+        !is_sacado_rad_number<ADNumberType>::value
+        >::type
+      reverse_mode_dependent_variable_activation (ADNumberType &)
+      {
+
+      }
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+
+      // Define the active dependent variable when using reverse-mode AD.
+      //
+      // If there are multiple dependent variables then it is necessary to
+      // inform the independent variables, from which the adjoints are computed,
+      // which dependent variable they are computing the gradients with respect
+      // to. This function broadcasts this information.
+      template <typename ADNumberType>
+      static typename std::enable_if<
+        is_sacado_rad_number<ADNumberType>::value
+        >::type
+      reverse_mode_dependent_variable_activation (ADNumberType &dependent_variable)
+      {
+        // Compute all gradients (adjoints) for this
+        // reverse-mode Sacado dependent variable.
+        // For reverse-mode Sacado numbers it is necessary to broadcast to
+        // all independent variables that it is time to compute gradients.
+        // For one dependent variable one would just need to call
+        // ADNumberType::Gradcomp(), but since we have a more
+        // generic implementation for vectors of dependent variables
+        // (vector mode) we default to the more complex case.
+        ADNumberType::Outvar_Gradcomp(dependent_variable);
+      }
+
+#endif
+
+
+      // A dummy function to enable vector mode for tapeless
+      // auto-differentiable numbers.
+      template <typename ADNumberType>
+      static typename std::enable_if<
+        !(is_adolc_number<ADNumberType>::value &&
+          is_tapeless_ad_number<ADNumberType>::value)
+        >::type
+      configure_tapeless_mode (const unsigned int)
+      {
+
+      }
+
+#ifdef DEAL_II_WITH_ADOLC
+
+
+      // Enable vector mode for Adol-C tapeless numbers.
+      //
+      // This function checks whether it is legal to increase the maximum
+      // number of directional derivatives to be considered during
+      // calculations. If not, then it throws an error.
+      template <typename ADNumberType>
+      static typename std::enable_if<
+        is_adolc_number<ADNumberType>::value &&
+        is_tapeless_ad_number<ADNumberType>::value
+        >::type
+      configure_tapeless_mode (const unsigned int n_directional_derivatives)
+      {
+        // See Adol-C manual section 7.1
+        //
+        // NOTE: It is critical that this is done for tapeless mode BEFORE
+        // any adtl::adouble are created. If this is not done, then we see
+        // this scary warning:
+        //
+        // "
+        // ADOL-C Warning: Tapeless: Setting numDir could change memory
+        // allocation of derivatives in existing adoubles and may lead to
+        // erroneous results or memory corruption
+        // "
+        //
+        // So we use this dummy function to configure this setting before
+        // we create and initialize our class data
+        const std::size_t n_live_variables = adtl::refcounter::getNumLiveVar();
+        if (n_live_variables == 0)
+          {
+            adtl::setNumDir(n_directional_derivatives);
+          }
+        else
+          {
+            // So there are some live active variables floating around. Here we
+            // check if we are being asked to increase the number of computable
+            // directional derivatives. If this really is necessary then it is
+            // absolutely vital that there exist no live variables before doing
+            // so.
+            const std::size_t n_set_directional_derivatives = adtl::getNumDir();
+            if (n_directional_derivatives > n_set_directional_derivatives)
+              AssertThrow(n_live_variables == 0,
+                          ExcMessage("There are currently " +
+                                     Utilities::to_string(n_live_variables) + " live "
+                                     "adtl::adouble variables in existence. They currently "
+                                     "assume " +
+                                     Utilities::to_string(n_set_directional_derivatives) + " directional derivatives "
+                                     "but you wish to increase this to " +
+                                     Utilities::to_string(n_directional_derivatives) + ". \n"
+                                     "To safely change (or more specifically in this case, "
+                                     "increase) the number of directional derivatives, there "
+                                     "must be no tapeless doubles in local/global scope."));
+          }
+      }
+
+#endif
+
+    }
+
+
+    // Specialization for auto-differentiable numbers that use
+    // reverse mode to compute the first derivatives (and, if supported,
+    // forward mode for the second).
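+    //
+    // As an illustrative sketch only (the concrete number type is
+    // hypothetical): with a reverse-mode type such as
+    // Sacado::Rad::ADvar<double>, after evaluating
+    //   f = x[0] * x[1];
+    // and activating the dependent variable (see
+    // internal::reverse_mode_dependent_variable_activation() above), each
+    // independent variable carries its adjoint, e.g. x[0].adj() == df/dx[0].
+    // The drivers below extract exactly these adjoints through
+    // ADNumberTraits<ADNumberType>::get_directional_derivative().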
+    template <typename ADNumberType, typename ScalarType>
+    struct TapelessDrivers<
+      ADNumberType, ScalarType,
+      typename std::enable_if<
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::sacado_rad ||
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::sacado_rad_dfad
+        >::type>
+    {
+
+      // === Configuration ===
+
+      static void
+      initialize (const unsigned int &n_independent_variables)
+      {
+        internal::configure_tapeless_mode<ADNumberType>(n_independent_variables);
+      }
+
+      // === Scalar drivers ===
+
+      static ScalarType
+      value (const std::vector<ADNumberType> &dependent_variables)
+      {
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        return ADNumberTraits<ADNumberType>::get_scalar_value(dependent_variables[0]);
+      }
+
+      static void
+      gradient (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                Vector<ScalarType>              &gradient)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        Assert(gradient.size() == independent_variables.size(),
+               ExcDimensionMismatch(gradient.size(), independent_variables.size()));
+
+        // In reverse mode, the gradients are computed from the
+        // independent variables (i.e. the adjoint)
+        internal::reverse_mode_dependent_variable_activation(
+          const_cast<ADNumberType &>(dependent_variables[0]));
+        const std::size_t n_independent_variables = independent_variables.size();
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          gradient[i] = dealii::internal::NumberType<ScalarType>::value(
+                          ADNumberTraits<ADNumberType>::get_directional_derivative(
+                            independent_variables[i],
+                            0 /*This number doesn't really matter*/));
+      }
+
+      static void
+      hessian (const std::vector<ADNumberType> &independent_variables,
+               const std::vector<ADNumberType> &dependent_variables,
+               FullMatrix<ScalarType>          &hessian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 2,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 2));
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        Assert(hessian.m() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.m(), independent_variables.size()));
+        Assert(hessian.n() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.n(), independent_variables.size()));
+
+        // In reverse mode, the gradients are computed from the
+        // independent variables (i.e. the adjoint)
+        internal::reverse_mode_dependent_variable_activation(
+          const_cast<ADNumberType &>(dependent_variables[0]));
+        const std::size_t n_independent_variables = independent_variables.size();
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          {
+            typedef typename ADNumberTraits<ADNumberType>::derivative_type derivative_type;
+            const derivative_type gradient_i
+              = ADNumberTraits<ADNumberType>::get_directional_derivative(independent_variables[i], i);
+
+            for (unsigned int j = 0; j <= i; ++j) // Symmetry
+              {
+                // Extract higher-order directional derivatives. Depending on the AD number type,
+                // the result may be another AD number or a floating point value.
+                const ScalarType hessian_ij
+                  = dealii::internal::NumberType<ScalarType>::value(
+                      ADNumberTraits<ADNumberType>::get_directional_derivative(gradient_i, j));
+                hessian[i][j] = hessian_ij;
+                if (i != j)
+                  hessian[j][i] = hessian_ij; // Symmetry
+              }
+          }
+      }
+
+      // === Vector drivers ===
+
+      static void
+      values (const std::vector<ADNumberType> &dependent_variables,
+              Vector<ScalarType>              &values)
+      {
+        Assert(values.size() == dependent_variables.size(),
+               ExcDimensionMismatch(values.size(), dependent_variables.size()));
+
+        const std::size_t n_dependent_variables = dependent_variables.size();
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          values[i] = ADNumberTraits<ADNumberType>::get_scalar_value(dependent_variables[i]);
+      }
+
+      static void
+      jacobian (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                FullMatrix<ScalarType>          &jacobian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(jacobian.m() == dependent_variables.size(),
+               ExcDimensionMismatch(jacobian.m(), dependent_variables.size()));
+        Assert(jacobian.n() == independent_variables.size(),
+               ExcDimensionMismatch(jacobian.n(), independent_variables.size()));
+
+        const std::size_t n_independent_variables = independent_variables.size();
+        const std::size_t n_dependent_variables = dependent_variables.size();
+
+        // In reverse mode, the gradients are computed from the
+        // independent variables (i.e. the adjoint).
+        // For a demonstration of why this accumulation process is
+        // required, see the unit tests
+        // sacado/basic_01b.cc and sacado/basic_02b.cc
+        // Here we also take into consideration the derivative type:
+        // The Sacado number may be of the nested variety, in which
+        // case the effect of the accumulation process on the
+        // sensitivities of the nested number needs to be accounted for.
+        typedef typename ADNumberTraits<ADNumberType>::derivative_type AccumulationType;
+        std::vector<AccumulationType> rad_accumulation (
+          n_independent_variables,
+          dealii::internal::NumberType<AccumulationType>::value(0.0));
+
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          {
+            internal::reverse_mode_dependent_variable_activation(
+              const_cast<ADNumberType &>(dependent_variables[i]));
+            for (unsigned int j = 0; j < n_independent_variables; j++)
+              {
+                const AccumulationType df_i_dx_j
+                  = ADNumberTraits<ADNumberType>::get_directional_derivative(
+                      independent_variables[j], i /*This number doesn't really matter*/)
+                    - rad_accumulation[j];
+                jacobian[i][j] = dealii::internal::NumberType<ScalarType>::value(df_i_dx_j);
+                rad_accumulation[j] += df_i_dx_j;
+              }
+          }
+      }
+
+    };
+
+
+    // Specialization for auto-differentiable numbers that use
+    // forward mode to compute the first (and, if supported, second) derivatives.
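+    //
+    // As an illustrative sketch only (the concrete number type is
+    // hypothetical): with a forward-mode type such as
+    // Sacado::Fad::DFad<double>, each independent variable x[i] is seeded
+    // with the unit direction i, so after evaluating
+    //   f = x[0] * x[1];
+    // the dependent variable itself carries all derivative components,
+    // e.g. f.dx(0) == df/dx[0]. The drivers below read exactly these
+    // components through
+    // ADNumberTraits<ADNumberType>::get_directional_derivative().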
+    template <typename ADNumberType, typename ScalarType>
+    struct TapelessDrivers<
+      ADNumberType, ScalarType,
+      typename std::enable_if<
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::adolc_tapeless ||
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::sacado_dfad ||
+        ADNumberTraits<ADNumberType>::type_code == NumberTypes::sacado_dfad_dfad
+        >::type>
+    {
+
+      // === Configuration ===
+
+      static void
+      initialize (const unsigned int &n_independent_variables)
+      {
+        internal::configure_tapeless_mode<ADNumberType>(n_independent_variables);
+      }
+
+      // === Scalar drivers ===
+
+      static ScalarType
+      value (const std::vector<ADNumberType> &dependent_variables)
+      {
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        return ADNumberTraits<ADNumberType>::get_scalar_value(dependent_variables[0]);
+      }
+
+      static void
+      gradient (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                Vector<ScalarType>              &gradient)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        Assert(gradient.size() == independent_variables.size(),
+               ExcDimensionMismatch(gradient.size(), independent_variables.size()));
+
+        // In forward mode, the gradients are computed from the
+        // dependent variables
+        const std::size_t n_independent_variables = independent_variables.size();
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          gradient[i] = dealii::internal::NumberType<ScalarType>::value(
+                          ADNumberTraits<ADNumberType>::get_directional_derivative(
+                            dependent_variables[0], i));
+      }
+
+      static void
+      hessian (const std::vector<ADNumberType> &independent_variables,
+               const std::vector<ADNumberType> &dependent_variables,
+               FullMatrix<ScalarType>          &hessian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 2,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 2));
+        Assert(dependent_variables.size() == 1,
+               ExcDimensionMismatch(dependent_variables.size(), 1));
+        Assert(hessian.m() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.m(), independent_variables.size()));
+        Assert(hessian.n() == independent_variables.size(),
+               ExcDimensionMismatch(hessian.n(), independent_variables.size()));
+
+        // In forward mode, the gradients are computed from the
+        // dependent variables
+        const std::size_t n_independent_variables = independent_variables.size();
+        for (unsigned int i = 0; i < n_independent_variables; i++)
+          {
+            typedef typename ADNumberTraits<ADNumberType>::derivative_type derivative_type;
+            const derivative_type gradient_i
+              = ADNumberTraits<ADNumberType>::get_directional_derivative(dependent_variables[0], i);
+
+            for (unsigned int j = 0; j <= i; ++j) // Symmetry
+              {
+                // Extract higher-order directional derivatives. Depending on the AD number type,
+                // the result may be another AD number or a floating point value.
+                const ScalarType hessian_ij
+                  = dealii::internal::NumberType<ScalarType>::value(
+                      ADNumberTraits<ADNumberType>::get_directional_derivative(gradient_i, j));
+                hessian[i][j] = hessian_ij;
+                if (i != j)
+                  hessian[j][i] = hessian_ij; // Symmetry
+              }
+          }
+      }
+
+      // === Vector drivers ===
+
+      static void
+      values (const std::vector<ADNumberType> &dependent_variables,
+              Vector<ScalarType>              &values)
+      {
+        Assert(values.size() == dependent_variables.size(),
+               ExcDimensionMismatch(values.size(), dependent_variables.size()));
+
+        const std::size_t n_dependent_variables = dependent_variables.size();
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          values[i] = ADNumberTraits<ADNumberType>::get_scalar_value(dependent_variables[i]);
+      }
+
+      static void
+      jacobian (const std::vector<ADNumberType> &independent_variables,
+                const std::vector<ADNumberType> &dependent_variables,
+                FullMatrix<ScalarType>          &jacobian)
+      {
+        Assert(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels >= 1,
+               ExcSupportedDerivativeLevels(AD::ADNumberTraits<ADNumberType>::n_supported_derivative_levels, 1));
+        Assert(jacobian.m() == dependent_variables.size(),
+               ExcDimensionMismatch(jacobian.m(), dependent_variables.size()));
+        Assert(jacobian.n() == independent_variables.size(),
+               ExcDimensionMismatch(jacobian.n(), independent_variables.size()));
+
+        const std::size_t n_independent_variables = independent_variables.size();
+        const std::size_t n_dependent_variables = dependent_variables.size();
+
+        // In forward mode, the gradients are computed from the
+        // dependent variables
+        for (unsigned int i = 0; i < n_dependent_variables; i++)
+          for (unsigned int j = 0; j < n_independent_variables; j++)
+            jacobian[i][j] = dealii::internal::NumberType<ScalarType>::value(
+                               ADNumberTraits<ADNumberType>::get_directional_derivative(
+                                 dependent_variables[i], j));
+      }
+
+    };
+
+
+  } // namespace AD
+} // namespace Differentiation
+
+
+#endif // DOXYGEN
+
+DEAL_II_NAMESPACE_CLOSE
+
+
+#endif