From e91a69a83a096cb5fa271bd9abfd75a0b19d5e1a Mon Sep 17 00:00:00 2001 From: Joshua Christopher Date: Sat, 28 Apr 2018 14:41:14 -0600 Subject: [PATCH] Initial commit of parallel in time --- parallel_in_time/CMakeLists.txt | 101 +++++ parallel_in_time/Readme.md | 225 ++++++++++ parallel_in_time/doc/author | 1 + parallel_in_time/doc/builds-on | 1 + parallel_in_time/doc/dependencies | 1 + parallel_in_time/doc/entry-name | 1 + .../doc/results/strongscaling.png | Bin 0 -> 10079 bytes parallel_in_time/doc/tooltip | 1 + parallel_in_time/src/BraidFuncs.cc | 181 ++++++++ parallel_in_time/src/BraidFuncs.hh | 220 ++++++++++ parallel_in_time/src/HeatEquation.hh | 179 ++++++++ parallel_in_time/src/HeatEquationImplem.hh | 391 ++++++++++++++++++ parallel_in_time/src/Utilities.cc | 91 ++++ parallel_in_time/src/Utilities.hh | 10 + parallel_in_time/src/parallel_in_time.cc | 121 ++++++ parallel_in_time/test/CMakeLists.txt | 2 + parallel_in_time/test/test_braid.cc | 79 ++++ parallel_in_time/test/test_braid.output | 99 +++++ 18 files changed, 1704 insertions(+) create mode 100644 parallel_in_time/CMakeLists.txt create mode 100644 parallel_in_time/Readme.md create mode 100644 parallel_in_time/doc/author create mode 100644 parallel_in_time/doc/builds-on create mode 100644 parallel_in_time/doc/dependencies create mode 100644 parallel_in_time/doc/entry-name create mode 100644 parallel_in_time/doc/results/strongscaling.png create mode 100644 parallel_in_time/doc/tooltip create mode 100644 parallel_in_time/src/BraidFuncs.cc create mode 100644 parallel_in_time/src/BraidFuncs.hh create mode 100644 parallel_in_time/src/HeatEquation.hh create mode 100644 parallel_in_time/src/HeatEquationImplem.hh create mode 100644 parallel_in_time/src/Utilities.cc create mode 100644 parallel_in_time/src/Utilities.hh create mode 100644 parallel_in_time/src/parallel_in_time.cc create mode 100644 parallel_in_time/test/CMakeLists.txt create mode 100644 parallel_in_time/test/test_braid.cc create mode 100644 parallel_in_time/test/test_braid.output diff --git a/parallel_in_time/CMakeLists.txt b/parallel_in_time/CMakeLists.txt new file mode 100644 index 0000000..5474895 --- /dev/null +++ b/parallel_in_time/CMakeLists.txt @@ -0,0 +1,101 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) + +set (CMAKE_CXX_STANDARD 14) + +# Set the name of the project and target: +SET(TARGET "parallel_in_time") + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pedantic") + + +set(PROJECT_PATH "${CMAKE_CURRENT_SOURCE_DIR}") +set(SRC_PATH "${PROJECT_PATH}/src") +set(TEST_SRC_PATH "${PROJECT_PATH}/test") +# set(BIN_PATH "${PROJECT_BINARY_DIR}/bin") +set(BIN_NAME "${TARGET}") +set(TEST_PATH "${PROJECT_BINARY_DIR}/test") +set(TEST_NAME "test_${TARGET}") + +SET(TARGET_SRC + ${SRC_PATH}/${TARGET}.cc + ${SRC_PATH}/BraidFuncs.cc + ${SRC_PATH}/Utilities.cc + ) + +SET(TEST_SRC + ${TEST_SRC_PATH}/test_braid.cc + ) + +OPTION(DO_MFG "Do the manufactured solution run" OFF) + +# Tell CMake to find MPI +find_package(MPI REQUIRED) +include_directories(${MPI_INCLUDE_PATH}) +add_definitions(-DUSE_MPI) + +if(DO_MFG) + # If we are doing the manufactured solution, tell + # the C++ processor so it can enable it + add_definitions(-DDO_MFG) +endif(DO_MFG) + +FIND_PACKAGE(deal.II 9.0.0 QUIET + HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR} + ) +IF(NOT ${deal.II_FOUND}) + MESSAGE(FATAL_ERROR "\n" + "*** Could not locate a (sufficiently recent) version of deal.II. 
***\n\n"
+    "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
+    "or set an environment variable \"DEAL_II_DIR\" that contains this path."
+    )
+ENDIF()
+# include_directories(${DEAL_II_INCLUDE_DIRS})
+
+
+
+# Find braid details
+if (NOT "$ENV{BRAID_DIR}" STREQUAL "")
+  set(BRAID_DIR "$ENV{BRAID_DIR}" CACHE INTERNAL "Copied BRAID_DIR from environment variable")
+endif()
+# Include the braid paths and libraries
+include_directories(${BRAID_DIR})
+
+
+##############################
+# Finally start building stuff
+
+# First, build the "library" that consists of all the source files
+# set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LIB_PATH})
+# set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LIB_PATH})
+# add_library(${LIB_NAME} ${SRC})
+
+
+# Next build the main function
+# set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${BIN_PATH})
+# add_executable(${BIN_NAME} ${MAIN_SRC})
+# target_link_libraries(${MAIN_NAME} ${LIB_NAME}) # link my library
+# target_link_libraries(${MAIN_NAME} ${BRAID_DIR}libbraid.a) # link braid
+# DEAL_II_SETUP_TARGET(${MAIN_NAME}) # Link dealii
+# if(USE_MPI)
+#   set_target_properties(${MAIN_NAME} PROPERTIES
+#     LINK_FLAGS "${MPI_LINK_FLAGS}") # Link MPI
+#   set_target_properties(${MAIN_NAME} PROPERTIES
+#     COMPILE_FLAGS "${MPI_COMPILE_FLAGS}") # Use MPI compile flags
+# endif(USE_MPI)
+
+DEAL_II_INITIALIZE_CACHED_VARIABLES()
+PROJECT(${TARGET})
+DEAL_II_INVOKE_AUTOPILOT()
+target_link_libraries(${TARGET} ${BRAID_DIR}libbraid.a) # link braid
+set_target_properties(${TARGET} PROPERTIES
+  LINK_FLAGS "${MPI_LINK_FLAGS}")       # Link MPI
+set_target_properties(${TARGET} PROPERTIES
+  COMPILE_FLAGS "${MPI_COMPILE_FLAGS}") # Use MPI compile flags
+
+# Lastly build the tests
+enable_testing()
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${TEST_PATH})
+add_subdirectory(test)
+
+
diff --git a/parallel_in_time/Readme.md b/parallel_in_time/Readme.md
new file mode 100644
index 0000000..c28e5c4
--- /dev/null
+++ b/parallel_in_time/Readme.md
@@ -0,0 +1,225 @@
+# Parallel in Time deal.II
+
+## Overview
+
+Over the last few years, the clock speed per processor core has stagnated.
+This stagnation has led to the design of larger core counts in high performance computing machines.
+As a result of these developments, numerical algorithms must expose increased concurrency in order to take advantage of this architecture style.
+Perhaps the largest bottleneck to concurrency in time-dependent simulations is the traditional time integration method.
+Traditional time integration methods solve the time domain sequentially, and as the spatial grid is refined a proportionally larger number of time steps must be taken to maintain accuracy and stability constraints.
+While solving the time domain sequentially with a traditional time integration method is an optimal algorithm of order $\mathcal{O}(n)$, the $n$ time steps are not solved concurrently.
+
+The goal of this project is to make use of the XBraid library from Lawrence Livermore National Laboratory to solve the time domain in parallel using multigrid-reduction-in-time techniques.
+The XBraid library is implemented in C and aims to be a non-intrusive way to add parallel time marching to existing codes.
+
+## Implementation
+
+### XBraid introduction
+
+In order to use the XBraid library, several data structures and functions must be implemented and provided to the XBraid solver struct.
+The two required data structures are the app and vector structures.
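+For this example they are defined in src/BraidFuncs.hh; in abridged form they look as follows, where the vector struct wraps a deal.II vector and the app struct wraps the heat equation solver described below:
+
+    // From src/BraidFuncs.hh (abridged)
+    typedef struct _braid_Vector_struct
+    {
+      dealii::Vector<double> data;  // the solution state at one point in time
+    } my_Vector;
+
+    typedef struct _braid_App_struct
+    {
+      HeatEquation<2> eq;           // time independent data: mesh, matrices, solver
+      int             final_step;   // index of the final time step
+    } my_App;
+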
+In general, the app struct contains the time independent data and the vector struct contains the time dependent data.
+For this initial example, the time independent data includes the mesh, which is fixed for all time steps, and the time dependent data is the solution state vector.
+The functions tell XBraid how to perform operations on the data type used by your solver, in this case the deal.II Vector data type.
+These operations include how to initialize the data at a given time, how to sum the data, and how to pack and unpack linear buffers for transmission to other processors via MPI.
+The XBraid documentation should be read for a full list of functions that must be implemented and the details of what each function should do.
+The typical format is that a function is called with the app struct, one or more vector structs, and a status struct that contains information on the current state of the XBraid simulation (the current multigrid iteration, the level the function is being called from, the time and timestep number, etc.).
+
+Perhaps the most important function is the step function.
+This function tells XBraid how to advance the solution forward in time from the initial to the final times given in the status struct.
+The advancement is done with a traditional time integration method, such as the fourth order explicit Runge-Kutta method.
+
+### deal.II details
+
+The solver used in this example is based on the heat equation solver from the [step-26 tutorial](https://dealii.org/developer/doxygen/deal.II/step_26.html).
+The HeatEquation class becomes member data of XBraid's app struct, and XBraid's vector struct becomes a wrapper for deal.II's Vector data type.
+The HeatEquation class cannot be used as is, however, because it contains both time dependent and time independent member data.
+In order to simplify the problem, all adaptive mesh refinement functionality is removed from the solver.
+Theoretically, XBraid is capable of working with adaptive mesh refinement, and in fact contains support for time refinement (which is also not used here, for simplicity).
+The time-dependent solution state vectors are likewise removed from the HeatEquation member data.
+That time-dependent data will instead be provided at each timestep by XBraid via the vector struct.
+
+### Math details
+
+In the default mode, this code solves the heat equation,
+
+@f{align}
+\frac{\partial u}{\partial t} - \Delta u = f(\boldsymbol{x},t), \qquad \forall\boldsymbol{x}\in\Omega,t\in\left( 0,T \right),
+@f}
+with initial conditions,
+@f{align}
+u(\boldsymbol{x},0) = u_0(\boldsymbol{x}) = 0, \qquad \forall \boldsymbol{x}\in\Omega,
+@f}
+and Dirichlet boundary conditions,
+@f{align}
+u(\boldsymbol{x},t) = g(\boldsymbol{x},t) = 0, \qquad \forall \boldsymbol{x}\in\partial\Omega,t\in\left( 0,T \right),
+@f}
+and forcing function,
+@f{align}
+  f(\boldsymbol{x}, t) =
+  \left\{
+  \begin{array}{ll}
+    \chi_1(t) & \text{if \(x>0.5\) and \(y>-0.5\)} \\
+    \chi_2(t) & \text{if \(x>-0.5\) and \(y>0.5\)} \\
+    0         & \text{otherwise}
+  \end{array}
+  \right.
+@f}
+with,
+@f{align}
+  \chi_1(t) = \exp\left(-0.5\frac{(t-0.125)^2}{0.005}\right)
+@f}
+and,
+@f{align}
+  \chi_2(t) = \exp\left(-0.5\frac{(t-0.375)^2}{0.005}\right)
+@f}
+for some time $t$.
+The forcing function is a Gaussian pulse in time that is centered around 0.125 time units for $\chi_1$ and 0.375 time units for $\chi_2$.
+A Gaussian function was chosen because it is continuous in time, so the solution state can be compared bitwise with the serial-in-time solution.
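+
+In code, this forcing function is implemented in src/HeatEquationImplem.hh along the following lines (abridged; the time is taken from the Function base class):
+
+    template <int dim>
+    double RightHandSide<dim>::value(const Point<dim> &p,
+                                     const unsigned int /*component*/) const
+    {
+      const double time = this->get_time();
+      if ((p[0] > 0.5) && (p[1] > -0.5))       // pulse centered at t = 0.125
+        return std::exp(-0.5*(time-0.125)*(time-0.125)/0.005);
+      else if ((p[0] > -0.5) && (p[1] > 0.5))  // pulse centered at t = 0.375
+        return std::exp(-0.5*(time-0.375)*(time-0.375)/0.005);
+      else
+        return 0;                              // no forcing elsewhere
+    }
+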
+The method of manufactured solutions is used to test the correctness of the implementation.
+In the method of manufactured solutions, we choose a solution $u_h$ to the heat equation, then compute the boundary conditions, initial conditions, and forcing function required to generate that solution.
+The chosen solution is,
+@f{align}
+u_h = \exp\left(-4\pi^2t\right)\cos(2 \pi x)\cos(2 \pi y), \qquad \forall \boldsymbol{x} \in \Omega \cup \partial\Omega,
+@f}
+with derivatives,
+@f{align}
+\frac{\partial u}{\partial t} &= -4 \pi^2 \exp\left(-4\pi^2t\right)\cos(2 \pi x)\cos(2 \pi y), \\
+-\Delta u &= 8 \pi^2 \exp\left(-4\pi^2t\right)\cos(2 \pi x)\cos(2 \pi y), \\
+\frac{\partial u}{\partial x} &= -2 \pi \exp\left(-4\pi^2t\right)\sin(2\pi x)\cos(2\pi y), \\
+\frac{\partial u}{\partial y} &= -2 \pi \exp\left(-4\pi^2t\right)\cos(2\pi x)\sin(2\pi y),
+@f}
+and therefore we specify the forcing term, initial conditions, and boundary conditions of the governing equations as,
+@f{align}
+f(\boldsymbol{x},t) &= 4 \pi^2 \exp\left(-4\pi^2t\right)\cos(2 \pi x)\cos(2 \pi y), &\forall\boldsymbol{x}\in\Omega,t\in\left( 0,T \right), \\
+u_0(\boldsymbol{x}) &= \cos(2 \pi x)\cos(2 \pi y), &\forall \boldsymbol{x}\in\Omega, \\
+g(\boldsymbol{x},t) &= \exp\left(-4\pi^2t\right)\cos(2 \pi x)\cos(2 \pi y), &\forall \boldsymbol{x} \in \partial\Omega.
+@f}
+
+The manufactured solution is run on progressively more refined grids and the solution generated by the finite element method is compared to the exact solution $u_h$.
+The convergence rate of the error is calculated with
+@f{align}
+  \Delta \epsilon_n = \frac{\ln\left(\epsilon_{n-1}/\epsilon_{n}\right)}{\ln r_n},
+@f}
+where $\Delta \epsilon_n$ is the convergence rate of error $\epsilon$ between a mesh $n$ and
+a coarser mesh $n-1$ that have a refinement ratio of $r_n$.
+The errors and corresponding convergence rates as the mesh is refined are shown in the two tables below.
+The $\Delta t$ is reduced by a factor of 2 for every global refinement of the mesh.
+
+@f{align}
+\begin{tabular}{|c|c|c|c|c|c|} \hline
+  cycle & \# cells & \# dofs & L^2-error & H^1-error & L^\infty-error \\ \hline
+  125 & 48 & 65 & 6.036e-03 & 6.970e-02 & 7.557e-03 \\ \hline
+  250 & 192 & 225 & 1.735e-03 & 3.414e-02 & 2.721e-03 \\ \hline
+  500 & 768 & 833 & 4.513e-04 & 1.690e-02 & 7.410e-04 \\ \hline
+  1000 & 3072 & 3201 & 1.140e-04 & 8.426e-03 & 1.877e-04 \\ \hline
+  2000 & 12288 & 12545 & 2.859e-05 & 4.209e-03 & 4.715e-05 \\ \hline
+\end{tabular}
+@f}
+
+@f{align}
+\begin{tabular}{|c|c|c|c|c|c|} \hline
+  cycle & \# cells & \# dofs & Slope L^2 & Slope H^1 & Slope L^\infty \\ \hline
+  125 & 48 & 65 & --- & --- & --- \\ \hline
+  250 & 192 & 225 & 1.798 & 1.030 & 1.474 \\ \hline
+  500 & 768 & 833 & 1.943 & 1.014 & 1.877 \\ \hline
+  1000 & 3072 & 3201 & 1.985 & 1.004 & 1.981 \\ \hline
+  2000 & 12288 & 12545 & 1.995 & 1.001 & 1.993 \\ \hline
+\end{tabular}
+@f}
+
+As can be seen, the error converges at a second order rate in the $\textrm{L}^2$ and $\textrm{L}^\infty$ norms and at a first order rate in the $\textrm{H}^1$ norm.
+This is the expected behavior for the (bi)linear finite elements used here, which are second order accurate in the $\textrm{L}^2$ norm.
+
+### Code Organization
+
+#### The src directory
+
+The entry point of the code is parallel_in_time.cc, which sets up XBraid for the simulation.
+The XBraid setup involves initializing the app struct and configuring XBraid for the desired number of timesteps, number of iterations, and so on.
+The functions implemented for XBraid's use are declared in BraidFuncs.hh and defined in BraidFuncs.cc.
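+
+These callbacks are registered with XBraid in main(); abridged from src/parallel_in_time.cc, the setup looks like:
+
+    braid_Core core;
+    double  tstart = 0.0;
+    double  tstop  = 0.002;
+    int     ntime  = 10;
+    my_App *app    = new(my_App);
+
+    // Hand XBraid the time interval, the app struct, and all of the callbacks.
+    braid_Init(MPI_COMM_WORLD, comm, tstart, tstop, ntime, app,
+               my_Step, my_Init, my_Clone, my_Free, my_Sum, my_SpatialNorm,
+               my_Access, my_BufSize, my_BufPack, my_BufUnpack, &core);
+
+    braid_SetMaxLevels(core, 3);   // number of levels in the MGRIT hierarchy
+    braid_SetAbsTol(core, 1.e-7);  // absolute halting tolerance
+    braid_SetMaxIter(core, 5);     // maximum number of MGRIT iterations
+
+    app->eq.define();              // build the mesh and matrices once
+    app->final_step = ntime;
+
+    braid_Drive(core);             // run the parallel in time solve
+    braid_Destroy(core);
+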
+The HeatEquation class and all of the deal.II functionality are declared in HeatEquation.hh and defined in HeatEquationImplem.hh.
+Since HeatEquation is a class template, its definition file HeatEquationImplem.hh is included at the bottom of HeatEquation.hh.
+Lastly, various helper functions and variables, such as the current processor ID and the output stream, are declared in Utilities.hh and defined in Utilities.cc.
+
+#### The test directory
+
+This directory contains tests to be built and run with CMake.
+These tests verify the correct implementation of the various functions.
+
+#### The doc directory
+
+This directory is for storing further documentation of the code.
+Not much is in this directory right now as most of the documentation is in this file (Readme.md) or in comments in the source code files.
+
+## Compiling
+
+To compile, you need deal.II and XBraid to be installed with development headers somewhere on your system.
+Some implementation of MPI, such as OpenMPI, must also be installed with development headers.
+The source code for deal.II is available at [https://dealii.org/](https://dealii.org/) and the source code for XBraid is available at [https://computation.llnl.gov/projects/parallel-time-integration-multigrid](https://computation.llnl.gov/projects/parallel-time-integration-multigrid).
+See the documentation of each package for compilation and installation instructions.
+
+Depending on where they are installed, parallel_in_time may need help finding these libraries.
+To find deal.II, parallel_in_time first looks in typical deal.II install directories, followed by one directory up (../), two directories up (../../), and lastly in the environment variable DEAL_II_DIR.
+In contrast, XBraid currently does not have any default search locations, so the environment variable BRAID_DIR must be specified.
+For MPI, parallel_in_time looks in standard installation folders only; for that reason I recommend installing MPI with your package manager.
+
+A compile process of the parallel in time code may look like:
+
+    mkdir build
+    cd build
+    BRAID_DIR=/path/to/braid/ cmake ../
+    make
+
+There is currently no option to install parallel_in_time anywhere.
+The binaries are generated in the bin folder, and tests are placed into the test folder.
+Options that can be passed to CMake for parallel_in_time include:
+
+* CMAKE_BUILD_TYPE=Debug/Release
+* DO_MFG=ON/OFF
+* USE_MPI=ON/OFF
+
+The build type specifies whether to compile with debugging symbols and assertions (Debug) or with optimizations (Release).
+
+The manufactured solution option (DO_MFG) switches from solving the "standard" heat equation to solving a heat equation with a known exact solution so that the correctness of the code can be tested.
+
+Lastly, the USE_MPI option is only used to specify where to write output information when using the pout() function from the Utilities.hh file.
+If USE_MPI is set to ON, then every processor writes to its own file called pout.<#>, where <#> is the processor number.
+If USE_MPI is set to OFF, then every processor writes to stdout.
+
+## Running
+
+Once parallel_in_time has been compiled, the program can be run by calling the binary generated in ./build/bin/.
+The tests can be run by calling ctest from inside the build/ directory.
+Unless the output path has been changed in the source code (it is currently hardcoded), the output files will be placed into the folder the command was called from.
+
+There are no command line arguments for any of the binaries or tests.
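+With an MPI launcher such as mpirun, a run of the solver on four processors in time followed by the test suite might look like:
+
+    cd build
+    mpirun -np 4 ./bin/parallel_in_time
+    ctest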
+
+## Results
+
+To test the performance, a strong scaling study is run.
+The spatial grid is fixed at 3201 degrees of freedom, and the time domain consists of 25,000 uniform timesteps.
+No spatial parallelization is used and the spatial grid is fixed for all timesteps.
+The parallel in time solution is solved using XBraid's multigrid reduction in time algorithm on 1, 2, 4, 16, 32, and 64 processors.
+The serial in time solution is run on a single processor using traditional sequential time stepping.
+The results are shown in the figure below.
+Running the multigrid algorithm on a single processor takes about an order of magnitude longer than running the serial algorithm on a single processor.
+At 16 processors the wall clock time of the multigrid algorithm is approximately the same as that of the serial algorithm, and at 32 and 64 processors in time the multigrid algorithm is faster, by about a factor of 2 at 64 processors.
+
+![Strong scaling results](./doc/results/strongscaling.png)
+
+## Conclusions
+
+Using 64 times as many processors results in a speedup factor of approximately 2.
+This is a fairly heavy computational cost for a modest speedup.
+For comparison, in the reference paper by Falgout et al., a speedup of approximately 10 was achieved when using 256 times as many processors as the serial case when solving the heat equation in two dimensions.
+A similar increase in processors may be needed to increase the speedup factor of this code from 2 to 10.
+The choice of whether to use serial in time or parallel in time largely rests on the size of the problem being solved and the amount of computing resources available.
+Increasing the required number of timesteps will benefit the parallel in time algorithm, provided enough extra resources are available.
+
+## Future Work
+
+There are many routes available for future work.
+First, there are many optimizations that could be made to speed up the existing code base, such as exploring the different multigrid cycles and finding the optimal load balancing (a starting point is sketched at the end of this section).
+Ultimately, those optimizations will probably only result in marginal gains, per the Falgout paper.
+Allowing XBraid to prolong and restrict the spatial grid may be one of the more promising avenues of improvement.
+
+Future work that is of interest to the authors of XBraid is the development of adaptive mesh refinement (AMR) in a parallel in time algorithm.
+A particular challenge with parallel-in-time AMR is the time subcycling that occurs in spatial subdomains with sequential time stepping.
+This code base does not use spatial subdomains with AMR and so could provide an easier setting in which to understand the coarsening and refining of the time domain.
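+
+As a starting point for the optimization work mentioned above, several of XBraid's tuning knobs are already stubbed out (commented) in src/parallel_in_time.cc and can be re-enabled one at a time, for example:
+
+    braid_SetCFactor(core, -1, 2);  // temporal coarsening factor on all levels
+    braid_SetNRelax(core, -1, 1);   // number of relaxation sweeps on all levels
+    braid_SetMinCoarse(core, 10);   // minimum allowed coarse grid size
+    braid_SetSkip(core, 0);         // do not skip work on the first down cycle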
diff --git a/parallel_in_time/doc/author b/parallel_in_time/doc/author new file mode 100644 index 0000000..278fde0 --- /dev/null +++ b/parallel_in_time/doc/author @@ -0,0 +1 @@ +Joshua Christopher diff --git a/parallel_in_time/doc/builds-on b/parallel_in_time/doc/builds-on new file mode 100644 index 0000000..1aabbdf --- /dev/null +++ b/parallel_in_time/doc/builds-on @@ -0,0 +1 @@ +step-26 diff --git a/parallel_in_time/doc/dependencies b/parallel_in_time/doc/dependencies new file mode 100644 index 0000000..cb477f1 --- /dev/null +++ b/parallel_in_time/doc/dependencies @@ -0,0 +1 @@ +DEAL_II_WITH_MPI diff --git a/parallel_in_time/doc/entry-name b/parallel_in_time/doc/entry-name new file mode 100644 index 0000000..7b35f31 --- /dev/null +++ b/parallel_in_time/doc/entry-name @@ -0,0 +1 @@ +Parallel in Time Heat Equation diff --git a/parallel_in_time/doc/results/strongscaling.png b/parallel_in_time/doc/results/strongscaling.png new file mode 100644 index 0000000000000000000000000000000000000000..e0565349c3b1d98bc3d9404f4c784cf9a0fe44ce GIT binary patch literal 10079 zcmb`s2Ut@cGlJafX`%lx~*oaA$9lep~Ob`b28aTO}S)F*5I@UM(o3- zpPsMhX(~8IHQwcli4s(J@bdXrT@Iw4e03E&yOoRxiurvRojxgL8ud}$&o^@uB$xS3 zr&p`ewmXuixg^wpP*DT2)><}T5n*fIR#u42SMXj~$C(`my@rO!UZSt`{r4+AE;whW z%<}T^{mjLV;ocPl-te7ZWTt@tmn0NM!uM2C;)W~V)aI*Jh^oIe?BnPC_3>O6?u+cD z-vg$;lmq~JjtgAk+mKtoU8Ig^=+UoFC`Y)4yN<737Gnymej5K#X5SG7i&7U*{zh}} z^NSnYA|)FHpOUkC?>!kuPq5hGEwK=YUl4oFc7N=*Ln4#n?d_ng;auCYFW^@VfYGnr zZy_DvxdV89vG1+nR9uGI0`)L;*GDFI#=knbQr{0_dyWVE z!o_%Z_p1c9j6u}>pNurh_*#*kj8CG74CRQE=$46aWC}C|)3Bk>n|^qG!s84tc`x<} z{{lx0*EGagHtfffSNF3cZX%?<;(eA>JA>MC1YfL2u6lVfGB;rJt_(+5nTFn@VV zO+gCsqFAK-vyWGr{yLoY0woW1!^ILdpC5nNDfq6*JVGr)k62tLyN! z=&aiKb=Q&g1p9={3r{@cI{3+AnyaFyk_kUAer6n!%i& zRlHLEkW#B+qJ_s1R-5A)=LtKCI?4b89g>`SzAk$nIJ=XdbDqF3QZ`eS(osoZ;J{{k$O(8 zqQx4|dd*tHTFT0&OhAJt7xvYfh5yAv1P#f<{6dOCQLSXHmO`VVh{Dyvl|rYYCq;xs zQ-zU5(mni8ddMR5q}Q`oB3+@!zR$PErsq*g8&Ld-Qf5usQXge+UWOkxNuNWXU!O~w zH1iyvbm{v~^My$tG>ctzEftdsUX?yX7{GmAx*0{oA>|aMI&e>w4OZ=x9!nrUS5_N1 z8QF@gagT7%I|x2#B~b0sVAUv6TPfz0NiU}@GJ;QjsU1KrhVxL~?(~SxLO!!$b z*8UOech=^&P5KcUlMpk@c4j+;>ljkg6wG|mZcy+BCl;*1GZEv(r$**#(gs15f%!Mo zSma^YVfVt=B6Q@aJ0&`uJ3q(Q#-AyUDxT`6=!;cMBasd(bk+<@%1d(JNz`5ecYxw* zzF?`GY#0I9SPd~S(l0w;->>+~v!1ObE1;9!$=SBO>#Fr!iBLP%GZsx1WVmVc-B{RI zz1*ri#$>e0e)>fhXMf4yi&q1+#C1}RJhd)4N|o^N&^(NNpkn$knFHPUx? z6NelYqfPovgqx0{{@rn`bP&sZRrTK9-K^vz6~00Uk*t20YS`G<;Y5 z?z69Dt3+E#o0$)t@0#!F`49iGOTo+5)4D^R{eVTpM&`o!lEyyCzRN*AVHF8G(Sv)N zxXuKv3@mgMbR(2;L?~&JN)Z+*V@4oJc`;P%meRpgrd#r?2^uXPg@N zO+T1hq@MFcE!V@uY1*V!>8kpy>uvDcdU+G8SBp(7_FDd0E=VhPuLFx1^%pWR{G-gT zgG!X=IbVUphR5Kqi_n=;T{rg!;uvVGVPmjS*kj~X%^*Y-!UjEc`7vAN&1l(zdWBPsN(F6 zoA=002pKv>&X3++AZuRu??#T4(y&INpDtBD*P!6gQH1;*ov1?kAp-YfsUWy-qPibAoUD#le%T zCom;5!84H~2Iyla`!pwR`zwF$jjX-K)BRas4IFNBzM4YS$odg_A&-h~VcXX5s@tp| zt9v>bqaLD7gD|@xyXXr;S0MZbtA=Xd=}H@;)gAb@g2jYAqr|Mm!cApN!`4g&k3EgO z8g5HYe^(#&u9fmBo3S@3H)aL7?Q0DdP4sbO+E&P$)9f0~9WL9?k6yN!wPB5$RL(T+ z_*~mRM(hp~mC~)b7)?uf#|D%h)^BVl1g74c%z*D@Nv+w(&S@8@^d~EUoTraFb5t7AJh<6p9^SOUHMj;rhh1a z6ZGQB@X+Y+c;d6;?Itx&3~&~2mixx^N^f<$+WY&iuZ^T_ew&8Bw6y-2%xdR;;#Oin z{#_}xL0cXRYECJKfSsGg3#jK)H@AIh-VaHw?i?fOn6Qb_T3JgC0PqC@0KsnofC~&O zcpU(E%?|)Nx;_p)mxe^&jz9_Kz4Q{p0`R z{Oj_M#*{aITz;yqYFvLIlPf0>T!pX^r!GMd4OG`_OX}W*^KBmOO!^6VD1dM-# zF*2CefAn8e|1@LDD*yna{~yT1|D^xv{|6N#fWeP}iedg6$Ug$Z#}NPc7(E!`n4Fv( zV~CNF@jqY!xB&nee0==>tp0n>0{{zuDNOehDk>@h0!*BXc^w#YnCl;;f2J`~|A>F^ zFy%GI>p&pzzkH9e2m*mH!`IT%l9H0r*Vl(YAXZjZ_V)G|R|p6QFflQal9DnrGXnr? 
z0Knv5%tHV`KLCIR0A>IH3>Zw>2LOHp0Lz#m0027xz}UZ+h?xL8%{jH0_H0irb!|ZJ zBqFMNdG2IEKB~KAYpz9pVDigVOXk(JE9vPjOnzVYSb*7k%YwATCh_d@bP?7s;N8Eo zGn3K&y0gfpFDmhK33KK*UH+dd=lOLvv-uZiWxFk_mp3=juP=&cG|w1ohpwBp8cz4- zCcmLZ6A?L~-9J9>wdAA9=PoW$Hyg(5GpA!qmo^Oo4Rd3IW3bnV(SiM?>^*cfE~~Ty z2IU~%#D_#ViS51y?df7_Oy$Te#7(gNQx=!0{ z<(MKIbIHjEo%(&mq~_bKRDg5BiEOJ|(%zG{16((XPhzCof|jPQGK+R$TaRzyyXGa! zG`r^^=}nNlDbW*mkJ2M62aoUQ{X4tHCtM;T8sdvR&n_F(FU#I^%{j{NBTikM2Z9VQ z-u2wyIBl;trV=x+fKX z0E=hNifT!;w#sdLngXYaf#{RSM1p&7)l4&sGxgFpo}7AQ?vQvD=Q;N+pOp4OB)zlx zJkKc7(QdG}x`yzz{J>ALK%*x0p5=HbsqC5LF^g+yA0*%i_c6oGkM6)Tf^;+uOr9NG zz+FGt`vv)7>vj~1^r{Fv+8i8wB>cJL5AlR&wn-Ov82An$uZ*9W=s`K z2lYySqej}6gW>%^B=1z9jaa;fi=#8IG#&G`|e+H&FPs2{1mD-}|Jh zcxoVbkf!a|Td{*4t+;A65Q;_%!C80!iQzE zk@vWx-m0NSp2I*n&RPMe!T~tfh`BA~qQ9xUd`17l=nF7qg;vfCy=EqO(l5WsI*@CY zqueGB3=mFE3I$_8#;5i)D@F#G~$@3aca3H0Av4a0RI=GChE22O|i zZp^;?Zk+jQC#je-Kfp8XQ23cTN++Grg#yH2_TDJDnE9~;H^lc4T0#lqL|z|ZtqfzO z;3cWZiJy8=h0I+>=NIZF7yEkec!Ukzj;Kms3$XMtrQRbxIVDjhR076Zkb~@MqibCu z!}n6}C;h`}sf`7RdE4CSpRG&^;~^I=H-G22Z6Eygz zAmth+smpu=;Mz`$%`cpC4e~h-G@6$#Mkkx=ggvNI1;&5%50t#ItOfs)cOl03A{yj2 z^zOKq!locqIrS3*=;^3;IS6|9hFZt5I9ju>noxRbTq02SEGTjD!X{lo9kc5 z)+$Mmn|E_v4+X26MYO1MGML-`>kj~C5|BeX;B~z$ldP$)>2mDTRnnr3f#j!RW%_Wz zWExHp^`9VERk0cDYVqk%^r5d)Dn3T2hXYr&VoDg(a04)**Dh59S!!>CCdwrR-1O%z zhQRd*EBNzH+e&ZQJHn$qTR2*6`~w%B)e+?AW;O~7?fCwtg&JE#ab;)Gs*uMo9S;G- zVGSKSxi`Ie7#LZOQI-e7DR0dURdd)Fs5^2e5H6Pi8h)F#jKyG`90YfMuNVM9h25yb zXrVDiJmh_5V^6dn)U}F4stLV=70w*Y(hH9QwI^?8O}_F_VHr1pJIg|oPB8;O(K@*%;^({==HfSDtgFJ9ZFVy||Jsqf8bKwQ_ zAkoQ)5944hpqnE)@Xrzu>|ibt^tfnrcRD@iyWhSiA^afNp=!hSvyad2-UQ+Lmmkle z@6Lz+Y=@VRFP{1T;MUH($3y7F+vZm2;}box)C}!)&ExH0J1d$hCM4N!;_I*n zE&MUnPju;KN^6TOi_MhwJWKDB)*ueCfnBwRkb5B@n0;}_ob`!93)q=nmBztPtD6xP zVUfC8aA(}*FhvaUWeeN}vb%CTgjlUMn!)6`x%EADfsVr*+DYuVt%3TOjrFv`RaZgZ zWw5V1OWqc06>b{PE5FNCnD51dx#v`DxfSd<4&-ueBPEh~xCUR^#e+>wOqp#6Qod2C zD{A~w(K@$xiG_5)S?}&Cqgd3?@&rWGYm+SGT;{_$Xguo8KTG`2Djx+IPO*?OrV2N1 z#s1yNYNJ^~k5Q0b`xaX(+g)MmO*pJ=vO+P=(CIWz*lof)d!lrExZg6h-KJtGHh{+p zJv)WRDX(KKxc*K1npP4)EXu6C-r=)QIY?mYKN zxY}gIJ%cTWm(Hdw*^@juwq^L8xhV0Z(P1aooy9_)B<2s^EJB^-Z6n|HCckH@P&TWj1j}Pgk1vx ziNt&sDMEffQnKt81kgF*V3+ndmBi%e*=ZUpxOX&c6{Q1SNk`S<4M|+?t*_>MHA`OD3L$28=97Q7fU*XPtsUR+= z%-QLKyk>P0QgF{QS+M@X`@X#9H$85s+N%d&?||~6I+N|r)n|A?hL&5LSg6`ERG>a$ zBpn)qj+&*H!}f(7F#CG={@SrYB+O2iMBsTPsry&Rdm92B)=u(f zi2GO^XYyyDux3$t&5UAyzh?)q5|28Eyk;bL_Q~uc`4IhR%rN;A#P^<&y5H?kSnKdjAr&m5uUx(+Qu)caC#-BnokzwzgM&vq z7F?|DcpWY^^c>bKgt5etM!uQML1VOBs|QyviWrK8Z8qgHYfaGALErJb-Lkz#@gMu! 
zCEj)BTPNh`Pgfx#on{79eiT7ukA2g7*+_k&i>t1dpt6l3Ryz1(YsP#(qgZ@AU=3&f zb-X}R2%a{jShU1)!>Bc<>OeN0#~K(Dp^PSv>>UcP(${(CO7n-*N2fk}JF2PT$8 z+inKyKoJ*l@02g#bI6;&o*Oo>u`T=bOVO)x{B(=uVY_hF7zt^To#7KF4I(ADQ6%1E z8_Pq`=uGj{#yc>?r~(Dg;ZY~bH$#JhjxzdeHwjpx`9iNjh| zbK8wN@|nz`mEuXwH|I9CFAoVk(Ql)GsnapP`u%!tqpSS`W6bu7HPBoqv*&fIm-prY zQ)t+BSbbkwp82pv!N$U@0Tt8RIl91H^JjqFZqANLG+wn|3Mz?ag<-c0MvrAnQ94djWoEdf{I8tlR%BGh~umCH3~Ggv)tmnlhNU^!8k zx*^e;&~*n}cT2*kX;mY*lfK819LJFS8U9Srb*Z^_u5^$YZ@)_#QcJRW%H)2|MeBm(h} z3p`_mO$+0yWj>M=_1znsPn_~Ba=w`52)}bJ)B_2(&MX|YAs!J$=hN46UL$*FIuUnW z89)k4gSifMIab(L7Z#t}Ktz|GH;bSNHzt|)3P7$G?wK8^&3i55MydQF0g1D%A;zy% zI;_JoWVSYvdk%bMW8RI@OnQXHZ4~7!8T9%T2-}QQt&DqhZm`Feh2S{ZtHz*@>Yj~j zXy1Bf-h~M%A(g|)+mi&;Sy8p@JSuKRP~tO$h_J|z;MI(&Gfc{bwQ?h#4mA?Rf0Z!t zn{qG}6EVDBA$+wuNG4imL3*)aGpNVlK?k$%zM@=(LkM*CwlBc1OCB$E^C7xn2~J7H zV_$mq{A6^Saznw*PyN;SA@+I?1JTR6xvy4fzqfx~%EX?JC+r-^`_S{_`C*5vbvK`_ ziTY3BS3P)VDIuvLvBgp+?lB3DABJ+Lv@E(|Px;~*D>pp&1_QN+`^!^H6=zReSL4j8 zf5n+Wj2IMogAAdHe4ujBeG%6yWbYrQ-9zWu&~R3m$;=&5kZxzAon~46lm%Gz342c= zm16HM*+aaPC0a63m=IBkzYVhUCmB}4bGQw*x}sVbXE)d#bf_kZ8o8eXZl=*`648(O zB8j}09V&x<-}_YijWkH;o(EiJAXP<)ZI@0%qWF1aiDly*JM1@V()$DZzpcF+l|7?+ zVJh155R~Aj16VPIr#Klb`z;OneZq`n>qzf$a`8(O5vKU}EUB4iv!D1feNZD+UwVS= zG*@K$*yFdqxZM?3uPdt!2a7$mpdG1p@KBd9xuZEmFpVk{x$kl%_-TpSSCSNN$1XUd zjS0=rM|PSJqKA|bB7U?KAQ)xDTvLxS*!ayjE3uYW{gQ#QF8ppL$WJNJA$+N0b%(1dq>ex*~b*FMRM?$rNs zG9Ti;abq6X^s>u$>&O)@CX`On8nC;*{&i~Z(Uu4=cK&$33dG+2@8^RwZL52gUyB=h z)4<`21m+9kFxGRta%R@41Y9O$F|A_%+0co>fhdb1VRxo!R2h~~F0`kQg51-uoRNC< z9*jM2Q~z3@7ClB9-0;4<#lyD64n!JE*LPEwgFBEC4Va$)8JeS}oMTNj7iei*Mk!*$lxJPGusaObnK}sqyjKhxwzkyT1rpq8(Vee)B>j&THHMNt_WMr zEq)7Pp++Ua7|Y=WFe{JVmRf{&zax?emY?Y-%u~#X>xi#s)I<0(YH#jT<5$p$Fk+N~g9@&_+|ulYCWa z&;x}z1vu49Z;*@%oW)@&G;l}`KBnpMr)rrs^846eE4E%;)um#O!y;s)9(z#;NZy~+ zvwdmT8$Nqd`?`*mJZ=0AAD93wSa0ampf-{Y)=c53Wv-L5FIi#c0|}LtYlo>11=;T9 z#U7*55bw_Jv^5ETNWtfo@blSSs0nJ*P6W5_Dv3$c!Dzo7e?A`$sG}O1$}g1zzK$iV zM7Yh9!DvTE0=6vMU;JuQ#T7ycmR!862vLRDlTC$L^)!7XaT#aSwsU=cb_Z0)W6zn9 zE8cvo6%Seo!@}oaFJE$*Yohsh(JXnz!aNN+=WW*qd{G!k6>gv<#m zg8)b&$Zg+zmEKutVm9y`-Gh&_IcqU_kDC(A{%U&f%S{PyS#GXolD$y0O@&{73#BAT zh7xuca(6%u3x)kRzXb>XNB%!ys3k>jEX-J*T(1gt9PRD!Uj}*zdOYv{qw4=o(f`i& zf2l!XJETRAdbMDcZifmRI8jHz0GOPF|1bZo{5J;X80$~{e-ios#QA@9`d=7gjHzsI zx0PrizN1?~I-RkfNAX}=;KINyy-d5(k99xqzjJ&GwlkN+gUG@+k7@)`GwIR~N#;aP zb{tk}Zc8f%{+#?h&gA*(P|0Kbv+~`E;bF|J(Vv6A+dBQV&)fk+klQnnYc#U!_Pexx zbkcVOEw{pRRqp!}Rq zbxvSH`bKTny#9=CEmN{6+jV$++#S>x%oRQ9v_EG=c)-&}vnzmcMxoiEdnHFz`u9@*X{K~n!eNX={pP#9^ zpYGTxU0vp0SLXj2MNk82SN?3!>G6L-xOF?Rnj3JqXt^~sgzF>-Tb4Y;oxeS9tK=BW zm*%4sF-s#iYkj)3aTcu6!`kGwRQsg;neA;XB4Dg_aqiWh|7;zLs4TC3(je}83pOLn3^?ku#n-;YnTb1yoU_sZS?8On@3CFtjAe^AU9m%f2v z?So!k3H3sRF^h#O-h$)i;_e<5ilXpMkYGK>jZwrsqZCb@N5>umEhVEdiw(enS7#m< zCE*XGy3AGg^|Lf}9v7u$yrXD;f9Ak>(@PeEIT-)%srd!t5;Ln*id{A~g5J;92*Q#o zn4uL>U;rifkSkDU4kur}>aP{vzKqOp4LM!Olh;+?7>nUi$IBK6X7MD!v z;EbAmqJ3bjATN7uXfLkEtd3efUh*eP0?D+&H1K=bL@0yaK|((Tn0aLxB>_w4_Qy8+ zt{m=`irP&sESK+RMqLdsivJ*9>k-V(NQ=3m{uB;A3}b^vQr6y=Rp>*5&Wn_uR^*s@ zJ@Povl|Cq{7$#4L0XGH7d^!)cXF)MmOh{1|?NRXwnELX-`#b}MtH*H->iaBOL7eW< z-7a})9JHCo)gZONNU=s8FM|-UAs$8eNA|yzYVSAO9cJ6 z#`h^2Tli1o>_M=+YH51UK_A!5uf$Z2UTe6-;$CPMBs&IHcC)O)o1U4_piXLg9FxUy z+xgSj2*R1pa0F`p9Z4N)<%7~K?H)|#NQ4~X+>IGb+FamcP8Hc2SZNbHW=hVI;Gil? 
zUve5-q4lO!8dzTEVF0D~5{u!Fh}{oYZ<8fp3SW=a5qp{S9YioAzAR~{#YG)y&IfP%43P0g32<_{9W;Fc%NMuoypovdZ;;@8IMDv9o#ezwaWei_rf8H6rZ_ literal 0 HcmV?d00001 diff --git a/parallel_in_time/doc/tooltip b/parallel_in_time/doc/tooltip new file mode 100644 index 0000000..129224e --- /dev/null +++ b/parallel_in_time/doc/tooltip @@ -0,0 +1 @@ +Solves the heat equation time steps in parallel using the multigrid reduction in time technique from the XBraid library. diff --git a/parallel_in_time/src/BraidFuncs.cc b/parallel_in_time/src/BraidFuncs.cc new file mode 100644 index 0000000..89f2b3a --- /dev/null +++ b/parallel_in_time/src/BraidFuncs.cc @@ -0,0 +1,181 @@ +/*-------- Project --------*/ +#include "BraidFuncs.hh" + +int my_Step(braid_App app, + braid_Vector ustop, + braid_Vector fstop, + braid_Vector u, + braid_StepStatus status) +{ + UNUSED(ustop); + UNUSED(fstop); + double tstart; /* current time */ + double tstop; /* evolve to this time*/ + int level; + double deltaT; + + int index; + braid_StepStatusGetLevel(status, &level); + braid_StepStatusGetTstartTstop(status, &tstart, &tstop); + braid_StepStatusGetTIndex(status, &index); + + deltaT = tstop - tstart; + + dealii::Vector& solution = u->data; + + HeatEquation<2>& heateq = app->eq; + + heateq.step(solution, deltaT, tstart, index); + + return 0; +} + +int +my_Init(braid_App app, + double t, + braid_Vector *u_ptr) +{ + my_Vector *u = new(my_Vector); + int size = app->eq.size(); + u->data.reinit(size); + + app->eq.initialize(t, u->data); + + *u_ptr = u; + + return 0; +} + +int +my_Clone(braid_App app, + braid_Vector u, + braid_Vector *v_ptr) +{ + UNUSED(app); + my_Vector *v = new(my_Vector); + int size = u->data.size(); + v->data.reinit(size); + for(size_t i=0, end=v->data.size(); i != end; ++i) + { + v->data[i] = u->data[i]; + } + *v_ptr = v; + + return 0; +} + +int +my_Free(braid_App app, + braid_Vector u) +{ + UNUSED(app); + delete u; + + return 0; +} + +int my_Sum(braid_App app, + double alpha, + braid_Vector x, + double beta, + braid_Vector y) +{ + UNUSED(app); + Vector& vec = y->data; + vec.sadd(beta, alpha, x->data); + + return 0; +} + +int +my_SpatialNorm(braid_App app, + braid_Vector u, + double *norm_ptr) +{ + UNUSED(app); + double dot = 0.0; + dot = u->data.l2_norm(); + *norm_ptr = dot; + + return 0; +} + +int +my_Access(braid_App app, + braid_Vector u, + braid_AccessStatus astatus) +{ + double t; + int index; + + braid_AccessStatusGetT(astatus, &t); + braid_AccessStatusGetTIndex(astatus, &index); + + app->eq.output_results(index, t, u->data); + +#if DO_MFG + if(index == app->final_step) + { + pout() << "Doing error calc of step: " << index << std::endl; + app->eq.process_solution(t, index, u->data); + } +#endif + + return 0; +} + +int +my_BufSize(braid_App app, + int *size_ptr, + braid_BufferStatus bstatus) +{ + UNUSED(bstatus); + int size = app->eq.size(); + *size_ptr = (size+1)*sizeof(double); + + return 0; +} + +int +my_BufPack(braid_App app, + braid_Vector u, + void *buffer, + braid_BufferStatus bstatus) +{ + + UNUSED(app); + double *dbuffer = (double*)buffer; + int size = u->data.size(); + dbuffer[0] = size; + for(int i=0; i != size; ++i) + { + dbuffer[i+1] = (u->data)[i]; + } + braid_BufferStatusSetSize(bstatus, (size+1)*sizeof(double)); + + return 0; +} + +int +my_BufUnpack(braid_App app, + void *buffer, + braid_Vector *u_ptr, + braid_BufferStatus bstatus) +{ + UNUSED(app); + UNUSED(bstatus); + + my_Vector *u = NULL; + double *dbuffer = (double*)buffer; + int size = dbuffer[0]; + u = new(my_Vector); + u->data.reinit(size); + + for(int i = 
0; i != size; ++i) + { + (u->data)[i] = dbuffer[i+1]; + } + *u_ptr = u; + + return 0; +} diff --git a/parallel_in_time/src/BraidFuncs.hh b/parallel_in_time/src/BraidFuncs.hh new file mode 100644 index 0000000..0032d89 --- /dev/null +++ b/parallel_in_time/src/BraidFuncs.hh @@ -0,0 +1,220 @@ +#ifndef _BRAIDFUNCS_H_ +#define _BRAIDFUNCS_H_ + +/** + * \file BraidFuncs.cc + * \brief Contains the implementation of the mandatory X-Braid functions + * + * X-Braid mandates several functions in order to drive the solution. + * This file contains the implementation of said mandatory functions. + * See the X-Braid documentation for more information. + * There are several functions that are optional in X-Braid that may + * or may not be implemented in here. + * + */ + +/*-------- System --------*/ + +/*-------- Third Party --------*/ +#include + +#include "braid.h" +#include "braid_test.h" + +/*-------- Project --------*/ +#include "HeatEquation.hh" + +/** + * \brief Struct that contains the data + */ +typedef struct _braid_Vector_struct +{ + dealii::Vector data; +} my_Vector; + +// Wrap the Heat Equation in a struct +typedef struct _braid_App_struct +{ + HeatEquation<2> eq; + int final_step; +} my_App; + + +/** + * @brief my_Step - Takes a step in time, advancing the u vector + * + * @param app - The braid app struct + * @param ustop - The solution data at the end of this time step + * @param fstop - RHS data (such as forcing function?) + * @param u - The solution data at the beginning of this time step + * @param status - Status structure that contains various info of this time + * + * @return Success (0) or failure (1) + **/ +int my_Step(braid_App app, + braid_Vector ustop, + braid_Vector fstop, + braid_Vector u, + braid_StepStatus status); + + +/** + * @brief my_Init - Initializes a solution data at the given time + * For now, initializes the solution to zero no matter what time we are at + * + * @param app - The braid app struct containing user data + * @param t - Time at which the solution is initialized + * @param u_ptr - The solution data that needs to be filled + * + * @return Success (0) or failure (1) + **/ +int +my_Init(braid_App app, + double t, + braid_Vector *u_ptr); + + +/** + * @brief my_Clone - Clones a vector into a new vector + * + * @param app - The braid app struct containing user data + * @param u - The existing vector containing data + * @param v_ptr - The empty vector that needs to be filled + * + * @return Success (0) or failure (1) + **/ +int +my_Clone(braid_App app, + braid_Vector u, + braid_Vector *v_ptr); + + +/** + * @brief my_Free - Deletes a vector + * + * @param app - The braid app struct containing user data + * @param u - The vector that needs to be deleted + * + * @return Success (0) or failure (1) + **/ +int +my_Free(braid_App app, + braid_Vector u); + + +/** + * @brief my_Sum - Sums two vectors in an AXPY operation + * The operation is y = alpha*x + beta*y + * + * @param app - The braid app struct containing user data + * @param alpha - The coefficient in front of x + * @param x - A vector that is multiplied by alpha then added to y + * @param beta - The coefficient of y + * @param y - A vector that is multiplied by beta then summed with x + * + * @return Success (0) or failure (1) + **/ +int +my_Sum(braid_App app, + double alpha, + braid_Vector x, + double beta, + braid_Vector y); + +/** + * \brief Returns the spatial norm of the provided vector + * + * Calculates and returns the spatial norm of the provided vector. 
+ * Interestingly enough, X-Braid does not specify a particular norm. + * to keep things simple, we implement the Euclidean norm. + * + * \param app - The braid app struct containing user data + * \param u - The vector we need to take the norm of + * \param norm_ptr - Pointer to the norm that was calculated, need to modify this + * \return Success (0) or failure (1) + */ +int +my_SpatialNorm(braid_App app, + braid_Vector u, + double *norm_ptr); + +/** + * \brief Allows the user to output details + * + * The Access function is called at various points to allow the user to output + * information to the screen or to files. + * The astatus parameter provides various information about the simulation, + * see the XBraid documentation for details on what information you can get. + * Example information is what the current timestep number and current time is. + * If the access level (in parallel_in_time.cc) is set to 0, this function is + * never called. + * If the access level is set to 1, the function is called after the last + * XBraid cycle. + * If the access level is set to 2, it is called every XBraid cycle. + * + * \param app - The braid app struct containing user data + * \param u - The vector containing the data at the status provided + * \param astatus - The Braid status structure + * \return Success (0) or failure (1) + */ +int +my_Access(braid_App app, + braid_Vector u, + braid_AccessStatus astatus); + +/** + * \brief Calculates the size of a buffer for MPI data transfer + * + * Calculates the size of the buffer that is needed to transfer + * a solution vector to another processor. + * The bstatus parameter provides various information on the + * simulation, see the XBraid documentation for all possible + * fields. + * + * \param app - The braid app struct containing user data + * \param size_ptr A pointer to the calculated size + * \param bstatus The XBraid status structure + * \return Success (0) or failure (1) + */ +int +my_BufSize(braid_App app, + int *size_ptr, + braid_BufferStatus bstatus); + +/** + * \brief Linearizes a vector to be sent to another processor + * + * Linearizes (packs) a data buffer with the contents of + * some solution state u. + * + * \param app - The braid app struct containing user data + * \param u The vector that must be packed into buffer + * \param buffer The buffer that must be filled with u + * \param bstatus The XBraid status structure + * \return Success (0) or failure (1) + */ +int +my_BufPack(braid_App app, + braid_Vector u, + void *buffer, + braid_BufferStatus bstatus); + +/** + * \brief Unpacks a vector that was sent from another processor + * + * Unpacks a linear data buffer into the vector pointed to by + * u_ptr. 
+ * + * \param app - The braid app struct containing user data + * \param buffer The buffer that must be unpacked + * \param u_ptr The pointer to the vector that is filled + * \param bstatus The XBraid status structure + * \return Success (0) or failure (1) + */ +int +my_BufUnpack(braid_App app, + void *buffer, + braid_Vector *u_ptr, + braid_BufferStatus bstatus); + +#endif // _BRAIDFUNCS_H_ diff --git a/parallel_in_time/src/HeatEquation.hh b/parallel_in_time/src/HeatEquation.hh new file mode 100644 index 0000000..07737e4 --- /dev/null +++ b/parallel_in_time/src/HeatEquation.hh @@ -0,0 +1,179 @@ +#ifndef _HEATEQUATION_H_ +#define _HEATEQUATION_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace dealii; + +template +class HeatEquation +{ +public: + HeatEquation(); + void define(); + void step(Vector& braid_data, + double deltaT, + double a_time, + int a_time_idx); + + int size() const; /// Returns the size of the solution vector + + void dump_vec(std::ofstream& file, + const Vector& vector) const; + + void output_results(int a_time_idx, + double a_time, + Vector& a_solution) const; + + void initialize(double a_time, + Vector& a_vector) const; + + void process_solution(double a_time, + int a_index, + const Vector& a_vector); + +private: + void setup_system(); + void solve_time_step(Vector& a_solution); + + Triangulation triangulation; + FE_Q fe; + DoFHandler dof_handler; + + ConstraintMatrix constraints; + + SparsityPattern sparsity_pattern; + SparseMatrix mass_matrix; + SparseMatrix laplace_matrix; + SparseMatrix system_matrix; + + Vector system_rhs; + + std::ofstream myfile; + + const double theta; + + // These were originally in the run() function but because + // I am splitting the run() function up into define and step + // they need to become member data + Vector tmp; + Vector forcing_terms; + + ConvergenceTable convergence_table; +}; + +template +class RightHandSide : public Function +{ +public: + RightHandSide () + : + Function(), + period (0.2) + {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + +private: + const double period; +}; + +template +class BoundaryValues : public Function +{ +public: + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + +template +class RightHandSideMFG : public Function +{ +public: + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + + +template +class InitialValuesMFG : public Function +{ +public: + virtual double value (const Point &p, + const unsigned int component = 0) const; +}; + +/** + * Provides the exact value for the manufactured solution. + * User must set the time using ExactValuesMFG::set_time(const Number new_time); + * Calculates \f$u_h=e^(-4\pi\pi t)*\cos(2\pi x)\cos(2\pi y)\f$ for all points given to it. + * In the context of manufactured solutions, this is used on the boundary. + */ +template +class ExactValuesMFG : public Function +{ +public: + /** + * \brief Computes the value at the given point and member data time + * + * Computes the exact value of the manufactured solution at point p and + * the member data time. See the class documentation and the design doc + * for details on what the exact solution is. 
+ * + * \param p The point that the exact solution is computed at + * \param component The component of the exact solution (always 0 for now) + * \return double The exact value that was computed + */ + virtual double value (const Point &p, + const unsigned int component = 0) const; + + /** + * \brief Computes the gradient of the exact solution at the given point + * + * Computes the gradient of the exact/manufactured solution value at + * point p and member data time. See the design doc for details on + * what the gradient of the exact solution is + * + * \param p The point that the gradient is calculated at + * \param component The component of the system of equations this gradient is for + * \return Tensor<1,dim> A rank 1 tensor that contains the gradient + * in each spatial dimension + */ + virtual Tensor<1,dim> gradient (const Point &p, + const unsigned int component = 0) const; +}; + + +#include "HeatEquationImplem.hh" + +#endif // _HEATEQUATION_H_ diff --git a/parallel_in_time/src/HeatEquationImplem.hh b/parallel_in_time/src/HeatEquationImplem.hh new file mode 100644 index 0000000..050fec6 --- /dev/null +++ b/parallel_in_time/src/HeatEquationImplem.hh @@ -0,0 +1,391 @@ +#include +#include +#include "Utilities.hh" + +template +double RightHandSide::value (const Point &p, + const unsigned int component) const +{ + (void) component; + Assert (component == 0, ExcIndexRange(component, 0, 1)); + Assert (dim == 2, ExcNotImplemented()); + + double time = this->get_time(); + + if ((p[0] > 0.5) && (p[1] > -0.5)) + { + return std::exp(-0.5*(time-0.125)*(time-0.125)/(0.005)); + } + else if ((p[0] > -0.5) && (p[1] > 0.5)) + { + return std::exp(-0.5*(time-0.375)*(time-0.375)/(0.005)); + } + else + { + return 0; + } + + return 0; // No forcing function +} + +template +double RightHandSideMFG::value (const Point &p, + const unsigned int component) const +{ + (void) component; + Assert (component == 0, ExcIndexRange(component, 0, 1)); + Assert (dim == 2, ExcNotImplemented()); + + double time = this->get_time(); + + // return the manufactured solution of the right hand side + double pi = numbers::PI; + return 4*pi*pi*std::exp(-4*pi*pi*time)*std::cos(2*pi*p[0])*std::cos(2*pi*p[1]); +} + +template +double BoundaryValues::value (const Point &p, + const unsigned int component) const +{ + UNUSED(p); + (void) component; + Assert (component == 0, ExcIndexRange(component, 0, 1)); + return 0; +} + +template +double ExactValuesMFG::value (const Point &p, + const unsigned int component) const +{ + (void) component; + Assert (component == 0, ExcIndexRange(component, 0, 1)); + + double time = this->get_time(); + const double pi = numbers::PI; + // Return our manufactured solution boundary value + return std::exp(-4*pi*pi*time)*std::cos(2*pi*p[0])*std::cos(2*pi*p[1]); +} + + +// TODO: Find bug in here +template +Tensor<1,dim> ExactValuesMFG::gradient (const Point &p, + const unsigned int) const +{ + Tensor<1,dim> return_value; + const double pi = numbers::PI; + double time = this->get_time(); + return_value[0] = -2*pi*std::exp(-4*pi*pi*time)*std::cos(2*pi*p[1])*std::sin(2*pi*p[0]); + return_value[1] = -2*pi*std::exp(-4*pi*pi*time)*std::cos(2*pi*p[0])*std::sin(2*pi*p[1]); + return return_value; +} + +template +double InitialValuesMFG::value (const Point &p, + const unsigned int component) const +{ + (void) component; + Assert (component == 0, ExcIndexRange(component, 0, 1)); + const double pi = numbers::PI; + // Return our manufactured solution initial value + return std::cos(2*pi*p[0])*std::cos(2*pi*p[1]); +} + 
+template +HeatEquation::HeatEquation () + : + fe(1), + dof_handler(triangulation), + theta(0.5) +{ +} + +template +void HeatEquation::initialize(double a_time, + Vector& a_vector) const +{ +#if DO_MFG + // We only initialize values in the manufactured solution case + InitialValuesMFG iv_function; + iv_function.set_time(a_time); + VectorTools::project (dof_handler, constraints, + QGauss(fe.degree+1), iv_function, + a_vector); +#else + UNUSED(a_time); + UNUSED(a_vector); +#endif // DO_MFG + // If not the MFG solution case, a_vector is already zero'd so do nothing +} + +template +void HeatEquation::setup_system() +{ + dof_handler.distribute_dofs(fe); + + // pout() << std::endl + // << "===========================================" + // << std::endl + // << "Number of active cells: " << triangulation.n_active_cells() + // << std::endl + // << "Number of degrees of freedom: " << dof_handler.n_dofs() + // << std::endl + // << std::endl; + + constraints.clear (); + DoFTools::make_hanging_node_constraints (dof_handler, + constraints); + constraints.close(); + + DynamicSparsityPattern dsp(dof_handler.n_dofs()); + DoFTools::make_sparsity_pattern(dof_handler, + dsp, + constraints, + /*keep_constrained_dofs = */ true); + sparsity_pattern.copy_from(dsp); + + mass_matrix.reinit(sparsity_pattern); + laplace_matrix.reinit(sparsity_pattern); + system_matrix.reinit(sparsity_pattern); + + MatrixCreator::create_mass_matrix(dof_handler, + QGauss(fe.degree+1), + mass_matrix); + MatrixCreator::create_laplace_matrix(dof_handler, + QGauss(fe.degree+1), + laplace_matrix); + + system_rhs.reinit(dof_handler.n_dofs()); +} + + +template +void HeatEquation::solve_time_step(Vector& a_solution) +{ + SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm()); + SolverCG<> cg(solver_control); + + PreconditionSSOR<> preconditioner; + preconditioner.initialize(system_matrix, 1.0); + + cg.solve(system_matrix, a_solution, system_rhs, + preconditioner); + + constraints.distribute(a_solution); +} + + + +template +void HeatEquation::output_results(int a_time_idx, + double a_time, + Vector& a_solution) const +{ + + DataOutBase::VtkFlags vtk_flags; + vtk_flags.time = a_time; + vtk_flags.cycle = a_time_idx; + + DataOut data_out; + data_out.set_flags(vtk_flags); + + data_out.attach_dof_handler(dof_handler); + data_out.add_data_vector(a_solution, "U"); + + data_out.build_patches(); + + const std::string filename = "solution-" + + Utilities::int_to_string(a_time_idx, 3) + + ".vtk"; + std::ofstream output(filename.c_str()); + data_out.write_vtk(output); +} + +// This function won't make much sense in real parallel in time... 
+template +void HeatEquation::define() +{ + const unsigned int initial_global_refinement = 6; + + GridGenerator::hyper_L (triangulation); + triangulation.refine_global (initial_global_refinement); + + setup_system(); + + tmp.reinit (dof_handler.n_dofs()); + forcing_terms.reinit (dof_handler.n_dofs()); +} + +template +void HeatEquation::step(Vector& braid_data, + double deltaT, + double a_time, + int a_time_idx) +{ + // Set old solution to the braid data + // old_solution = braid_data; + a_time += deltaT; + ++a_time_idx; + + mass_matrix.vmult(system_rhs, braid_data); + + laplace_matrix.vmult(tmp, braid_data); + + system_rhs.add(-(1 - theta) * deltaT, tmp); + +#if DO_MFG + RightHandSideMFG rhs_function; +#else + RightHandSide rhs_function; +#endif + rhs_function.set_time(a_time); + VectorTools::create_right_hand_side(dof_handler, + QGauss(fe.degree+1), + rhs_function, + tmp); + + forcing_terms = tmp; + forcing_terms *= deltaT * theta; + + rhs_function.set_time(a_time - deltaT); + VectorTools::create_right_hand_side(dof_handler, + QGauss(fe.degree+1), + rhs_function, + tmp); + + forcing_terms.add(deltaT * (1 - theta), tmp); + system_rhs += forcing_terms; + + system_matrix.copy_from(mass_matrix); + system_matrix.add(theta * deltaT, laplace_matrix); + + constraints.condense (system_matrix, system_rhs); + + { +#if DO_MFG + // Set boundary to exact value in MFG solution + ExactValuesMFG boundary_values_function; +#else + BoundaryValues boundary_values_function; +#endif + boundary_values_function.set_time(a_time); + + std::map boundary_values; + VectorTools::interpolate_boundary_values(dof_handler, + 0, + boundary_values_function, + boundary_values); + + MatrixTools::apply_boundary_values(boundary_values, + system_matrix, + braid_data, + system_rhs); + } + + solve_time_step(braid_data); +} + +template +int HeatEquation::size() const +{ + return dof_handler.n_dofs(); +} + +template void +HeatEquation::dump_vec(std::ofstream& file, + const Vector& vector) const +{ + file << "Dumping vec:"; + for(int i=0; i != vector.size(); ++i) + { + file << "\n" << vector[i]; + } + file << std::endl; +} + +template void +HeatEquation::process_solution(double a_time, + int a_index, + const Vector& a_vector) +{ + // Compute the exact value for the manufactured solution case + ExactValuesMFG exact_function; + exact_function.set_time(a_time); + + Vector difference_per_cell (triangulation.n_active_cells()); + VectorTools::integrate_difference(dof_handler, + a_vector, + exact_function, + difference_per_cell, + QGauss(fe.degree+1), + VectorTools::L2_norm); + + const double L2_error = VectorTools::compute_global_error(triangulation, + difference_per_cell, + VectorTools::L2_norm); + + VectorTools::integrate_difference(dof_handler, + a_vector, + exact_function, + difference_per_cell, + QGauss(fe.degree+1), + VectorTools::H1_seminorm); + + const double H1_error = VectorTools::compute_global_error(triangulation, + difference_per_cell, + VectorTools::H1_seminorm); + + const QTrapez<1> q_trapez; + const QIterated q_iterated (q_trapez, 5); + VectorTools::integrate_difference (dof_handler, + a_vector, + exact_function, + difference_per_cell, + q_iterated, + VectorTools::Linfty_norm); + const double Linfty_error = VectorTools::compute_global_error(triangulation, + difference_per_cell, + VectorTools::Linfty_norm); + + const unsigned int n_active_cells = triangulation.n_active_cells(); + const unsigned int n_dofs = dof_handler.n_dofs(); + + std::cout << "Cycle " << a_index << ':' + << std::endl + << " Number of active cells: " + << 
n_active_cells + << std::endl + << " Number of degrees of freedom: " + << n_dofs + << std::endl; + + convergence_table.add_value("cycle", a_index); + convergence_table.add_value("cells", n_active_cells); + convergence_table.add_value("dofs", n_dofs); + convergence_table.add_value("L2", L2_error); + convergence_table.add_value("H1", H1_error); + convergence_table.add_value("Linfty", Linfty_error); + + convergence_table.set_precision("L2", 3); + convergence_table.set_precision("H1", 3); + convergence_table.set_precision("Linfty", 3); + + convergence_table.set_scientific("L2", true); + convergence_table.set_scientific("H1", true); + convergence_table.set_scientific("Linfty", true); + + convergence_table.set_tex_caption("cells", "\\# cells"); + convergence_table.set_tex_caption("dofs", "\\# dofs"); + convergence_table.set_tex_caption("L2", "@f$L^2@f$-error"); + convergence_table.set_tex_caption("H1", "@f$H^1@f$-error"); + convergence_table.set_tex_caption("Linfty", "@f$L^\\infty@f$-error"); + + convergence_table.set_tex_format("cells", "r"); + convergence_table.set_tex_format("dofs", "r"); + + std::cout << std::endl; + convergence_table.write_text(std::cout); + + std::ofstream error_table_file("tex-conv-table.tex"); + convergence_table.write_tex(error_table_file); +} diff --git a/parallel_in_time/src/Utilities.cc b/parallel_in_time/src/Utilities.cc new file mode 100644 index 0000000..e0019eb --- /dev/null +++ b/parallel_in_time/src/Utilities.cc @@ -0,0 +1,91 @@ +#include "Utilities.hh" + +#include +#include + +#include "mpi.h" + +int procID = 0; + +// the shared variables + +static std::string s_pout_filename ; +static std::string s_pout_basename ; +static std::ofstream s_pout ; + +static bool s_pout_init = false ; +static bool s_pout_open = false ; + +#ifdef USE_MPI +// in parallel, compute the filename give the basename +//[NOTE: dont call this before MPI is initialized.] +static void setFileName() +{ + static const size_t ProcnumSize = 1 + 10 + 1 ; //'.' + 10digits + '\0' + char procnum[ProcnumSize] ; + snprintf( procnum ,ProcnumSize ,".%d" ,procID); + s_pout_filename = s_pout_basename + procnum ; +} + +// in parallel, close the file if nec., open it and check for success +static void openFile() +{ + if ( s_pout_open ) + { + s_pout.close(); + } + s_pout.open( s_pout_filename.c_str() ); + // if open() fails, we have problems, but it's better + // to try again later than to make believe it succeeded + s_pout_open = (bool)s_pout ; +} + +#else +// in serial, filename is always cout +static void setFileName() +{ + s_pout_filename = "cout" ; +} + +// in serial, this does absolutely nothing +static void openFile() +{ +} +#endif + +std::ostream& pout() +{ +#ifdef USE_MPI + // the common case is _open == true, which just returns s_pout + if ( ! s_pout_open ) + { + // the uncommon cae: the file isn't opened, MPI may not be + // initialized, and the basename may not have been set + int flag_i, flag_f; + MPI_Initialized(&flag_i); + MPI_Finalized(&flag_f); + // app hasn't set a basename yet, so set the default + if ( ! s_pout_init ) + { + s_pout_basename = "pout" ; + s_pout_init = true ; + } + // if MPI not initialized, we cant open the file so return cout + if ( ! flag_i || flag_f) + { + return std::cout; // MPI hasn't been started yet, or has ended.... + } + // MPI is initialized, so file must not be, so open it + setFileName() ; + openFile() ; + // finally, in case the open failed, return cout + if ( ! 
s_pout_open ) + { + return std::cout ; + } + } + return s_pout ; +#else + return std::cout; +#endif +} diff --git a/parallel_in_time/src/Utilities.hh b/parallel_in_time/src/Utilities.hh new file mode 100644 index 0000000..6e41cd0 --- /dev/null +++ b/parallel_in_time/src/Utilities.hh @@ -0,0 +1,10 @@ +#ifndef _UTILITIES_H_ +#define _UTILITIES_H_ +#include + +#define UNUSED(x) (void)(x) + +extern int procID; + +std::ostream& pout(); +#endif diff --git a/parallel_in_time/src/parallel_in_time.cc b/parallel_in_time/src/parallel_in_time.cc new file mode 100644 index 0000000..460ee35 --- /dev/null +++ b/parallel_in_time/src/parallel_in_time.cc @@ -0,0 +1,121 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2013 - 2018 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE at + * the top level of the deal.II distribution. + * + * --------------------------------------------------------------------- + + * + * Author: Joshua Christopher, Colorado State University, 2018 + */ + +#include +#include + +#include "BraidFuncs.hh" +#include "HeatEquation.hh" +#include "Utilities.hh" + +int main(int argc, char *argv[]) +{ + try + { + using namespace dealii; + + /* Initialize MPI */ + MPI_Comm comm; //, comm_x, comm_t; + int rank; + MPI_Init(&argc, &argv); + comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm, &rank); + procID = rank; + + // Set up X-Braid + /* Initialize Braid */ + braid_Core core; + double tstart = 0.0; + double tstop = 0.002; + double ntime = 10; + my_App *app = new(my_App); + + braid_Init(MPI_COMM_WORLD, comm, tstart, tstop, ntime, app, + my_Step, my_Init, my_Clone, my_Free, my_Sum, my_SpatialNorm, + my_Access, my_BufSize, my_BufPack, my_BufUnpack, &core); + + /* Define XBraid parameters + * See -help message forf descriptions */ + int max_levels = 3; + // int nrelax = 1; + // int skip = 0; + double tol = 1.e-7; + // int cfactor = 2; + int max_iter = 5; + // int min_coarse = 10; + // int fmg = 0; + // int scoarsen = 0; + // int res = 0; + // int wrapper_tests = 0; + int print_level = 1; + int access_level = 1; + int use_sequential= 0; + + braid_SetPrintLevel( core, print_level); + braid_SetAccessLevel( core, access_level); + braid_SetMaxLevels(core, max_levels); + // braid_SetMinCoarse( core, min_coarse ); + // braid_SetSkip(core, skip); + // braid_SetNRelax(core, -1, nrelax); + braid_SetAbsTol(core, tol); + // braid_SetCFactor(core, -1, cfactor); + braid_SetMaxIter(core, max_iter); + braid_SetSeqSoln(core, use_sequential); + + app->eq.define(); + app->final_step = ntime; + + braid_Drive(core); + + // Free the memory now that we are done + braid_Destroy(core); + + delete app; + + // Clean up MPI + // MPI_Comm_free(&comm); + MPI_Finalize(); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl << exc.what() + << std::endl << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + + return 1; + } + catch (...) 
+  {
+    std::cerr << std::endl << std::endl
+              << "----------------------------------------------------"
+              << std::endl;
+    std::cerr << "Unknown exception!" << std::endl << "Aborting!"
+              << std::endl
+              << "----------------------------------------------------"
+              << std::endl;
+    return 1;
+  }
+
+  return 0;
+}
+
diff --git a/parallel_in_time/test/CMakeLists.txt b/parallel_in_time/test/CMakeLists.txt
new file mode 100644
index 0000000..2877d5f
--- /dev/null
+++ b/parallel_in_time/test/CMakeLists.txt
@@ -0,0 +1,2 @@
+set(TEST_LIBRARIES ${LIB_NAME} ${BRAID_DIR}/libbraid.a)
+DEAL_II_PICKUP_TESTS()
diff --git a/parallel_in_time/test/test_braid.cc b/parallel_in_time/test/test_braid.cc
new file mode 100644
index 0000000..817f993
--- /dev/null
+++ b/parallel_in_time/test/test_braid.cc
@@ -0,0 +1,79 @@
+#include <iostream>
+
+#include "braid.h"
+#include "braid_test.h"
+
+#include "BraidFuncs.hh"
+
+int main(int argc, char** argv)
+{
+  MPI_Comm comm;
+  int rank;
+  MPI_Init(&argc, &argv);
+  comm = MPI_COMM_WORLD;
+  MPI_Comm_rank(comm, &rank);
+
+  my_App *app = new(my_App);
+  app->eq.define();
+
+  double time = 0.2;
+
+  braid_Int init_access_result = braid_TestInitAccess(app,
+                                                      comm,
+                                                      stdout,
+                                                      time,
+                                                      my_Init,
+                                                      my_Access,
+                                                      my_Free);
+
+  braid_Int clone_result = braid_TestClone(app,
+                                           comm,
+                                           stdout,
+                                           time,
+                                           my_Init,
+                                           my_Access,
+                                           my_Free,
+                                           my_Clone);
+
+  braid_Int sum_result = braid_TestSum(app,
+                                       comm,
+                                       stdout,
+                                       time,
+                                       my_Init,
+                                       my_Access,
+                                       my_Free,
+                                       my_Clone,
+                                       my_Sum);
+
+  braid_Int norm_result = braid_TestSpatialNorm(app,
+                                                comm,
+                                                stdout,
+                                                time,
+                                                my_Init,
+                                                my_Free,
+                                                my_Clone,
+                                                my_Sum,
+                                                my_SpatialNorm);
+
+  braid_Int buf_result = braid_TestBuf(app,
+                                       comm,
+                                       stdout,
+                                       time,
+                                       my_Init,
+                                       my_Free,
+                                       my_Sum,
+                                       my_SpatialNorm,
+                                       my_BufSize,
+                                       my_BufPack,
+                                       my_BufUnpack);
+  // /* Create spatial communicator for wrapper-tests */
+  // braid_SplitCommworld(&comm, 1, &comm_x, &comm_t);
+//
+//  braid_TestAll(app, comm_x, stdout, 0.0, (tstop-tstart)/ntime,
+//                2*(tstop-tstart)/ntime, my_Init, my_Free, my_Clone,
+//                my_Sum, my_SpatialNorm, my_BufSize, my_BufPack,
+//                my_BufUnpack, my_Coarsen, my_Interp, my_Residual, my_Step);
+
+  /* Finalize MPI */
+  MPI_Finalize();
+}
diff --git a/parallel_in_time/test/test_braid.output b/parallel_in_time/test/test_braid.output
new file mode 100644
index 0000000..1ae28ca
--- /dev/null
+++ b/parallel_in_time/test/test_braid.output
@@ -0,0 +1,99 @@
+
+Starting braid_TestInitAccess
+
+ braid_TestInitAccess: Starting Test 1
+ braid_TestInitAccess: u = init(t=2.00e-01)
+ braid_TestInitAccess: access(u)
+ braid_TestInitAccess: check output: wrote u for initial condition at t=2.00e-01.
+
+ braid_TestInitAccess: free(u)
+Finished braid_TestInitAccess
+
+Starting braid_TestClone
+
+ braid_TestClone: Starting Test 1
+ braid_TestClone: u = init(t=2.00e-01)
+ braid_TestClone: v = clone(u)
+ braid_TestClone: access(u)
+ braid_TestClone: access(v)
+ braid_TestClone: check output: wrote u and v for initial condition at t=2.00e-01.
+
+ braid_TestClone: free(u)
+ braid_TestClone: free(v)
+Finished braid_TestClone
+
+Starting braid_TestSum
+
+ braid_TestSum: Starting Test 1
+ braid_TestSum: u = init(t=2.00e-01)
+ braid_TestSum: v = clone(u)
+ braid_TestSum: v = u - v
+ braid_TestSum: access(v)
+ braid_TestSum: check output: v should equal the zero vector
+
+ braid_TestSum: Starting Test 2
+ braid_TestSum: v = 2*u + v
+ braid_TestSum: access(v)
+ braid_TestSum: access(u)
+ braid_TestSum: check output: v should equal 2*u
+
+ braid_TestSum: free(u)
+ braid_TestSum: free(v)
+Finished braid_TestSum
+
+Starting braid_TestSpatialNorm
+
+ braid_TestSpatialNorm: Starting Test 1
+ braid_TestSpatialNorm: u = init(t=2.00e-01)
+ braid_TestSpatialNorm: spatialnorm(u)
+ braid_TestSpatialNorm: Warning: spatialnorm(u) = 0.0
+ braid_TestSpatialNorm: v = clone(u)
+ braid_TestSpatialNorm: v = u - v
+ braid_TestSpatialNorm: spatialnorm(v)
+ braid_TestSpatialNorm: Test 1 Passed
+ braid_TestSpatialNorm: actual output: spatialnorm(v) = 0.00e+00
+ braid_TestSpatialNorm: expected output: spatialnorm(v) = 0.0
+
+ braid_TestSpatialNorm: Starting Test 2
+ braid_TestSpatialNorm: w = clone(u)
+ braid_TestSpatialNorm: w = u + w
+ braid_TestSpatialNorm: spatialnorm(u)
+ braid_TestSpatialNorm: spatialnorm(w)
+ braid_TestSpatialNorm: Test 2 Failed, Likely due to u = 0
+ braid_TestSpatialNorm: actual output: spatialnorm(w) / spatialnorm(u) = 0.00e+00 / 0.00e+00 = -nan
+ braid_TestSpatialNorm: expected output: spatialnorm(w) / spatialnorm(u) = 2.0
+
+ braid_TestSpatialNorm: Starting Test 3
+ braid_TestSpatialNorm: free(w)
+ braid_TestSpatialNorm: w = clone(u)
+ braid_TestSpatialNorm: w = 0.0*u + 0.5*w
+ braid_TestSpatialNorm: spatialnorm(u)
+ braid_TestSpatialNorm: spatialnorm(w)
+ braid_TestSpatialNorm: Test 3 Failed, Likely due to u = 0
+ braid_TestSpatialNorm: actual output: spatialnorm(w) / spatialnorm(u) = 0.00e+00 / 0.00e+00 = -nan
+ braid_TestSpatialNorm: expected output: spatialnorm(w) / spatialnorm(u) = 0.5
+
+ braid_TestSpatialNorm: free(u)
+ braid_TestSpatialNorm: free(v)
+ braid_TestSpatialNorm: free(w)
+Finished braid_TestSpatialNorm: some tests failed, possibly due to u = 0
+
+Starting braid_TestBuf
+
+ braid_TestBuf: Starting Test 1
+ braid_TestBuf: u = init(t=2.00e-01)
+ braid_TestBuf: spatialnorm(u)
+ braid_TestBuf: Warning: spatialnorm(u) = 0.0
+ braid_TestBuf: size = bufsize()
+ braid_TestBuf: buffer = malloc(size)
+ braid_TestBuf: buffer = bufpack(u, buffer))
+ braid_TestBuf: v = bufunpack(buffer)
+ braid_TestBuf: v = u - v
+ braid_TestBuf: spatialnorm(v)
+ braid_TestBuf: Test 1 Passed
+ braid_TestBuf: actual output: spatialnorm(v) = 0.00e+00
+ braid_TestBuf: expected output: spatialnorm(v) = 0.0
+
+ braid_TestBuf: free(u)
+ braid_TestBuf: free(v)
+Finished braid_TestBuf: all tests passed successfully
-- 
2.39.5