From e4749a2908be0d8aba952a8cd017b3b5a7edba6f Mon Sep 17 00:00:00 2001
From: wolf
Date: Fri, 20 Aug 2004 20:46:18 +0000
Subject: [PATCH] Add UMFPACK. Also add SuperLU for the moment, though I will
 remove this again since we don't yet configure for that.

git-svn-id: https://svn.dealii.org/trunk@9566 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/lac/include/lac/sparse_direct.h | 294 ++++++++++++-
 deal.II/lac/source/sparse_direct.cc     | 544 ++++++++++++++++++++++++
 2 files changed, 835 insertions(+), 3 deletions(-)

diff --git a/deal.II/lac/include/lac/sparse_direct.h b/deal.II/lac/include/lac/sparse_direct.h
index dcd119d01d..1e56ead71c 100644
--- a/deal.II/lac/include/lac/sparse_direct.h
+++ b/deal.II/lac/include/lac/sparse_direct.h
@@ -42,9 +42,10 @@
  * indicating error codes, etc. It also manages allocation of the
  * right amount of temporary storage required by these functions.
  *
- * For a description of the steps necessary for the installation of
- * HSL subroutines, read the section on external libraries in the
- * deal.II ReadMe file.
+ * Note that this class only works if configuration of the deal.II library has
+ * detected the presence of this solver. Please read the README file on what
+ * the configure script is looking for and how to provide it.
+ *
  *
  * @section SPDMA1 Interface and Method
  *
@@ -610,7 +611,11 @@ class SparseDirectMA27 : public Subscriptor
  * indicating error codes, etc. It also manages allocation of the
  * right amount of temporary storage required by these functions.
  *
+ * Note that this class only works if configuration of the deal.II library has
+ * detected the presence of this solver. Please read the README file on what
+ * the configure script is looking for and how to provide it.
  *
+ *
  * @section SPDMA47a Interface and Method
  *
  * For the meaning of the three functions initialize(), factorize(),
@@ -983,6 +988,289 @@ class SparseDirectMA47 : public Subscriptor
                                       const unsigned int *ICNTL);
 };
 
+
+
+
+/**
+ * This class provides an interface to the sparse direct solver UMFPACK.
+ * UMFPACK is a set of routines for solving unsymmetric sparse linear
+ * systems, Ax=b, using the Unsymmetric-pattern MultiFrontal method and
+ * direct sparse LU factorization. Matrices may have symmetric or
+ * unsymmetric sparsity patterns, and may have unsymmetric entries.
+ *
+ * Note that this class only works if configuration of the deal.II library has
+ * detected the presence of this solver. Please read the README file on
+ * what the configure script is looking for and how to provide it.
+ *
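+ * As a rough usage sketch (assuming a SparseMatrix<double> @p A and a
+ * Vector<double> @p b that have already been assembled; the names are
+ * placeholders and the snippet is an illustration only, not tested code),
+ * a linear system Ax=b could be solved like this:
+ * @verbatim
+ *   SparseDirectUMFPACK direct_solver;
+ *   direct_solver.factorize (A);
+ *   direct_solver.solve (b);    // on return, b contains the solution x
+ * @endverbatim
+ *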
+ * @author Wolfgang Bangerth, 2004
+ */
+class SparseDirectUMFPACK : public Subscriptor
+{
+  public:
+    /**
+     * Constructor.
+     */
+    SparseDirectUMFPACK ();
+
+    /**
+     * Destructor.
+     */
+    ~SparseDirectUMFPACK ();
+
+    /**
+     * This function does nothing. It is only
+     * here to provide an interface that is
+     * consistent with that of the HSL MA27
+     * and MA47 solver classes.
+     */
+    void initialize (const SparsityPattern &sparsity_pattern);
+
+    /**
+     * Factorize the matrix. This function
+     * may be called multiple times for
+     * different matrices, after the object
+     * of this class has been initialized for
+     * a certain sparsity pattern. You may
+     * therefore save some computing time if
+     * you want to invert several matrices
+     * with the same sparsity
+     * pattern. However, note that the bulk
+     * of the computing time is actually
+     * spent in the factorization, so this
+     * functionality may not always be of
+     * large benefit.
+     *
+     * If the initialization step has
+     * not been performed yet, then
+     * the initialize() function is
+     * called at the beginning of
+     * this function.
+     *
+     * This function copies the contents of
+     * the matrix into its own storage; the
+     * matrix can therefore be deleted after
+     * this operation, even if subsequent
+     * solves are required.
+     */
+    void factorize (const SparseMatrix<double> &matrix);
+
+    /**
+     * Solve for a certain right hand
+     * side vector. This function may
+     * be called multiple times for
+     * different right hand side
+     * vectors after the matrix has
+     * been factorized. This yields a
+     * big saving in computing time,
+     * since the actual solution is
+     * fast, compared to the
+     * factorization of the matrix.
+     *
+     * The solution will be returned
+     * in place of the right hand
+     * side vector.
+     *
+     * If the factorization has not
+     * happened before, strange
+     * things will happen. Note that
+     * we can't actually call the
+     * factorize() function from
+     * here if it has not yet been
+     * called, since we have no
+     * access to the actual matrix.
+     */
+    void solve (Vector<double> &rhs_and_solution) const;
+
+    /**
+     * Call the three functions above
+     * in that order, i.e. perform
+     * the whole solution process for
+     * the given right hand side
+     * vector.
+     *
+     * The solution will be returned
+     * in place of the right hand
+     * side vector.
+     */
+    void solve (const SparseMatrix<double> &matrix,
+                Vector<double>             &rhs_and_solution);
+
+    /**
+     * Exception
+     */
+    DeclException0 (ExcMatrixNotSquare);
+    /**
+     * Exception
+     */
+    DeclException0 (ExcUMFPACKError);
+
+  private:
+    /**
+     * The UMFPACK routines allocate objects
+     * in which they store information about
+     * symbolic and numeric values of the
+     * decomposition. The actual data type of
+     * these objects is opaque, and they are
+     * only passed around as void pointers.
+     */
+    void *symbolic_decomposition;
+    void *numeric_decomposition;
+
+    /**
+     * Free all memory that hasn't been freed
+     * yet.
+     */
+    void clear ();
+
+    /**
+     * The arrays in which we store the data
+     * for the solver.
+     */
+    std::vector<int>    Ap;
+    std::vector<int>    Ai;
+    std::vector<double> Ax;
+
+    /**
+     * Control and info arrays for the solver
+     * routines.
+     */
+    std::vector<double> control;
+};
+
+
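+/**
+ * Interface to the sparse direct solver SuperLU. Its interface mirrors that
+ * of the SparseDirectUMFPACK class above: initialize(), factorize() and
+ * solve(). As stated in the commit message, this class is only added for the
+ * moment and may be removed again, since the library does not yet configure
+ * for SuperLU. (This comment is only a rough summary, not full
+ * documentation.)
+ */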
+class SparseDirectSuperLU : public Subscriptor
+{
+  public:
+    /**
+     * Constructor.
+     */
+    SparseDirectSuperLU ();
+
+    /**
+     * Destructor.
+     */
+    ~SparseDirectSuperLU ();
+
+    /**
+     * This function does nothing. It is only
+     * here to provide an interface that is
+     * consistent with that of the HSL MA27
+     * and MA47 solver classes.
+     */
+    void initialize (const SparsityPattern &sparsity_pattern);
+
+    /**
+     * Factorize the matrix. This function
+     * may be called multiple times for
+     * different matrices, after the object
+     * of this class has been initialized for
+     * a certain sparsity pattern. You may
+     * therefore save some computing time if
+     * you want to invert several matrices
+     * with the same sparsity
+     * pattern. However, note that the bulk
+     * of the computing time is actually
+     * spent in the factorization, so this
+     * functionality may not always be of
+     * large benefit.
+     *
+     * If the initialization step has
+     * not been performed yet, then
+     * the initialize() function is
+     * called at the beginning of
+     * this function.
+     *
+     * This function copies the contents of
+     * the matrix into its own storage; the
+     * matrix can therefore be deleted after
+     * this operation, even if subsequent
+     * solves are required.
+     */
+    void factorize (const SparseMatrix<double> &matrix);
+
+    /**
+     * Solve for a certain right hand
+     * side vector. This function may
+     * be called multiple times for
+     * different right hand side
+     * vectors after the matrix has
+     * been factorized. This yields a
+     * big saving in computing time,
+     * since the actual solution is
+     * fast, compared to the
+     * factorization of the matrix.
+     *
+     * The solution will be returned
+     * in place of the right hand
+     * side vector.
+     *
+     * If the factorization has not
+     * happened before, strange
+     * things will happen. Note that
+     * we can't actually call the
+     * factorize() function from
+     * here if it has not yet been
+     * called, since we have no
+     * access to the actual matrix.
+     */
+    void solve (Vector<double> &rhs_and_solution) const;
+
+    /**
+     * Call the three functions above
+     * in that order, i.e. perform
+     * the whole solution process for
+     * the given right hand side
+     * vector.
+     *
+     * The solution will be returned
+     * in place of the right hand
+     * side vector.
+     */
+    void solve (const SparseMatrix<double> &matrix,
+                Vector<double>             &rhs_and_solution);
+
+    /**
+     * Exception
+     */
+    DeclException0 (ExcMatrixNotSquare);
+    /**
+     * Exception
+     */
+    DeclException0 (ExcSuperLUError);
+
+  private:
+    /**
+     * A data type that holds all the data we
+     * need to preserve between calls to
+     * factorize() and solve(). The actual
+     * definition of this structure is in the
+     * source file since it depends on
+     * SuperLU's data types and we don't want
+     * to include their header file into this
+     * one.
+     */
+    struct Data;
+
+    /**
+     * One such object.
+     */
+    Data *data;
+
+    /**
+     * Free all memory that hasn't been freed
+     * yet.
+     */
+    void clear ();
+};
+
 
 /*@}*/
 
diff --git a/deal.II/lac/source/sparse_direct.cc b/deal.II/lac/source/sparse_direct.cc
index ac3ff6d620..cfe1eae350 100644
--- a/deal.II/lac/source/sparse_direct.cc
+++ b/deal.II/lac/source/sparse_direct.cc
@@ -42,6 +42,17 @@
 #  include 
 #endif
 
+
+// if configured for UMFPACK, include the respective file. annoyingly the
+// UMFPACK files don't seem to have extern "C" wrapped around their headers...
+#ifdef DEAL_II_USE_UMFPACK
+extern "C" {
+#  include <umfpack.h>
+}
+#endif
+
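+// the SuperLU pieces below are only temporary (see the commit message: we
+// don't yet configure for SuperLU and the class may be removed again), which
+// is why its header is for now pulled in via a hard-coded path instead of a
+// configure-time switch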
+#include "/home/bangerth/tmp/superlu/SuperLU_3.0/SRC/dsp_defs.h"
+
 // if the HSL functions are not there, define them empty and throw an
 // exception
 #ifndef HAVE_HSL_MA27
@@ -1574,6 +1585,539 @@ call_ma47cd (const unsigned int *n_rows, //scalar
 
 
 
+
+SparseDirectUMFPACK::SparseDirectUMFPACK ()
+                :
+                symbolic_decomposition (0),
+                numeric_decomposition (0),
+                control (UMFPACK_CONTROL)
+{
+  umfpack_di_defaults (&control[0]);
+}
+
+
+
+SparseDirectUMFPACK::~SparseDirectUMFPACK ()
+{
+  clear ();
+}
+
+
+void
+SparseDirectUMFPACK::clear ()
+{
+  // delete objects that haven't been deleted
+  // yet
+  if (symbolic_decomposition != 0)
+    {
+      umfpack_di_free_symbolic (&symbolic_decomposition);
+      symbolic_decomposition = 0;
+    }
+
+  if (numeric_decomposition != 0)
+    {
+      umfpack_di_free_numeric (&numeric_decomposition);
+      numeric_decomposition = 0;
+    }
+
+  {
+    std::vector<int> tmp;
+    tmp.swap (Ap);
+  }
+
+  {
+    std::vector<int> tmp;
+    tmp.swap (Ai);
+  }
+
+  {
+    std::vector<double> tmp;
+    tmp.swap (Ax);
+  }
+
+  umfpack_di_defaults (&control[0]);
+}
+
+
+
+void
+SparseDirectUMFPACK::
+initialize (const SparsityPattern &)
+{}
+
+
+
+void
+SparseDirectUMFPACK::
+factorize (const SparseMatrix<double> &matrix)
+{
+  Assert (matrix.m() == matrix.n(), ExcMatrixNotSquare());
+
+  clear ();
+
+  const unsigned int N = matrix.m();
+
+  // copy over the data from the matrix to
+  // the data structures UMFPACK wants. note
+  // two things: first, UMFPACK wants
+  // compressed column storage whereas we
+  // always do compressed row storage; we
+  // work around this by, rather than
+  // shuffling things around, copy over the
+  // data we have, but then call the
+  // umfpack_di_solve function with the
+  // UMFPACK_At argument, meaning that we
+  // want to solve for the transpose system
+  //
+  // second: the data we have in the sparse
+  // matrices is "almost" right already;
+  // UMFPACK wants the entries in each row
+  // (i.e. really: column) to be sorted in
+  // ascending order. we almost have that,
+  // except that we usually store the
+  // diagonal first in each row to allow for
+  // some optimizations. thus, we have to
+  // resort things a little bit, but only
+  // within each row
+  //
+  // final note: if the matrix has entries in
+  // the sparsity pattern that are actually
+  // occupied by entries that have a zero
+  // numerical value, then we keep them
+  // anyway. people are supposed to provide
+  // accurate sparsity patterns.
+  Ap.resize (N+1);
+  Ai.resize (matrix.get_sparsity_pattern().n_nonzero_elements());
+  Ax.resize (matrix.get_sparsity_pattern().n_nonzero_elements());
+
+  // first fill row lengths array
+  Ap[0] = 0;
+  for (unsigned int row=1; row<=N; ++row)
+    Ap[row] = Ap[row-1] + matrix.get_sparsity_pattern().row_length(row-1);
+  Assert (static_cast<unsigned int>(Ap.back()) == Ai.size(),
+          ExcInternalError());
+
+  // then copy over matrix elements
+  {
+    unsigned int index = 0;
+    for (SparseMatrix<double>::const_iterator p=matrix.begin();
+         p!=matrix.end(); ++p, ++index)
+      {
+        Ai[index] = p->column();
+        Ax[index] = p->value();
+      }
+    Assert (index == Ai.size(), ExcInternalError());
+  }
+
+  // finally do the copying around of entries
+  // so that the diagonal entry is in the
+  // right place. note that this is easy to
+  // detect: since all entries apart from the
+  // diagonal entry are sorted, we know that
+  // the diagonal entry is in the wrong place
+  // if and only if its column index is
+  // larger than the column index of the
+  // second entry in a row
+  //
+  // ignore rows with only one or no entry
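+  // (small example of the resorting described above: a row belonging to
+  //  row/column index 4 whose column indices are stored as [4, 1, 7] --
+  //  diagonal first -- is rearranged by the loop below into the sorted
+  //  order [1, 4, 7], by bubbling the leading diagonal entry to the right)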
+  {
+    for (unsigned int row=0; row<N; ++row)
+      {
+        int cursor = Ap[row];
+        while ((cursor < Ap[row+1]-1) &&
+               (Ai[cursor] > Ai[cursor+1]))
+          {
+            std::swap (Ai[cursor], Ai[cursor+1]);
+            std::swap (Ax[cursor], Ax[cursor+1]);
+            ++cursor;
+          }
+      }
+  }
+
+
+  int status;
+
+  status = umfpack_di_symbolic (N, N,
+                                &Ap[0], &Ai[0], &Ax[0],
+                                &symbolic_decomposition,
+                                &control[0], 0);
+  AssertThrow (status == UMFPACK_OK, ExcUMFPACKError());
+
+  status = umfpack_di_numeric (&Ap[0], &Ai[0], &Ax[0],
+                               symbolic_decomposition,
+                               &numeric_decomposition,
+                               &control[0], 0);
+  AssertThrow (status == UMFPACK_OK, ExcUMFPACKError());
+
+  umfpack_di_free_symbolic (&symbolic_decomposition);
+}
+
+
+
+void
+SparseDirectUMFPACK::solve (Vector<double> &rhs_and_solution) const
+{
+  // make sure that some kind of factorize()
+  // call has happened before
+  Assert (Ap.size() != 0, ExcNotInitialized());
+  Assert (Ai.size() != 0, ExcNotInitialized());
+  Assert (Ai.size() == Ax.size(), ExcNotInitialized());
+
+  Vector<double> rhs (rhs_and_solution.size());
+  rhs = rhs_and_solution;
+
+  // solve the system. note that since
+  // UMFPACK wants compressed column storage
+  // instead of the compressed row storage
+  // format we use in deal.II's
+  // SparsityPattern classes, we solve for
+  // UMFPACK's A^T instead
+  const int status
+    = umfpack_di_solve (UMFPACK_At,
+                        &Ap[0], &Ai[0], &Ax[0],
+                        rhs_and_solution.begin(), rhs.begin(),
+                        numeric_decomposition,
+                        &control[0], 0);
+  AssertThrow (status == UMFPACK_OK, ExcUMFPACKError());
+}
+
+
+
+void
+SparseDirectUMFPACK::solve (const SparseMatrix<double> &matrix,
+                            Vector<double>             &rhs_and_solution)
+{
+  factorize (matrix);
+  solve (rhs_and_solution);
+}
+
+
+
+
+
+SparseDirectSuperLU::SparseDirectSuperLU ()
+                :
+                data (0)
+{}
+
+
+
+struct SparseDirectSuperLU::Data
+{
+    SuperMatrix A, X, L, U;
+    std::vector<int>    perm_r;
+    std::vector<int>    perm_c;
+    std::vector<double> solution;
+    std::vector<double> R, C;
+    std::vector<int>    etree;
+    char equed[1];
+    void *work;
+    int lwork;
+
+    Data (const unsigned int N);
+    ~Data ();
+};
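+
+// the members of the Data structure above correspond to the arguments of
+// SuperLU's dgssvx() driver routine used below: the matrix A, the solution
+// matrix X and its backing array, the factors L and U, the row and column
+// permutations perm_r/perm_c, the equilibration data equed/R/C, the
+// elimination tree etree, and the optional user-supplied work array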
+
+
+SparseDirectSuperLU::Data::Data (const unsigned int N)
+                :
+                perm_r (N),
+                perm_c (N),
+                solution (N),
+                R (N),
+                C (N),
+                etree (N),
+                work (0),
+                lwork (0)
+{}
+
+
+SparseDirectSuperLU::Data::~Data ()
+{
+  Destroy_SuperMatrix_Store(&A);
+  Destroy_SuperMatrix_Store(&X);
+  Destroy_SuperNode_Matrix(&L);
+  Destroy_CompCol_Matrix(&U);
+}
+
+
+
+SparseDirectSuperLU::~SparseDirectSuperLU ()
+{
+  clear ();
+}
+
+
+void
+SparseDirectSuperLU::clear ()
+{
+  if (data != 0)
+    delete data;
+  data = 0;
+}
+
+
+
+void
+SparseDirectSuperLU::
+initialize (const SparsityPattern &)
+{}
+
+
+
+void
+SparseDirectSuperLU::
+factorize (const SparseMatrix<double> &matrix)
+{
+  Assert (matrix.m() == matrix.n(), ExcMatrixNotSquare());
+
+  // delete old objects if there are any
+  clear ();
+
+  const unsigned int N = matrix.m();
+
+  // copy over the data from the matrix to
+  // the data structures SuperLU wants. note
+  // two things: first, SuperLU wants
+  // compressed column storage whereas we
+  // always do compressed row storage; we
+  // work around this by, rather than
+  // shuffling things around, copying over
+  // the data we have, but then calling the
+  // dgssvx function with the Trans option
+  // set to TRANS, meaning that we
+  // want to solve for the transpose system
+  //
+  // second: the data we have in the sparse
+  // matrices is "almost" right already;
+  // SuperLU wants the entries in each row
+  // (i.e. really: column) to be sorted in
+  // ascending order. we almost have that,
+  // except that we usually store the
+  // diagonal first in each row to allow for
+  // some optimizations. thus, we have to
+  // resort things a little bit, but only
+  // within each row
+  //
+  // final note: if the matrix has entries in
+  // the sparsity pattern that are actually
+  // occupied by entries that have a zero
+  // numerical value, then we keep them
+  // anyway. people are supposed to provide
+  // accurate sparsity patterns.
+  std::vector<int>    Ap (N+1);
+  std::vector<int>    Ai (matrix.get_sparsity_pattern().n_nonzero_elements());
+  std::vector<double> Ax (matrix.get_sparsity_pattern().n_nonzero_elements());
+
+  // first fill row lengths array
+  Ap[0] = 0;
+  for (unsigned int row=1; row<=N; ++row)
+    Ap[row] = Ap[row-1] + matrix.get_sparsity_pattern().row_length(row-1);
+  Assert (static_cast<unsigned int>(Ap.back()) == Ai.size(),
+          ExcInternalError());
+
+  // then copy over matrix elements
+  {
+    unsigned int index = 0;
+    for (SparseMatrix<double>::const_iterator p=matrix.begin();
+         p!=matrix.end(); ++p, ++index)
+      {
+        Ai[index] = p->column();
+        Ax[index] = p->value();
+      }
+    Assert (index == Ai.size(), ExcInternalError());
+  }
+
+  // finally do the copying around of entries
+  // so that the diagonal entry is in the
+  // right place. note that this is easy to
+  // detect: since all entries apart from the
+  // diagonal entry are sorted, we know that
+  // the diagonal entry is in the wrong place
+  // if and only if its column index is
+  // larger than the column index of the
+  // second entry in a row
+  //
+  // ignore rows with only one or no entry
+  {
+    for (unsigned int row=0; row<N; ++row)
+      {
+        int cursor = Ap[row];
+        while ((cursor < Ap[row+1]-1) &&
+               (Ai[cursor] > Ai[cursor+1]))
+          {
+            std::swap (Ai[cursor], Ai[cursor+1]);
+            std::swap (Ax[cursor], Ax[cursor+1]);
+            ++cursor;
+          }
+      }
+  }
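+  // (the loop above is the same diagonal fix-up as in
+  //  SparseDirectUMFPACK::factorize() further up in this file: it bubbles
+  //  the diagonal entry, which deal.II stores first within each row, to its
+  //  sorted position)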
+
+
+  // now factorize the matrix. we need a
+  // dummy rhs vector as well as
+  // an object to hold the data we need
+  data = new Data (N);
+
+  std::vector<double> dummy_rhs (N);
+  SuperMatrix B;
+
+  dCreate_CompRow_Matrix(&data->A, N, N, Ax.size(),
+                         &Ax[0], &Ai[0], &Ap[0], SLU_NC, SLU_D, SLU_GE);
+
+  dCreate_Dense_Matrix(&B, N, 1, &dummy_rhs[0], N,
+                       SLU_DN, SLU_D, SLU_GE);
+  dCreate_Dense_Matrix(&data->X, N, 1, &data->solution[0], N,
+                       SLU_DN, SLU_D, SLU_GE);
+
+  // set options. note that just as with
+  // umfpack, we solve the transpose system,
+  // since we give compressed row storage and
+  // superlu wants compressed column storage
+  superlu_options_t options;
+  set_default_options(&options);
+  options.Trans = TRANS;
+
+  // this seems to be crucial. without it we
+  // get atrocious performance
+  options.ColPerm = MMD_AT_PLUS_A;
+  options.SymmetricMode = YES;
+
+  // indicate that we don't actually want to
+  // solve anything, just to factorize
+  B.ncol = 0;
+
+  // lots of unused output arguments of dgssvx
+  int info;
+  double rpg, rcond;
+  double ferr[1];
+  double berr[1];
+  mem_usage_t mem_usage;
+
+  SuperLUStat_t stat;
+  StatInit(&stat);
+
+  // do the factorization
+  dgssvx(&options, &data->A, &data->perm_c[0], &data->perm_r[0],
+         &data->etree[0], data->equed, &data->R[0], &data->C[0],
+         &data->L, &data->U, data->work, data->lwork, &B,
+         &data->X, &rpg, &rcond, ferr, berr,
+         &mem_usage, &stat, &info);
+  AssertThrow (info == 0, ExcSuperLUError());
+
+  // delete temp vector again
+  Destroy_SuperMatrix_Store (&B);
+  StatFree(&stat);
+}
+
+
+
+void
+SparseDirectSuperLU::solve (Vector<double> &rhs_and_solution) const
+{
+  const unsigned int N = rhs_and_solution.size();
+
+  // create rhs vector
+  SuperMatrix B;
+  dCreate_Dense_Matrix(&B, N, 1, rhs_and_solution.begin(), N,
+                       SLU_DN, SLU_D, SLU_GE);
+
+  // set options. note that just as with
+  // umfpack, we solve the transpose system,
+  // since we give compressed row storage and
+  // superlu wants compressed column storage
+  superlu_options_t options;
+  set_default_options(&options);
+  options.Trans = TRANS;
+
+  // this seems to be crucial. without it we
+  // get atrocious performance
+  options.ColPerm = MMD_AT_PLUS_A;
+  options.SymmetricMode = YES;
+
+  // indicate that the matrix has already
+  // been factorized
+  options.Fact = FACTORED;
+
+  // lots of unused output arguments of dgssvx
+  int info;
+  double rpg, rcond;
+  double ferr[1];
+  double berr[1];
+  mem_usage_t mem_usage;
+
+  SuperLUStat_t stat;
+  StatInit(&stat);
+
+  // do the solve
+  dgssvx(&options, &data->A, &data->perm_c[0], &data->perm_r[0],
+         &data->etree[0], data->equed, &data->R[0], &data->C[0],
+         &data->L, &data->U, data->work, data->lwork,
+         &B, &data->X, &rpg, &rcond, ferr, berr,
+         &mem_usage, &stat, &info);
+  AssertThrow (info == 0, ExcSuperLUError());
+
+  // copy result
+  std::copy ((double*) ((DNformat*) data->X.Store)->nzval,
+             (double*) ((DNformat*) data->X.Store)->nzval + N,
+             rhs_and_solution.begin());
+
+  // delete temp vectors
+  Destroy_SuperMatrix_Store(&B);
+  StatFree(&stat);
+}
+
+
+
+void
+SparseDirectSuperLU::solve (const SparseMatrix<double> &matrix,
+                            Vector<double>             &rhs_and_solution)
+{
+  factorize (matrix);
+  solve (rhs_and_solution);
+}
+
+
+
 // explicit instantiations
 
 template void
-- 
2.39.5