From: Marco Feder Date: Thu, 22 May 2025 09:10:30 +0000 (+0200) Subject: Add MPI support for MUMPS X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8aaedb4ba1691d82125c4a7fefa0488761f9ef99;p=dealii.git Add MPI support for MUMPS --- diff --git a/include/deal.II/lac/sparse_direct.h b/include/deal.II/lac/sparse_direct.h index 2dba5af1c4..511a0d325d 100644 --- a/include/deal.II/lac/sparse_direct.h +++ b/include/deal.II/lac/sparse_direct.h @@ -35,6 +35,16 @@ # include #endif +#ifdef DEAL_II_WITH_TRILINOS +# include +# include +#endif + +#ifdef DEAL_II_WITH_PETSC +# include +# include +#endif + DEAL_II_NAMESPACE_OPEN namespace types @@ -469,10 +479,8 @@ public: */ using size_type = types::global_dof_index; - /** - * The AdditionalData struct contains data for controlling the MUMPS - * execution. + * The AdditionalData contains data for controlling the MUMPS execution. */ struct AdditionalData { @@ -512,12 +520,12 @@ public: {} /* - * If true, the MUMPS solver will print out details of the execution. + * If true, the MUMPS solver will print out details of the execution */ bool output_details; /* - * If true, the MUMPS solver will print out error statistics. + * If true, the MUMPS solver will print out error statistics */ bool error_statistics; /* @@ -532,30 +540,25 @@ public: bool posdef; /* - * If true, the MUMPS solver will use the Block Low-Rank factorization. - */ - bool blr_factorization; - - /* - * Stores Block Low-Rank approximation settings to be used in MUMPS - * factorization, if enabled. + * If true, the MUMPS solver will use the Block Low-Rank factorization */ + bool blr_factorization; BlockLowRank blr; }; - /** - * Constructor, takes AdditionalData to control MUMPS execution. + * Constructor, takes an MPI_Comm which defaults to MPI_COMM_WORLD and an + * AdditionalData to control MUMPS execution. */ SparseDirectMUMPS(const AdditionalData &additional_data = AdditionalData(), const MPI_Comm &communicator = MPI_COMM_WORLD); /** - * Destructor. + * Destructor */ ~SparseDirectMUMPS(); /** - * Exception. + * Exception */ DeclException0(ExcInitializeAlreadyCalled); @@ -573,24 +576,27 @@ public: * vector src and the solution is written into the output vector * dst. */ + template void - vmult(Vector &dst, const Vector &src) const; + vmult(VectorType &dst, const VectorType &src) const; + /** * A function in which the inverse of the transposed matrix is applied to the * input vector src and the solution is written into the output * vector dst. */ + template void - Tvmult(Vector &dst, const Vector &src) const; + Tvmult(VectorType &, const VectorType &src) const; /** * A function that returns the ICNTL integer array from MUMPS. * The ICNTL array contains integer control parameters for the MUMPS solver. * Keep in mind that MUMPS is a fortran library and the documentation refers - * to indices into this array starting from one rather than from zero. To - * select the correct index one can use a macro like this: - * `#define ICNTL(I) icntl[(I)-1]`. In the MUMPS documentation there is the + * to indices into this array into this array starting from one rather than + * from zero. To select the correct index one can use a macro like this: + * #define ICNTL(I) icntl[(I)-1]. In the MUMPS documentation there is the * description of each parameter of the array. Be aware that ownership of the * array remains in the current class rather than with the caller of this * function. 
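To make the Fortran-style numbering described above concrete, here is a minimal, self-contained sketch of the indexing macro the comment suggests. It is not part of this patch, and the std::array below is only a stand-in for the ICNTL array that the solver object owns and hands out.

#include <array>

// Sketch only: translate MUMPS' 1-based ICNTL(I) notation into the 0-based
// C array. The array here is a stand-in for the one owned by SparseDirectMUMPS.
#define ICNTL(I) icntl[(I) - 1]

int main()
{
  std::array<int, 60> icntl{}; // zero-initialized stand-in for MUMPS' icntl array
  ICNTL(4) = 2;                // MUMPS manual notation: ICNTL(4) sets the print
                               // level; this writes icntl[3] in C indexing
  return icntl[3];             // returns 2, illustrating the index shift
}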
@@ -615,23 +621,20 @@ private: */ mutable std::vector rhs; + /** + * Local to global index mapping for the right-hand side vector. + */ + mutable std::vector irhs_loc; + /** * irn contains the row indices of the non-zero entries of the matrix. */ -<<<<<<< HEAD std::unique_ptr irn; -======= - std::unique_ptr irn; ->>>>>>> 3caf1bb907 (Avoid raw pointers in SparseDirectMUMPS) /** * jcn contains the column indices of the non-zero entries of the matrix. */ -<<<<<<< HEAD std::unique_ptr jcn; -======= - std::unique_ptr jcn; ->>>>>>> 3caf1bb907 (Avoid raw pointers in SparseDirectMUMPS) /** * The number of rows of the matrix. The matrix is square. @@ -639,12 +642,18 @@ private: types::global_dof_index n; /** - * The number of non-zero entries in the matrix. + * Number of non-zero entries in the matrix. */ types::mumps_nnz nnz; /** - * This function hands over to MUMPS the system's matrix. + * IndexSet storing the locally owned rows of the matrix. + */ + IndexSet row_range; + + /** + * This function initializes a MUMPS instance and hands over the system's + * matrix matrix. */ template void diff --git a/source/lac/sparse_direct.cc b/source/lac/sparse_direct.cc index 4d4e3075fd..c77bd182d2 100644 --- a/source/lac/sparse_direct.cc +++ b/source/lac/sparse_direct.cc @@ -927,70 +927,169 @@ void SparseDirectMUMPS::initialize_matrix(const Matrix &matrix) { Assert(matrix.n() == matrix.m(), ExcMessage("Matrix needs to be square.")); - // Here we should be checking if the matrix is respecting the symmetry given - //(I.E. sym = 0 for non-symmetric matrix, sym = 1 for posdef matrix, sym = 2 - // for general symmetric). - // Hand over matrix to MUMPS as centralized assembled matrix - if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) + n = matrix.n(); + id.n = n; + + if constexpr (std::is_same_v, Matrix>) + { + // Serial matrix: hand over matrix to MUMPS as centralized assembled + // matrix + if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) + { + // number of nonzero elements in matrix + if constexpr (std::is_same_v>) + nnz = matrix.n_actually_nonzero_elements(); + + // representation of the matrix + a = std::make_unique(nnz); + + // matrix indices pointing to the row and column dimensions + // respectively of the matrix representation above (a): ie. 
a[k] is + // the matrix element (irn[k], jcn[k]) + irn = std::make_unique(nnz); + jcn = std::make_unique(nnz); + + size_type n_non_zero_elements = 0; + + // loop over the elements of the matrix row by row, as suggested in + // the documentation of the sparse matrix iterator class + if (additional_data.symmetric == true) + { + for (size_type row = 0; row < matrix.m(); ++row) + { + for (typename Matrix::const_iterator ptr = matrix.begin(row); + ptr != matrix.end(row); + ++ptr) + if (std::abs(ptr->value()) > 0.0 && ptr->column() >= row) + { + a[n_non_zero_elements] = ptr->value(); + irn[n_non_zero_elements] = row + 1; + jcn[n_non_zero_elements] = ptr->column() + 1; + + ++n_non_zero_elements; + } + } + } + else + { + for (size_type row = 0; row < matrix.m(); ++row) + { + for (typename Matrix::const_iterator ptr = matrix.begin(row); + ptr != matrix.end(row); + ++ptr) + if (std::abs(ptr->value()) > 0.0) + { + a[n_non_zero_elements] = ptr->value(); + irn[n_non_zero_elements] = row + 1; + jcn[n_non_zero_elements] = ptr->column() + 1; + ++n_non_zero_elements; + } + } + } + id.n = n; + id.nnz = n_non_zero_elements; + id.irn = irn.get(); + id.jcn = jcn.get(); + id.a = a.get(); + } + } + else if constexpr (std::is_same_v || + std::is_same_v) { - // Set number of unknowns - n = matrix.n(); + // Distributed matrix case + id.icntl[17] = 3; // distributed matrix assembly + id.nnz = matrix.n_nonzero_elements(); + nnz = id.nnz; + size_type n_non_zero_local = 0; - // number of nonzero elements in matrix - nnz = matrix.n_actually_nonzero_elements(); + // Get the range of rows owned by this process + row_range = matrix.locally_owned_range_indices(); + size_type local_non_zeros = 0; - // representation of the matrix - a = std::make_unique(nnz); + if constexpr (std::is_same_v) + { + const auto &trilinos_matrix = matrix.trilinos_matrix(); + local_non_zeros = trilinos_matrix.NumMyNonzeros(); + } + else if constexpr (std::is_same_v) + { + Mat &petsc_matrix = + const_cast(matrix) + .petsc_matrix(); + MatInfo info; + MatGetInfo(petsc_matrix, MAT_LOCAL, &info); + local_non_zeros = (PetscInt)info.nz_used; + } - // matrix indices pointing to the row and column dimensions - // respectively of the matrix representation above (a): ie. 
a[k] is - // the matrix element (irn[k], jcn[k]) - irn = std::make_unique(nnz); - jcn = std::make_unique(nnz); - size_type n_non_zero_elements = 0; + irn = std::make_unique(local_non_zeros); + jcn = std::make_unique(local_non_zeros); + a = std::make_unique(local_non_zeros); + irhs_loc.resize(row_range.n_elements()); - // loop over the elements of the matrix row by row, as suggested in - // the documentation of the sparse matrix iterator class if (additional_data.symmetric == true) { - for (size_type row = 0; row < matrix.m(); ++row) + for (const auto &row : row_range) { - for (typename Matrix::const_iterator ptr = matrix.begin(row); - ptr != matrix.end(row); - ++ptr) - if (std::abs(ptr->value()) > 0.0 && ptr->column() >= row) + for (auto it = matrix.begin(row); it != matrix.end(row); ++it) + if (std::abs(it->value()) > 0.0 && it->column() >= row) { - a[n_non_zero_elements] = ptr->value(); - irn[n_non_zero_elements] = row + 1; - jcn[n_non_zero_elements] = ptr->column() + 1; + const int global_row = row + 1; + const int global_col = it->column() + 1; - ++n_non_zero_elements; + // Store this non-zero entry + irn[n_non_zero_local] = global_row; + jcn[n_non_zero_local] = global_col; + a[n_non_zero_local] = it->value(); + + // Count local non-zeros + n_non_zero_local++; } + + const types::global_cell_index local_index = + row_range.index_within_set(row); + irhs_loc[local_index] = row + 1; } } else { - for (size_type row = 0; row < matrix.m(); ++row) + for (const auto &row : row_range) { - for (typename Matrix::const_iterator ptr = matrix.begin(row); - ptr != matrix.end(row); - ++ptr) - if (std::abs(ptr->value()) > 0.0) - { - a[n_non_zero_elements] = ptr->value(); - irn[n_non_zero_elements] = row + 1; - jcn[n_non_zero_elements] = ptr->column() + 1; - ++n_non_zero_elements; - } + // Loop over columns + for (auto it = matrix.begin(row); it != matrix.end(row); ++it) + { + if (std::abs(it->value()) > 0.0) + { + const int global_row = row + 1; + const int global_col = it->column() + 1; + + irn[n_non_zero_local] = global_row; + jcn[n_non_zero_local] = global_col; + a[n_non_zero_local] = it->value(); + + // Count local non-zeros + n_non_zero_local++; + } + } + + const types::global_cell_index local_index = + row_range.index_within_set(row); + irhs_loc[local_index] = row + 1; } } - id.n = n; - id.nnz = n_non_zero_elements; - id.irn = irn.get(); - id.jcn = jcn.get(); - id.a = a.get(); + + // Hand over local arrays to MUMPS + id.nnz_loc = n_non_zero_local; + id.irn_loc = irn.get(); + id.jcn_loc = jcn.get(); + id.a_loc = a.get(); + id.irhs_loc = irhs_loc.data(); + } + else + { + DEAL_II_NOT_IMPLEMENTED(); } } @@ -1046,42 +1145,95 @@ SparseDirectMUMPS::initialize(const Matrix &matrix) dmumps_c(&id); } + + +template void -SparseDirectMUMPS::vmult(Vector &dst, const Vector &src) const +SparseDirectMUMPS::vmult(VectorType &dst, const VectorType &src) const { - // and that the matrix has at least one nonzero element: + // Check that the matrix has at least one nonzero element: Assert(nnz != 0, ExcNotInitialized()); Assert(n == dst.size(), ExcMessage("Destination vector has the wrong size.")); Assert(n == src.size(), ExcMessage("Source vector has the wrong size.")); - // Hand over right-hand side - copy_rhs_to_mumps(src); - // Start solver - id.job = 3; - dmumps_c(&id); - copy_solution(dst); -} + if constexpr (std::is_same_v>) + { + // Centralized assembly for serial vectors. 
+ + // Hand over right-hand side + copy_rhs_to_mumps(src); + + // Start solver + id.job = 3; + dmumps_c(&id); + copy_solution(dst); + } + else if constexpr (std::is_same_v || + std::is_same_v) + { + id.icntl[19] = 10; // distributed rhs + id.icntl[20] = 0; // centralized solution, stored on rank 0 by MUMPS + id.nrhs = 1; + id.lrhs_loc = n; + id.nloc_rhs = row_range.n_elements(); + + + if constexpr (std::is_same_v) + id.rhs_loc = const_cast(src.begin()); + else if constexpr (std::is_same_v) + { + PetscScalar *local_array; + VecGetArray( + const_cast(src).petsc_vector(), + &local_array); + id.rhs_loc = local_array; + VecRestoreArray( + const_cast(src).petsc_vector(), + &local_array); + } + + + if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) + { + rhs.resize(id.lrhs_loc); + id.rhs = rhs.data(); + } + + // Start solver + id.job = 3; + dmumps_c(&id); + + // Copy solution into the given vector + // For MUMPS with centralized solution (icntl[20]=0), the solution is only + // on the root process (0) and needs to be distributed to all processes + if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0) + { + // Set all the values in the dst vector + for (size_type i = 0; i < n; ++i) + dst[i] = rhs[i]; + } + // Ensure the distributed vector has the correct values everywhere + dst.compress(VectorOperation::insert); + + rhs.resize(0); // remove rhs again + } +} +template void -SparseDirectMUMPS::Tvmult(Vector &dst, const Vector &src) const +SparseDirectMUMPS::Tvmult(VectorType &dst, const VectorType &src) const { // The matrix has at least one nonzero element: Assert(nnz != 0, ExcNotInitialized()); - Assert(n == dst.size(), ExcMessage("Destination vector has the wrong size.")); Assert(n == src.size(), ExcMessage("Source vector has the wrong size.")); id.icntl[8] = 2; // transpose - // Hand over right-hand side - copy_rhs_to_mumps(src); - - // Start solver - id.job = 3; - dmumps_c(&id); - copy_solution(dst); + vmult(dst, src); id.icntl[8] = 1; // reset to default } @@ -1154,10 +1306,23 @@ InstantiateUMFPACK(BlockSparseMatrix>); // explicit instantiations for SparseDirectMUMPS #ifdef DEAL_II_WITH_MUMPS + +# define InstantiateMUMPSMatVec(VECTOR) \ + template void SparseDirectMUMPS::vmult(VECTOR &, const VECTOR &) const; \ + template void SparseDirectMUMPS::Tvmult(VECTOR &, const VECTOR &) const; + +InstantiateMUMPSMatVec(TrilinosWrappers::MPI::Vector) + InstantiateMUMPSMatVec(PETScWrappers::MPI::Vector) + InstantiateMUMPSMatVec(Vector) + # define InstantiateMUMPS(MATRIX) \ template void SparseDirectMUMPS::initialize(const MATRIX &); -InstantiateMUMPS(SparseMatrix) InstantiateMUMPS(SparseMatrix) + InstantiateMUMPS(SparseMatrix) + InstantiateMUMPS(SparseMatrix) + InstantiateMUMPS(TrilinosWrappers::SparseMatrix) + InstantiateMUMPS(PETScWrappers::SparseMatrix) + InstantiateMUMPS(PETScWrappers::MPI::SparseMatrix) // InstantiateMUMPS(SparseMatrixEZ) // InstantiateMUMPS(SparseMatrixEZ) InstantiateMUMPS(BlockSparseMatrix) diff --git a/tests/mumps/mumps_06.cc b/tests/mumps/mumps_06.cc new file mode 100644 index 0000000000..b07422dee4 --- /dev/null +++ b/tests/mumps/mumps_06.cc @@ -0,0 +1,163 @@ +// ------------------------------------------------------------------------ +// +// SPDX-License-Identifier: LGPL-2.1-or-later +// Copyright (C) 2001 - 2024 by the deal.II authors +// +// This file is part of the deal.II library. +// +// Part of the source code is dual licensed under Apache-2.0 WITH +// LLVM-exception OR LGPL-2.1-or-later. 
Detailed license information +// governing the source code and code contributions can be found in +// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II. +// +// ------------------------------------------------------------------------ + +// test the mumps sparse direct solver in parallel on a +// TrilinosWrappers::SparseMatrix + +#include +#include + +#include + +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include "../tests/tests.h" + +template +void +solve_and_check(const MatrixType &M, + const VectorType &rhs, + const VectorType &solution) +{ + SparseDirectMUMPS::AdditionalData data; + data.output_details = false; + data.symmetric = true; + data.posdef = true; + SparseDirectMUMPS solver(data); + solver.initialize(M); + VectorType dst(rhs); + solver.vmult(dst, rhs); + dst -= solution; + Assert(dst.l2_norm() < 1e-8, ExcInternalError()); +} + +template +void +test() +{ + deallog << dim << 'd' << std::endl; + + MPI_Comm mpi_communicator = MPI_COMM_WORLD; + + parallel::distributed::Triangulation tria(mpi_communicator); + GridGenerator::hyper_cube(tria, -1, 1); + tria.refine_global(1); + + // destroy the uniformity of the matrix by + // refining one cell + tria.begin_active()->set_refine_flag(); + tria.execute_coarsening_and_refinement(); + tria.refine_global(8 - 2 * dim); + + FE_Q fe(1); + DoFHandler dof_handler(tria); + dof_handler.distribute_dofs(fe); + + deallog << "Number of dofs = " << dof_handler.n_dofs() << std::endl; + + auto locally_relevant_dofs = + DoFTools::extract_locally_relevant_dofs(dof_handler); + auto locally_owned_dofs = dof_handler.locally_owned_dofs(); + DynamicSparsityPattern dsp(locally_relevant_dofs); + AffineConstraints constraints; + + DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false); + SparsityTools::distribute_sparsity_pattern(dsp, + locally_owned_dofs, + mpi_communicator, + locally_relevant_dofs); + + MatrixType B; + B.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator); + + QGauss qr(2); + MatrixTools::create_mass_matrix(dof_handler, qr, B); + B.compress(VectorOperation::add); + + SparseDirectMUMPS::AdditionalData data; + data.output_details = true; + data.error_statistics = true; + SparseDirectMUMPS Binv(data); + Binv.initialize(B); + + // for a number of different solution + // vectors, make up a matching rhs vector + // and check what the solver finds + for (unsigned int i = 0; i < 3; ++i) + { + VectorType solution(dof_handler.locally_owned_dofs(), mpi_communicator); + VectorType x(dof_handler.locally_owned_dofs(), mpi_communicator); + VectorType b(dof_handler.locally_owned_dofs(), mpi_communicator); + + for (const types::global_dof_index idx : dof_handler.locally_owned_dofs()) + solution(idx) = idx + idx * (i + 1) * (i + 1); + + solution.compress(VectorOperation::insert); + + B.vmult(b, solution); + + Binv.vmult(x, b); + + x -= solution; + deallog << "relative norm distance = " << x.l2_norm() / solution.l2_norm() + << std::endl; + deallog << "absolute norms = " << x.l2_norm() << ' ' << solution.l2_norm() + << std::endl; + Assert(x.l2_norm() / solution.l2_norm() < 1e-8, ExcInternalError()); + + // check with also the posdef option + solve_and_check(B, b, solution); + } +} + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization( + argc, argv, numbers::invalid_unsigned_int); + + MPILogInitAll log(true); + + deallog << 
"Trilinos matrices" << std::endl; + test<2, TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector>(); + test<3, TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector>(); + + + deallog << "PETSc matrices" << std::endl; + test<2, PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector>(); + test<3, PETScWrappers::MPI::SparseMatrix, PETScWrappers::MPI::Vector>(); +} diff --git a/tests/mumps/mumps_06.with_p4est=true.with_trilinos=true.mpirun=2.output b/tests/mumps/mumps_06.with_p4est=true.with_trilinos=true.mpirun=2.output new file mode 100644 index 0000000000..60ecfe587a --- /dev/null +++ b/tests/mumps/mumps_06.with_p4est=true.with_trilinos=true.mpirun=2.output @@ -0,0 +1,71 @@ + +DEAL:0::Trilinos matrices +DEAL:0::2d +DEAL:0::Number of dofs = 1889 +DEAL:0::relative norm distance = 4.64308e-16 +DEAL:0::absolute norms = 4.39998e-11 94764.3 +DEAL:0::relative norm distance = 4.65027e-16 +DEAL:0::absolute norms = 1.10170e-10 236911. +DEAL:0::relative norm distance = 4.65027e-16 +DEAL:0::absolute norms = 2.20340e-10 473822. +DEAL:0::3d +DEAL:0::Number of dofs = 1333 +DEAL:0::relative norm distance = 1.48787e-15 +DEAL:0::absolute norms = 8.35670e-11 56165.6 +DEAL:0::relative norm distance = 1.29809e-15 +DEAL:0::absolute norms = 1.82269e-10 140414. +DEAL:0::relative norm distance = 1.29623e-15 +DEAL:0::absolute norms = 3.64018e-10 280828. +DEAL:0::PETSc matrices +DEAL:0::2d +DEAL:0::Number of dofs = 1889 +DEAL:0::relative norm distance = 4.02650e-16 +DEAL:0::absolute norms = 3.81568e-11 94764.3 +DEAL:0::relative norm distance = 4.10886e-16 +DEAL:0::absolute norms = 9.73433e-11 236911. +DEAL:0::relative norm distance = 4.10886e-16 +DEAL:0::absolute norms = 1.94687e-10 473822. +DEAL:0::3d +DEAL:0::Number of dofs = 1333 +DEAL:0::relative norm distance = 9.32100e-16 +DEAL:0::absolute norms = 5.23519e-11 56165.6 +DEAL:0::relative norm distance = 9.01192e-16 +DEAL:0::absolute norms = 1.26540e-10 140414. +DEAL:0::relative norm distance = 9.01192e-16 +DEAL:0::absolute norms = 2.53080e-10 280828. + +DEAL:1::Trilinos matrices +DEAL:1::2d +DEAL:1::Number of dofs = 1889 +DEAL:1::relative norm distance = 4.64308e-16 +DEAL:1::absolute norms = 4.39998e-11 94764.3 +DEAL:1::relative norm distance = 4.65027e-16 +DEAL:1::absolute norms = 1.10170e-10 236911. +DEAL:1::relative norm distance = 4.65027e-16 +DEAL:1::absolute norms = 2.20340e-10 473822. +DEAL:1::3d +DEAL:1::Number of dofs = 1333 +DEAL:1::relative norm distance = 1.48787e-15 +DEAL:1::absolute norms = 8.35670e-11 56165.6 +DEAL:1::relative norm distance = 1.29809e-15 +DEAL:1::absolute norms = 1.82269e-10 140414. +DEAL:1::relative norm distance = 1.29623e-15 +DEAL:1::absolute norms = 3.64018e-10 280828. +DEAL:1::PETSc matrices +DEAL:1::2d +DEAL:1::Number of dofs = 1889 +DEAL:1::relative norm distance = 4.02650e-16 +DEAL:1::absolute norms = 3.81568e-11 94764.3 +DEAL:1::relative norm distance = 4.10886e-16 +DEAL:1::absolute norms = 9.73433e-11 236911. +DEAL:1::relative norm distance = 4.10886e-16 +DEAL:1::absolute norms = 1.94687e-10 473822. +DEAL:1::3d +DEAL:1::Number of dofs = 1333 +DEAL:1::relative norm distance = 9.32100e-16 +DEAL:1::absolute norms = 5.23519e-11 56165.6 +DEAL:1::relative norm distance = 9.01192e-16 +DEAL:1::absolute norms = 1.26540e-10 140414. +DEAL:1::relative norm distance = 9.01192e-16 +DEAL:1::absolute norms = 2.53080e-10 280828. +