From: David Wells
Date: Wed, 21 Dec 2016 17:28:48 +0000 (-0500)
Subject: Bulk write vector values in distribute_local_to_global.
X-Git-Tag: v8.5.0-rc1~278^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F3691%2Fhead;p=dealii.git

Bulk write vector values in distribute_local_to_global.

This patch lowers the total number of calls to VecSetValues in
ConstraintMatrix::distribute_local_to_global by writing new vector values
to a temporary array before sending them off to PETSc. There are no
notable performance improvements for the other vector classes (though the
change certainly should not hurt them either).

I used step-40 to check that the performance improved. Here is the
(filtered) output with this patch:

[drwells@archway step-40]$ make run | grep assembly
| assembly                        |         1 |  0.00966s |       8.7% |
| assembly                        |         1 |   0.0122s |       6.4% |
| assembly                        |         1 |   0.0217s |       5.9% |
| assembly                        |         1 |   0.0431s |       5.6% |
| assembly                        |         1 |   0.0848s |       5.6% |
| assembly                        |         1 |    0.173s |       5.6% |
| assembly                        |         1 |    0.351s |       5.5% |
| assembly                        |         1 |    0.729s |       5.5% |

and on master:

[drwells@archway step-40]$ make run | grep assembly
| assembly                        |         1 |  0.00672s |       8.9% |
| assembly                        |         1 |   0.0133s |       7.2% |
| assembly                        |         1 |   0.0258s |       6.9% |
| assembly                        |         1 |   0.0513s |       6.8% |
| assembly                        |         1 |    0.101s |       6.6% |
| assembly                        |         1 |    0.203s |       6.5% |
| assembly                        |         1 |    0.414s |       6.4% |
| assembly                        |         1 |     0.93s |       6.9% |

so it appears that the whole assembly process takes about 10% less time.
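For reference, here is what the change amounts to at the PETSc level (a
minimal standalone sketch, not code from this patch: the two function names
are made up for illustration, and deal.II's PETScWrappers essentially
forward a bulk add to a single VecSetValues call):

    #include <petscvec.h>
    #include <cstddef>
    #include <vector>

    // Before: one PETSc call per vector entry.
    void add_one_by_one(Vec vec,
                        const std::vector<PetscInt>    &rows,
                        const std::vector<PetscScalar> &values)
    {
      for (std::size_t i = 0; i < rows.size(); ++i)
        VecSetValues(vec, 1, &rows[i], &values[i], ADD_VALUES);
    }

    // After: one PETSc call for all entries. The temporary arrays in this
    // patch exist so that the indices and values are contiguous.
    void add_in_bulk(Vec vec,
                     const std::vector<PetscInt>    &rows,
                     const std::vector<PetscScalar> &values)
    {
      VecSetValues(vec, static_cast<PetscInt>(rows.size()),
                   rows.data(), values.data(), ADD_VALUES);
    }

Either way, VecAssemblyBegin/VecAssemblyEnd must still run before the
vector is used; the savings come purely from the reduced call overhead.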
---

diff --git a/doc/news/changes/minor/20161221DavidWells b/doc/news/changes/minor/20161221DavidWells
new file mode 100644
index 0000000000..9b037c291d
--- /dev/null
+++ b/doc/news/changes/minor/20161221DavidWells
@@ -0,0 +1,5 @@
+Improved: ConstraintMatrix::distribute_local_to_global now does a bulk write of
+all vector values at once. This improves performance with
+PETScWrappers::MPI::Vector by about 10%.
+
+(David Wells, 2016/12/21)
diff --git a/include/deal.II/lac/constraint_matrix.templates.h b/include/deal.II/lac/constraint_matrix.templates.h
index 9b85e6d02b..cadb380cc0 100644
--- a/include/deal.II/lac/constraint_matrix.templates.h
+++ b/include/deal.II/lac/constraint_matrix.templates.h
@@ -1442,6 +1442,11 @@ namespace internals
        */
       std::vector<size_type> vector_indices;
 
+      /**
+       * Temporary array for vector values
+       */
+      std::vector<Number> vector_values;
+
       /**
        * Data array for reorder row/column indices. Use a shared ptr to
        * global_rows to avoid defining in the .h file
@@ -2271,8 +2276,13 @@ ConstraintMatrix::distribute_local_to_global (
   // an array in any case since we cannot know about the actual data type in
   // the ConstraintMatrix class (unless we do cast). This involves a little
   // bit of logic to determine the type of the matrix value.
-  std::vector<size_type> &cols = scratch_data->columns;
-  std::vector<number>    &vals = scratch_data->values;
+  std::vector<size_type> &cols           = scratch_data->columns;
+  std::vector<number>    &vals           = scratch_data->values;
+  // create arrays for writing into the vector as well
+  std::vector<size_type> &vector_indices = scratch_data->vector_indices;
+  std::vector<number>    &vector_values  = scratch_data->vector_values;
+  vector_indices.resize(n_actual_dofs);
+  vector_values.resize(n_actual_dofs);
   SparseMatrix<number> *sparse_matrix
     = dynamic_cast<SparseMatrix<number> *>(&global_matrix);
   if (use_dealii_matrix == false)
@@ -2285,6 +2295,7 @@ ConstraintMatrix::distribute_local_to_global (
 
   // now do the actual job. go through all the global rows that we will touch
   // and call resolve_matrix_row for each of those.
+  size_type local_row_n = 0;
   for (size_type i=0; i<n_actual_dofs; ++i)
     {
       const size_type row = global_rows.global_row(i);
@@ -2313,7 +2324,33 @@ ConstraintMatrix::distribute_local_to_global (
                                                    local_matrix);
 
           if (val != number ())
-            global_vector(row) += static_cast<typename VectorType::value_type>(val);
+            {
+              vector_indices[local_row_n] = row;
+              vector_values[local_row_n] = val;
+              ++local_row_n;
+            }
         }
     }
+  // Drop the elements of vector_indices and vector_values that we do not use (we may
+  // always elide writing zero values to vectors)
+  const size_type n_local_rows = local_row_n;
+  vector_indices.resize(n_local_rows);
+  vector_values.resize(n_local_rows);
+
+  // While the standard case is that these types are equal, they need not be, so
+  // only do a bulk update if they are. Note that the types in the arguments to
+  // add must be equal if we have a Trilinos or PETSc vector but do not have to
+  // be if we have a deal.II native vector: one could further optimize this for
+  // Vector, LinearAlgebra::distributed::Vector, etc.
+  if (types_are_equal<typename VectorType::value_type, number>::value)
+    {
+      global_vector.add(vector_indices,
+                        *reinterpret_cast<std::vector<typename VectorType::value_type> *>(&vector_values));
+    }
+  else
+    {
+      for (size_type row_n=0; row_n<n_local_rows; ++row_n)
+        global_vector(vector_indices[row_n]) +=
+          static_cast<typename VectorType::value_type>(vector_values[row_n]);
+    }
 }
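A remark on the type dispatch at the end of the patch: the bulk add()
requires the value array to have exactly the vector's own value type (always
the case for the PETSc and Trilinos wrappers, while deal.II's native vectors
also accept other scalar types), hence the compile-time branch. The following
standalone sketch isolates that logic, with std::is_same standing in for
deal.II's types_are_equal and a mock vector class instead of a real one;
flush_buffered_values is a hypothetical name, not a function in deal.II:

    #include <cstddef>
    #include <type_traits>
    #include <vector>

    // Minimal stand-in for a vector class with both a bulk and an
    // element-wise update interface (not a deal.II class).
    struct MockVector
    {
      typedef double value_type;
      std::vector<double> data;

      double &operator()(std::size_t i) { return data[i]; }

      // Bulk interface analogous to global_vector.add(indices, values).
      void add(const std::vector<std::size_t> &indices,
               const std::vector<double>      &values)
      {
        for (std::size_t i = 0; i < indices.size(); ++i)
          data[indices[i]] += values[i];
      }
    };

    // The same dispatch as in the patch: 'number' is the scalar type of
    // the buffered local contributions.
    template <typename VectorType, typename number>
    void flush_buffered_values(VectorType &global_vector,
                               std::vector<std::size_t> &indices,
                               std::vector<number>      &values)
    {
      typedef typename VectorType::value_type value_type;
      if (std::is_same<value_type, number>::value)
        // Identical scalar types: one bulk call. The cast is a no-op in
        // this branch; it only exists so that the code type-checks when
        // the branch is dead.
        global_vector.add(indices,
                          *reinterpret_cast<std::vector<value_type> *>(&values));
      else
        // Mixed scalar types: fall back to per-entry accumulation with
        // an explicit conversion.
        for (std::size_t i = 0; i < indices.size(); ++i)
          global_vector(indices[i]) += static_cast<value_type>(values[i]);
    }

    int main()
    {
      MockVector v;
      v.data.resize(8, 0.);
      std::vector<std::size_t> indices = {1, 3, 5};
      std::vector<double>      values  = {0.5, 1.5, 2.5};
      flush_buffered_values(v, indices, values); // takes the bulk branch
    }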