From 81d7ef49066348a01b96390fda1396b60ac70d2d Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Thu, 23 Apr 2009 18:11:59 +0000
Subject: [PATCH] Put the locks to a position where they need to be set only once.

git-svn-id: https://svn.dealii.org/trunk@18708 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../include/lac/constraint_matrix.templates.h | 41 ++++++++-----------
 1 file changed, 16 insertions(+), 25 deletions(-)

diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h
index d0bf1f590f..ff4f22c0ef 100644
--- a/deal.II/lac/include/lac/constraint_matrix.templates.h
+++ b/deal.II/lac/include/lac/constraint_matrix.templates.h
@@ -1275,6 +1275,8 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
     }
   internals::list_shellsort (my_indices);

+  Threads::ThreadMutex::ScopedLock lock(mutex);
+
                                    // now in the second step actually
                                    // resolve the constraints
   const unsigned int n_constrained_dofs = constraint_lines.size();
@@ -1318,7 +1320,6 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
                                    // something with this dof
               const double new_diagonal = std::fabs(local_matrix(local_row,local_row)) != 0 ?
                 std::fabs(local_matrix(local_row,local_row)) : average_diagonal;
-              Threads::ThreadMutex::ScopedLock lock(mutex);
               global_matrix.add(global_row, global_row, new_diagonal);
             }
@@ -1509,7 +1510,6 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
                                    // that accumulated under the given
                                    // process into the global matrix row and
                                    // into the vector
-          Threads::ThreadMutex::ScopedLock lock(mutex);
           const unsigned int n_values = col_ptr - &cols[0];
           Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
                   ExcInternalError());
@@ -1601,6 +1601,8 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
     }
   internals::list_shellsort (my_indices);

+  Threads::ThreadMutex::ScopedLock lock(mutex);
+
   const unsigned int n_constrained_dofs = constraint_lines.size();
   for (unsigned int i=0; i &local_matrix,
               const double new_diagonal = std::fabs(local_matrix(local_row,local_row)) != 0 ?
                 std::fabs(local_matrix(local_row,local_row)) : average_diagonal;
-              Threads::ThreadMutex::ScopedLock lock(mutex);
               global_matrix.add(global_row, global_row, new_diagonal);
             }
@@ -1751,12 +1752,9 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
           Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
                   ExcInternalError());
           if (n_values > 0)
-            {
-              Threads::ThreadMutex::ScopedLock lock(mutex);
-              global_matrix.block(block, block_col).add(row, n_values,
-                                                        &cols[0], &vals[0],
-                                                        false, true);
-            }
+            global_matrix.block(block, block_col).add(row, n_values,
+                                                      &cols[0], &vals[0],
+                                                      false, true);
         }

       if (use_vectors == true)
@@ -1788,10 +1786,7 @@ distribute_local_to_global (const FullMatrix<double> &local_matrix,
             }
         }
       if (val != 0)
-        {
-          Threads::ThreadMutex::ScopedLock lock(mutex);
-          global_vector(my_indices[i].global_row) += val;
-        }
+        global_vector(my_indices[i].global_row) += val;
     }
   }
 }
@@ -1861,6 +1856,8 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
       actual_dof_indices.resize (added_rows);
       std::sort (actual_dof_indices.begin(), actual_dof_indices.end());

+      Threads::ThreadMutex::ScopedLock lock(mutex);
+
       const unsigned int n_constrained_dofs = constraint_lines.size();
       for (unsigned int i=0; i &local_dof_indices,
         }
     }

-  Threads::ThreadMutex::ScopedLock lock(mutex);
-
   if (keep_constrained_entries == true)
     {
       for (unsigned int j=0; j &local_dof_indices,
                                    // to the sparsity pattern. Very easy
                                    // here - just add the same array to all
                                    // the columns...
-      Threads::ThreadMutex::ScopedLock lock(mutex);
       for (unsigned int i=0; i &local_dof_indices,
     }
   internals::list_shellsort (my_indices);

+  Threads::ThreadMutex::ScopedLock lock(mutex);
+
                                    // now in the second step actually
                                    // resolve the constraints
   const unsigned int n_constrained_dofs = constraint_lines.size();
@@ -1982,8 +1978,6 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                                   (local_row, position->entries[q].second));
             }

-          Threads::ThreadMutex::ScopedLock lock(mutex);
-
                                    // need to add the whole row and column
                                    // structure in case we keep constrained
                                    // entries. Unfortunately, we can't use
@@ -2122,7 +2116,6 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                                    // that accumulated under the given
                                    // process into the global matrix row and
                                    // into the vector
-          Threads::ThreadMutex::ScopedLock lock(mutex);
           if (col_ptr != cols.begin())
             sparsity_pattern.add_entries(row, cols.begin(), col_ptr,
                                          true);
@@ -2202,6 +2195,8 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
       actual_dof_indices.resize (added_rows);
       std::sort (actual_dof_indices.begin(), actual_dof_indices.end());

+      Threads::ThreadMutex::ScopedLock lock(mutex);
+
       const unsigned int n_constrained_dofs = constraint_lines.size();
       for (unsigned int i=0; i &local_dof_indices,
         }
     }

-  Threads::ThreadMutex::ScopedLock lock(mutex);
-
   if (keep_constrained_entries == true)
     {
       for (unsigned int j=0; j &local_dof_indices,
                                    // easy operation - just go trough the
                                    // individual blocks and add the same
                                    // array for each row
-      Threads::ThreadMutex::ScopedLock lock(mutex);
       for (unsigned int block=0; block &local_dof_indices,
     }
   internals::list_shellsort (my_indices);

+  Threads::ThreadMutex::ScopedLock lock(mutex);
+
                                    // now in the second step actually
                                    // resolve the constraints
   const unsigned int n_constrained_dofs = constraint_lines.size();
@@ -2342,8 +2336,6 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                                   (local_row, position->entries[q].second));
             }

-          Threads::ThreadMutex::ScopedLock lock(mutex);
-
           if (keep_constrained_entries == true)
             {
               for (unsigned int j=0; j &local_dof_indices,
                                    // that accumulated under the given
                                    // process into the global matrix row and
                                    // into the vector
-          Threads::ThreadMutex::ScopedLock lock(mutex);
           sparsity_pattern.block(block, block_col).add_entries(row, cols.begin(), col_ptr,
-- 
2.39.5
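
The pattern behind this patch: instead of acquiring the mutex separately for every write into the global matrix, vector, or sparsity pattern inside the scatter loops, the scoped lock is taken a single time right after the local indices are sorted, so the lock is set only once per call rather than once per entry. The sketch below is a minimal illustration of that hoisting, using the C++ standard library's std::mutex and std::lock_guard in place of deal.II's Threads::ThreadMutex::ScopedLock; the GlobalVector type and the two scatter functions are made-up stand-ins for this example, not code from deal.II.

#include <mutex>
#include <vector>

// Minimal stand-in for a shared object that several threads assemble into
// (hypothetical type for this sketch only).
struct GlobalVector
{
  std::vector<double> values;
  void add (unsigned int i, double v) { values[i] += v; }
};

std::mutex assembly_mutex;

// Fine-grained locking (what the patch removes): the mutex is locked and
// unlocked again for every single entry that is written.
void scatter_per_entry (GlobalVector &dst,
                        const std::vector<unsigned int> &rows,
                        const std::vector<double>       &local)
{
  for (unsigned int i = 0; i < rows.size(); ++i)
    {
      std::lock_guard<std::mutex> lock (assembly_mutex); // lock per entry
      dst.add (rows[i], local[i]);
    }
}

// Hoisted locking (the pattern of the patch): the lock is acquired once
// before the loop and released when the function returns, so the whole
// local contribution is scattered under a single lock acquisition.
void scatter_once (GlobalVector &dst,
                   const std::vector<unsigned int> &rows,
                   const std::vector<double>       &local)
{
  std::lock_guard<std::mutex> lock (assembly_mutex);     // lock only once
  for (unsigned int i = 0; i < rows.size(); ++i)
    dst.add (rows[i], local[i]);
}

The critical section becomes longer, but since every write in these loops goes to the same mutex-protected global object anyway, serializing them as a whole adds no extra contention while saving one lock/unlock pair per entry; that is the trade-off the patch makes by moving the ScopedLock to just after internals::list_shellsort.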