From: Wolfgang Bangerth
Date: Tue, 14 Dec 2021 18:16:19 +0000 (-0700)
Subject: Convert a few more locally-owned loops with filters.
X-Git-Tag: v9.4.0-rc1~590^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ca4baff35385ba1de0766761a7be8a6faa249ac6;p=dealii.git

Convert a few more locally-owned loops with filters.
---
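The conversion this commit applies is mechanical: a loop over all active
cells with an explicit `if (cell->is_locally_owned())` guard becomes a loop
over a filtered iterator range. A minimal standalone sketch of the idiom
(assuming deal.II with `deal.II/grid/filtered_iterator.h`, which provides
`IteratorFilters::LocallyOwnedCell` and the `operator|` overload; the
function name `n_owned_active_cells` and its counting body are illustrative,
not part of this patch):

#include <deal.II/base/exceptions.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

// Count locally owned active cells twice: once with the guarded loop
// this commit removes, once with the filtered range it introduces.
// Both loops visit exactly the same cells.
template <int dim>
unsigned int
n_owned_active_cells(const Triangulation<dim> &tria)
{
  unsigned int n_guarded = 0;
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      ++n_guarded;

  unsigned int n_filtered = 0;
  for (const auto &cell :
       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
    {
      (void)cell; // only counting; the filter already did the ownership test
      ++n_filtered;
    }

  Assert(n_guarded == n_filtered, ExcInternalError());
  return n_filtered;
}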
diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
index 2aad15ed4d..8edefbaa61 100644
--- a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
+++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
@@ -2819,9 +2819,9 @@ MGTwoLevelTransfer>::reinit(
   IndexSet is_locally_owned_fine(cell_id_translator.size());
   IndexSet is_locally_owned_coarse(cell_id_translator.size());
 
-  for (const auto &cell : dof_handler_fine.active_cell_iterators())
-    if (cell->is_locally_owned())
-      is_locally_owned_fine.add_index(cell_id_translator.translate(cell));
+  for (const auto &cell : dof_handler_fine.active_cell_iterators() |
+                            IteratorFilters::LocallyOwnedCell())
+    is_locally_owned_fine.add_index(cell_id_translator.translate(cell));
 
   for (const auto &cell : dof_handler_coarse.active_cell_iterators() |
                             IteratorFilters::LocallyOwnedCell())
diff --git a/source/distributed/fully_distributed_tria.cc b/source/distributed/fully_distributed_tria.cc
index 098f7951fc..5fba0880f0 100644
--- a/source/distributed/fully_distributed_tria.cc
+++ b/source/distributed/fully_distributed_tria.cc
@@ -173,7 +173,7 @@ namespace parallel
 
           // 4a) set all cells artificial (and set the actual
           //     (level_)subdomain_ids in the next step)
-          for (auto cell = this->begin(); cell != this->end(); ++cell)
+          for (const auto &cell : this->cell_iterators())
             {
               if (cell->is_active())
                 cell->set_subdomain_id(
diff --git a/source/distributed/grid_refinement.cc b/source/distributed/grid_refinement.cc
index c0355617ad..47c167068f 100644
--- a/source/distributed/grid_refinement.cc
+++ b/source/distributed/grid_refinement.cc
@@ -25,6 +25,7 @@
 # include
 
+# include <deal.II/grid/filtered_iterator.h>
 # include
 # include
 # include
@@ -110,13 +111,13 @@ namespace
                    ExcInternalError());
 
             unsigned int owned_index = 0;
-            for (const auto &cell : tria.active_cell_iterators())
-              if (cell->subdomain_id() == tria.locally_owned_subdomain())
-                {
-                  locally_owned_indicators(owned_index) =
-                    criteria(cell->active_cell_index());
-                  ++owned_index;
-                }
+            for (const auto &cell :
+                 tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+              {
+                locally_owned_indicators(owned_index) =
+                  criteria(cell->active_cell_index());
+                ++owned_index;
+              }
             Assert(owned_index == tria.n_locally_owned_active_cells(),
                    ExcInternalError());
   }
diff --git a/source/distributed/repartitioning_policy_tools.cc b/source/distributed/repartitioning_policy_tools.cc
index ada719d1c2..f22756fa8a 100644
--- a/source/distributed/repartitioning_policy_tools.cc
+++ b/source/distributed/repartitioning_policy_tools.cc
@@ -206,10 +206,10 @@ namespace RepartitioningPolicyTools
 
     // step 1) check if all processes have enough cells
 
-    unsigned int n_locally_owned_active_cells = 0;
-    for (const auto &cell : tria_in.active_cell_iterators())
-      if (cell->is_locally_owned())
-        ++n_locally_owned_active_cells;
+    const auto locally_owned_cells =
+      tria_in.active_cell_iterators() | IteratorFilters::LocallyOwnedCell();
+    const unsigned int n_locally_owned_active_cells =
+      std::distance(locally_owned_cells.begin(), locally_owned_cells.end());
 
     const auto comm = tria_in.get_communicator();
 
@@ -293,12 +293,11 @@ namespace RepartitioningPolicyTools
     const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);
 
     // determine weight of each cell
-    for (const auto &cell : tria->active_cell_iterators())
-      if (cell->is_locally_owned())
-        weights[partitioner->global_to_local(
-          cell->global_active_cell_index())] =
-          weighting_function(
-            cell, Triangulation<dim, spacedim>::CellStatus::CELL_PERSIST);
+    for (const auto &cell :
+         tria->active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+      weights[partitioner->global_to_local(cell->global_active_cell_index())] =
+        weighting_function(
+          cell, Triangulation<dim, spacedim>::CellStatus::CELL_PERSIST);
 
     // determine weight of all the cells locally owned by this process
     uint64_t process_local_weight = 0;
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index f8e020f090..deb132b1fe 100644
--- a/source/distributed/shared_tria.cc
+++ b/source/distributed/shared_tria.cc
@@ -282,14 +282,11 @@ namespace parallel
 # ifdef DEBUG
         {
           // Assert that each cell is owned by a processor
-          unsigned int n_my_cells = 0;
-          typename parallel::shared::Triangulation<dim, spacedim>::active_cell_iterator
-            cell = this->begin_active(),
-            endc = this->end();
-          for (; cell != endc; ++cell)
-            if (cell->is_locally_owned())
-              n_my_cells += 1;
+          const unsigned int n_my_cells = std::count_if(
+            this->begin_active(),
+            typename Triangulation<dim, spacedim>::active_cell_iterator(
+              this->end()),
+            [](const auto &i) { return (i.is_locally_owned()); });
 
           const unsigned int total_cells =
             Utilities::MPI::sum(n_my_cells, this->get_communicator());
@@ -301,13 +298,11 @@ namespace parallel
           // cell is owned by a processor
           if (settings & construct_multigrid_hierarchy)
             {
-              unsigned int n_my_cells = 0;
-              typename parallel::shared::Triangulation<dim, spacedim>::cell_iterator
-                cell = this->begin(),
-                endc = this->end();
-              for (; cell != endc; ++cell)
-                if (cell->is_locally_owned_on_level())
-                  n_my_cells += 1;
+              const unsigned int n_my_cells =
+                std::count_if(this->begin(), this->end(), [](const auto &i) {
+                  return (i.is_locally_owned_on_level());
+                });
+
               const unsigned int total_cells =
                 Utilities::MPI::sum(n_my_cells, this->get_communicator());
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 7c622f5cc0..945fed6c7e 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -22,6 +22,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -178,11 +179,16 @@ namespace parallel
         }
 
     if (this->n_levels() > 0)
-      for (const auto &cell : this->active_cell_iterators())
-        if (cell->subdomain_id() == my_subdomain)
-          ++number_cache.n_locally_owned_active_cells;
+      number_cache.n_locally_owned_active_cells = std::count_if(
+        this->begin_active(),
+        typename Triangulation<dim, spacedim>::active_cell_iterator(
+          this->end()),
+        [](const auto &i) { return i.is_locally_owned(); });
+    else
+      number_cache.n_locally_owned_active_cells = 0;
 
-    // Potentially cast to a 64 bit type before accumulating to avoid overflow:
+    // Potentially cast to a 64 bit type before accumulating to avoid
+    // overflow:
     number_cache.n_global_active_cells =
       Utilities::MPI::sum(static_cast(
                             number_cache.n_locally_owned_active_cells),
@@ -191,7 +197,8 @@ namespace parallel
     number_cache.n_global_levels =
       Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
 
-    // Store MPI ranks of level ghost owners of this processor on all levels.
+    // Store MPI ranks of level ghost owners of this processor on all
+    // levels.
     if (this->is_multilevel_hierarchy_constructed() == true)
       {
         number_cache.level_ghost_owners.clear();
@@ -201,18 +208,15 @@ namespace parallel
           return;
 
         // find level ghost owners
-        for (typename Triangulation<dim, spacedim>::cell_iterator cell =
-               this->begin();
-             cell != this->end();
-             ++cell)
+        for (const auto &cell : this->cell_iterators())
           if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
               cell->level_subdomain_id() != this->locally_owned_subdomain())
             this->number_cache.level_ghost_owners.insert(
               cell->level_subdomain_id());
 
 # ifdef DEBUG
-        // Check that level_ghost_owners is symmetric by sending a message to
-        // everyone
+        // Check that level_ghost_owners is symmetric by sending a message
+        // to everyone
         {
           int ierr = MPI_Barrier(this->mpi_communicator);
           AssertThrowMPI(ierr);
@@ -1009,10 +1013,11 @@ namespace parallel
                     cell_sizes_variable_cumulative.end(),
                     cell_sizes_variable_cumulative.begin());
 
-                  // Serialize cumulative variable size vector value-by-value.
-                  // This way we can circumvent the overhead of storing the
-                  // container object as a whole, since we know its size by
-                  // the number of registered callback functions.
+                  // Serialize cumulative variable size vector
+                  // value-by-value. This way we can circumvent the overhead
+                  // of storing the container object as a whole, since we
+                  // know its size by the number of registered callback
+                  // functions.
                   data_fixed_it->resize(n_callbacks_variable *
                                         sizeof(unsigned int));
                   for (unsigned int i = 0; i < n_callbacks_variable; ++i)
@@ -1048,10 +1053,10 @@ namespace parallel
       // functions (i.e. a cell that was not flagged with CELL_INVALID)
       // and store the sizes of each buffer.
       //
-      // To deal with the case that at least one of the processors does not own
-      // any cell at all, we will exchange the information about the data sizes
-      // among them later. The code in between is still well-defined, since the
-      // following loops will be skipped.
+      // To deal with the case that at least one of the processors does not
+      // own any cell at all, we will exchange the information about the data
+      // sizes among them later. The code in between is still well-defined,
+      // since the following loops will be skipped.
       std::vector<unsigned int> local_sizes_fixed(
         1 + n_callbacks_fixed + (variable_size_data_stored ? 1 : 0));
       for (const auto &data_cell : packed_fixed_size_data)
@@ -1122,7 +1127,8 @@ namespace parallel
                         src_sizes_variable.end(),
                         std::vector::size_type(0));
 
-      // Move every piece of packed fixed size data into the consecutive buffer.
+      // Move every piece of packed fixed size data into the consecutive
+      // buffer.
       src_data_fixed.reserve(expected_size_fixed);
       for (const auto &data_cell_fixed : packed_fixed_size_data)
         {
@@ -1222,8 +1228,8 @@ namespace parallel
     {
       // We decode the handle returned by register_data_attach() back into
       // a format we can use. All even handles belong to those callback
-      // functions which write/read variable size data, all odd handles interact
-      // with fixed size buffers.
+      // functions which write/read variable size data, all odd handles
+      // interact with fixed size buffers.
       const bool callback_variable_transfer = (handle % 2 == 0);
       const unsigned int callback_index = handle / 2;
@@ -1314,7 +1320,8 @@ namespace parallel
                       spacedim>::CELL_INVALID)
             {
               // Extract the corresponding values for offset and size from
-              // the cumulative sizes array stored in the fixed size buffer.
+              // the cumulative sizes array stored in the fixed size
+              // buffer.
               if (callback_index == 0)
                 offset = 0;
               else
@@ -1427,8 +1434,9 @@ namespace parallel
       // ------------------
 
       // Write cumulative sizes to file.
-      // Since each processor owns the same information about the data sizes,
-      // it is sufficient to let only the first processor perform this task.
+      // Since each processor owns the same information about the data
+      // sizes, it is sufficient to let only the first processor perform
+      // this task.
       if (myrank == 0)
         {
           ierr = MPI_File_write_at(fh,
@@ -1445,8 +1453,8 @@ namespace parallel
       const MPI_Offset size_header =
         sizes_fixed_cumulative.size() * sizeof(unsigned int);
 
-      // Make sure we do the following computation in 64bit integers to be able
-      // to handle 4GB+ files:
+      // Make sure we do the following computation in 64bit integers to be
+      // able to handle 4GB+ files:
       const MPI_Offset my_global_file_position =
         size_header +
        static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
@@ -1518,8 +1526,8 @@ namespace parallel
           const MPI_Offset my_global_file_position =
             static_cast<MPI_Offset>(global_first_cell) * sizeof(unsigned int);
 
-          // It is very unlikely that a single process has more than 2 billion
-          // cells, but we might as well check.
+          // It is very unlikely that a single process has more than
+          // 2 billion cells, but we might as well check.
           AssertThrow(src_sizes_variable.size() <
                         static_cast<std::size_t>(
                           std::numeric_limits<int>::max()),
@@ -1639,9 +1647,9 @@ namespace parallel
       AssertThrowMPI(ierr);
 
       // Read cumulative sizes from file.
-      // Since all processors need the same information about the data sizes,
-      // let each of them retrieve it by reading from the same location in
-      // the file.
+      // Since all processors need the same information about the data
+      // sizes, let each of them retrieve it by reading from the same
+      // location in the file.
       sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed +
                                     (variable_size_data_stored ? 1 : 0));
       ierr = MPI_File_read_at(fh,
@@ -1661,8 +1669,8 @@ namespace parallel
       const MPI_Offset size_header =
         sizes_fixed_cumulative.size() * sizeof(unsigned int);
 
-      // Make sure we do the following computation in 64bit integers to be able
-      // to handle 4GB+ files:
+      // Make sure we do the following computation in 64bit integers to be
+      // able to handle 4GB+ files:
       const MPI_Offset my_global_file_position =
         size_header +
        static_cast<MPI_Offset>(global_first_cell) * bytes_per_cell;
@@ -1736,8 +1744,8 @@ namespace parallel
 
       AssertThrowMPI(ierr);
 
-      // Compute my data size in bytes and compute prefix sum. We do this in
-      // 64 bit to avoid overflow for files larger than 4 GB:
+      // Compute my data size in bytes and compute prefix sum. We do this
+      // in 64 bit to avoid overflow for files larger than 4 GB:
      const std::uint64_t size_on_proc =
        std::accumulate(dest_sizes_variable.begin(),
                        dest_sizes_variable.end(),
diff --git a/source/dofs/dof_tools.cc b/source/dofs/dof_tools.cc
index 3ad2a1025e..1acf4d5602 100644
--- a/source/dofs/dof_tools.cc
+++ b/source/dofs/dof_tools.cc
@@ -1061,16 +1061,16 @@ namespace DoFTools
     std::vector<types::global_dof_index> dof_indices;
     std::set<types::global_dof_index>    global_dof_indices;
 
-    for (const auto &cell : dof_handler.active_cell_iterators())
-      if (cell->is_locally_owned())
-        {
-          dof_indices.resize(cell->get_fe().n_dofs_per_cell());
-          cell->get_dof_indices(dof_indices);
+    for (const auto &cell : dof_handler.active_cell_iterators() |
+                            IteratorFilters::LocallyOwnedCell())
+      {
+        dof_indices.resize(cell->get_fe().n_dofs_per_cell());
+        cell->get_dof_indices(dof_indices);
 
-          for (const types::global_dof_index dof_index : dof_indices)
-            if (!dof_set.is_element(dof_index))
-              global_dof_indices.insert(dof_index);
-        }
+        for (const types::global_dof_index dof_index : dof_indices)
+          if (!dof_set.is_element(dof_index))
+            global_dof_indices.insert(dof_index);
+      }
 
     dof_set.add_indices(global_dof_indices.begin(), global_dof_indices.end());
 
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index d5a4891c51..2e98d3a32d 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -3905,11 +3905,11 @@ namespace GridTools
           // In a first step, obtain the weights of the locally owned
           // cells. For all others, the weight remains at the zero the
          // vector was initialized with above.
-          for (const auto &cell : triangulation.active_cell_iterators())
-            if (cell->is_locally_owned())
-              cell_weights[cell->active_cell_index()] =
-                triangulation.signals.cell_weight(
-                  cell, Triangulation<dim, spacedim>::CellStatus::CELL_PERSIST);
+          for (const auto &cell : triangulation.active_cell_iterators() |
+                                  IteratorFilters::LocallyOwnedCell())
+            cell_weights[cell->active_cell_index()] =
+              triangulation.signals.cell_weight(
+                cell, Triangulation<dim, spacedim>::CellStatus::CELL_PERSIST);
 
           // If this is a parallel triangulation, we then need to also
           // get the weights for all other cells. We have asserted above
diff --git a/source/grid/grid_tools_cache.cc b/source/grid/grid_tools_cache.cc
index 470a8271ac..8cda775d0c 100644
--- a/source/grid/grid_tools_cache.cc
+++ b/source/grid/grid_tools_cache.cc
@@ -156,10 +156,9 @@ namespace GridTools
                      typename Triangulation<dim, spacedim>::active_cell_iterator>>
         boxes;
       boxes.reserve(tria->n_active_cells());
-      for (const auto &cell : tria->active_cell_iterators())
-        if (cell->is_locally_owned())
-          boxes.emplace_back(
-            std::make_pair(mapping->get_bounding_box(cell), cell));
+      for (const auto &cell : tria->active_cell_iterators() |
+                              IteratorFilters::LocallyOwnedCell())
+        boxes.emplace_back(mapping->get_bounding_box(cell), cell);
 
       locally_owned_cell_bounding_boxes_rtree = pack_rtree(boxes);
       update_flags =
diff --git a/source/hp/refinement.cc b/source/hp/refinement.cc
index 57917f61a2..8656009d94 100644
--- a/source/hp/refinement.cc
+++ b/source/hp/refinement.cc
@@ -578,85 +578,83 @@ namespace hp
     // deep copy error indicators
     predicted_errors = error_indicators;
 
-    for (const auto &cell : dof_handler.active_cell_iterators())
-      if (cell->is_locally_owned())
-        {
-          // current cell will not be adapted
-          if (!(cell->future_fe_index_set()) && !(cell->refine_flag_set()) &&
-              !(cell->coarsen_flag_set()))
-            {
-              predicted_errors[cell->active_cell_index()] *= gamma_n;
-              continue;
-            }
+    for (const auto &cell : dof_handler.active_cell_iterators() |
+                            IteratorFilters::LocallyOwnedCell())
+      {
+        // current cell will not be adapted
+        if (!(cell->future_fe_index_set()) && !(cell->refine_flag_set()) &&
+            !(cell->coarsen_flag_set()))
+          {
+            predicted_errors[cell->active_cell_index()] *= gamma_n;
+            continue;
+          }
 
-          // current cell will be adapted
-          // determine degree of its future finite element
-          if (cell->coarsen_flag_set())
-            {
-              // cell will be coarsened, thus determine future finite element
-              // on parent cell
-              const auto &parent = cell->parent();
-              if (future_fe_indices_on_coarsened_cells.find(parent) ==
-                  future_fe_indices_on_coarsened_cells.end())
-                {
+        // current cell will be adapted
+        // determine degree of its future finite element
+        if (cell->coarsen_flag_set())
+          {
+            // cell will be coarsened, thus determine future finite element
+            // on parent cell
+            const auto &parent = cell->parent();
+            if (future_fe_indices_on_coarsened_cells.find(parent) ==
+                future_fe_indices_on_coarsened_cells.end())
+              {
 #ifdef DEBUG
-                  for (const auto &child : parent->child_iterators())
-                    Assert(child->is_active() && child->coarsen_flag_set(),
-                           typename dealii::Triangulation<
-                             dim>::ExcInconsistentCoarseningFlags());
+                for (const auto &child : parent->child_iterators())
+                  Assert(child->is_active() && child->coarsen_flag_set(),
+                         typename dealii::Triangulation<
+                           dim>::ExcInconsistentCoarseningFlags());
#endif
-                  parent_future_fe_index =
-                    dealii::internal::hp::DoFHandlerImplementation::
-                      dominated_future_fe_on_children(parent);
+                parent_future_fe_index =
+                  dealii::internal::hp::DoFHandlerImplementation::
+                    dominated_future_fe_on_children(parent);
 
-                  future_fe_indices_on_coarsened_cells.insert(
-                    {parent, parent_future_fe_index});
-                }
-              else
-                {
-                  parent_future_fe_index =
-                    future_fe_indices_on_coarsened_cells[parent];
-                }
+                future_fe_indices_on_coarsened_cells.insert(
+                  {parent, parent_future_fe_index});
+              }
+            else
+              {
+                parent_future_fe_index =
+                  future_fe_indices_on_coarsened_cells[parent];
+              }
 
-              future_fe_degree =
-                dof_handler.get_fe_collection()[parent_future_fe_index]
-                  .degree;
-            }
-          else
-            {
-              // future finite element on current cell is already set
-              future_fe_degree =
-                dof_handler.get_fe_collection()[cell->future_fe_index()]
-                  .degree;
-            }
+            future_fe_degree =
+              dof_handler.get_fe_collection()[parent_future_fe_index].degree;
+          }
+        else
+          {
+            // future finite element on current cell is already set
+            future_fe_degree =
+              dof_handler.get_fe_collection()[cell->future_fe_index()].degree;
+          }
 
-          // step 1: exponential decay with p-adaptation
-          if (cell->future_fe_index_set())
-            {
-              predicted_errors[cell->active_cell_index()] *=
-                std::pow(gamma_p,
-                         int(future_fe_degree) - int(cell->get_fe().degree));
-            }
+        // step 1: exponential decay with p-adaptation
+        if (cell->future_fe_index_set())
+          {
+            predicted_errors[cell->active_cell_index()] *=
+              std::pow(gamma_p,
+                       int(future_fe_degree) - int(cell->get_fe().degree));
+          }
 
-          // step 2: algebraic decay with h-adaptation
-          if (cell->refine_flag_set())
-            {
-              predicted_errors[cell->active_cell_index()] *=
-                (gamma_h * std::pow(.5, future_fe_degree));
+        // step 2: algebraic decay with h-adaptation
+        if (cell->refine_flag_set())
+          {
+            predicted_errors[cell->active_cell_index()] *=
+              (gamma_h * std::pow(.5, future_fe_degree));
 
-              // predicted error will be split on children cells
-              // after adaptation via CellDataTransfer
-            }
-          else if (cell->coarsen_flag_set())
-            {
-              predicted_errors[cell->active_cell_index()] /=
-                (gamma_h * std::pow(.5, future_fe_degree));
+            // predicted error will be split on children cells
+            // after adaptation via CellDataTransfer
+          }
+        else if (cell->coarsen_flag_set())
+          {
+            predicted_errors[cell->active_cell_index()] /=
+              (gamma_h * std::pow(.5, future_fe_degree));
 
-              // predicted error will be summed up on parent cell
-              // after adaptation via CellDataTransfer
-            }
-        }
+            // predicted error will be summed up on parent cell
+            // after adaptation via CellDataTransfer
+          }
+      }
 }
diff --git a/source/numerics/data_out_resample.cc b/source/numerics/data_out_resample.cc
index a2d564a86e..156a7ffbc5 100644
--- a/source/numerics/data_out_resample.cc
+++ b/source/numerics/data_out_resample.cc
@@ -69,11 +69,9 @@ DataOutResample<dim, patch_dim, spacedim>::update_mapping(
   partitioner = std::make_shared<Utilities::MPI::Partitioner>(
     patch_dof_handler.locally_owned_dofs(), active_dofs, MPI_COMM_WORLD);
 
-  for (const auto &cell : patch_dof_handler.active_cell_iterators())
+  for (const auto &cell : patch_dof_handler.active_cell_iterators() |
+                          IteratorFilters::LocallyOwnedCell())
     {
-      if (cell->is_locally_owned() == false)
-        continue;
-
       fe_values.reinit(cell);
 
       cell->get_dof_indices(dof_indices);
diff --git a/source/numerics/smoothness_estimator.cc b/source/numerics/smoothness_estimator.cc
index a92347cf4c..691f292888 100644
--- a/source/numerics/smoothness_estimator.cc
+++ b/source/numerics/smoothness_estimator.cc
@@ -120,62 +120,59 @@ namespace SmoothnessEstimator
       Vector local_dof_values;
       std::vector<double> converted_indices;
       std::pair<std::vector<unsigned int>, std::vector<double>> res;
-      for (const auto &cell : dof_handler.active_cell_iterators())
-        if (cell->is_locally_owned())
-          {
-            if (!only_flagged_cells || cell->refine_flag_set() ||
-                cell->coarsen_flag_set())
-              {
-                n_modes = fe_legendre.get_n_coefficients_per_direction(
-                  cell->active_fe_index());
-                resize(expansion_coefficients, n_modes);
-
-                local_dof_values.reinit(cell->get_fe().n_dofs_per_cell());
-                cell->get_dof_values(solution, local_dof_values);
-
-                fe_legendre.calculate(local_dof_values,
-                                      cell->active_fe_index(),
-                                      expansion_coefficients);
-
-                // We fit our exponential decay of expansion coefficients to the
-                // provided regression_strategy on each possible value of |k|.
-                // To this end, we use FESeries::process_coefficients() to
-                // rework coefficients into the desired format.
-                res = FESeries::process_coefficients(
-                  expansion_coefficients,
-                  [n_modes](const TableIndices<dim> &indices) {
-                    return index_sum_less_than_N(indices, n_modes);
-                  },
-                  regression_strategy,
-                  smallest_abs_coefficient);
-
-                Assert(res.first.size() == res.second.size(),
-                       ExcInternalError());
-
-                // Last, do the linear regression.
-                float regularity = std::numeric_limits<float>::infinity();
-                if (res.first.size() > 1)
-                  {
-                    // Prepare linear equation for the logarithmic least squares
-                    // fit.
-                    converted_indices.assign(res.first.begin(),
-                                             res.first.end());
-
-                    for (auto &residual_element : res.second)
-                      residual_element = std::log(residual_element);
-
-                    const std::pair<double, double> fit =
-                      FESeries::linear_regression(converted_indices,
-                                                  res.second);
-                    regularity = static_cast<float>(-fit.first);
-                  }
-
-                smoothness_indicators(cell->active_cell_index()) = regularity;
-              }
-            else
-              smoothness_indicators(cell->active_cell_index()) =
-                numbers::signaling_nan<float>();
-          }
+      for (const auto &cell : dof_handler.active_cell_iterators() |
+                              IteratorFilters::LocallyOwnedCell())
+        {
+          if (!only_flagged_cells || cell->refine_flag_set() ||
+              cell->coarsen_flag_set())
+            {
+              n_modes = fe_legendre.get_n_coefficients_per_direction(
+                cell->active_fe_index());
+              resize(expansion_coefficients, n_modes);
+
+              local_dof_values.reinit(cell->get_fe().n_dofs_per_cell());
+              cell->get_dof_values(solution, local_dof_values);
+
+              fe_legendre.calculate(local_dof_values,
+                                    cell->active_fe_index(),
+                                    expansion_coefficients);
+
+              // We fit our exponential decay of expansion coefficients to the
+              // provided regression_strategy on each possible value of |k|.
+              // To this end, we use FESeries::process_coefficients() to
+              // rework coefficients into the desired format.
+              res = FESeries::process_coefficients(
+                expansion_coefficients,
+                [n_modes](const TableIndices<dim> &indices) {
+                  return index_sum_less_than_N(indices, n_modes);
+                },
+                regression_strategy,
+                smallest_abs_coefficient);
+
+              Assert(res.first.size() == res.second.size(), ExcInternalError());
+
+              // Last, do the linear regression.
+              float regularity = std::numeric_limits<float>::infinity();
+              if (res.first.size() > 1)
+                {
+                  // Prepare linear equation for the logarithmic least squares
+                  // fit.
+                  converted_indices.assign(res.first.begin(), res.first.end());
+
+                  for (auto &residual_element : res.second)
+                    residual_element = std::log(residual_element);
+
+                  const std::pair<double, double> fit =
+                    FESeries::linear_regression(converted_indices, res.second);
+                  regularity = static_cast<float>(-fit.first);
+                }
+
+              smoothness_indicators(cell->active_cell_index()) = regularity;
+            }
+          else
+            smoothness_indicators(cell->active_cell_index()) =
+              numbers::signaling_nan<float>();
+        }
     }
 
@@ -213,76 +210,76 @@ namespace SmoothnessEstimator
       x.reserve(max_degree);
       y.reserve(max_degree);
 
-      for (const auto &cell : dof_handler.active_cell_iterators())
-        if (cell->is_locally_owned())
-          {
-            if (!only_flagged_cells || cell->refine_flag_set() ||
-                cell->coarsen_flag_set())
-              {
-                n_modes = fe_legendre.get_n_coefficients_per_direction(
-                  cell->active_fe_index());
-                resize(expansion_coefficients, n_modes);
-
-                const unsigned int pe = cell->get_fe().degree;
-                Assert(pe > 0, ExcInternalError());
-
-                // since we use coefficients with indices [1,pe] in each
-                // direction, the number of coefficients we need to calculate is
-                // at least N=pe+1
-                AssertIndexRange(pe, n_modes);
-
-                local_dof_values.reinit(cell->get_fe().n_dofs_per_cell());
-                cell->get_dof_values(solution, local_dof_values);
-
-                fe_legendre.calculate(local_dof_values,
-                                      cell->active_fe_index(),
-                                      expansion_coefficients);
-
-                // choose the smallest decay of coefficients in each direction,
-                // i.e. the maximum decay slope k_v as in exp(-k_v)
-                double k_v = std::numeric_limits<double>::infinity();
-                for (unsigned int d = 0; d < dim; ++d)
-                  {
-                    x.resize(0);
-                    y.resize(0);
-
-                    // will use all non-zero coefficients allowed by the
-                    // predicate function
-                    for (unsigned int i = 0; i <= pe; ++i)
-                      if (coefficients_predicate[i])
-                        {
-                          TableIndices<dim> ind;
-                          ind[d] = i;
-                          const double coeff_abs =
-                            std::abs(expansion_coefficients(ind));
-
-                          if (coeff_abs > smallest_abs_coefficient)
-                            {
-                              x.push_back(i);
-                              y.push_back(std::log(coeff_abs));
-                            }
-                        }
-
-                    // in case we don't have enough non-zero coefficient to fit,
-                    // skip this direction
-                    if (x.size() < 2)
-                      continue;
-
-                    const std::pair<double, double> fit =
-                      FESeries::linear_regression(x, y);
-
-                    // decay corresponds to negative slope
-                    // take the lesser negative slope along each direction
-                    k_v = std::min(k_v, -fit.first);
-                  }
-
-                smoothness_indicators(cell->active_cell_index()) =
-                  static_cast<float>(k_v);
-              }
-            else
+      for (const auto &cell : dof_handler.active_cell_iterators() |
+                              IteratorFilters::LocallyOwnedCell())
+        {
+          if (!only_flagged_cells || cell->refine_flag_set() ||
+              cell->coarsen_flag_set())
+            {
+              n_modes = fe_legendre.get_n_coefficients_per_direction(
+                cell->active_fe_index());
+              resize(expansion_coefficients, n_modes);
+
+              const unsigned int pe = cell->get_fe().degree;
+              Assert(pe > 0, ExcInternalError());
+
+              // since we use coefficients with indices [1,pe] in each
+              // direction, the number of coefficients we need to calculate is
+              // at least N=pe+1
+              AssertIndexRange(pe, n_modes);
+
+              local_dof_values.reinit(cell->get_fe().n_dofs_per_cell());
+              cell->get_dof_values(solution, local_dof_values);
+
+              fe_legendre.calculate(local_dof_values,
+                                    cell->active_fe_index(),
+                                    expansion_coefficients);
+
+              // choose the smallest decay of coefficients in each direction,
+              // i.e. the maximum decay slope k_v as in exp(-k_v)
+              double k_v = std::numeric_limits<double>::infinity();
+              for (unsigned int d = 0; d < dim; ++d)
+                {
+                  x.resize(0);
+                  y.resize(0);
+
+                  // will use all non-zero coefficients allowed by the
+                  // predicate function
+                  for (unsigned int i = 0; i <= pe; ++i)
+                    if (coefficients_predicate[i])
+                      {
+                        TableIndices<dim> ind;
+                        ind[d] = i;
+                        const double coeff_abs =
+                          std::abs(expansion_coefficients(ind));
+
+                        if (coeff_abs > smallest_abs_coefficient)
+                          {
+                            x.push_back(i);
+                            y.push_back(std::log(coeff_abs));
+                          }
+                      }
+
+                  // in case we don't have enough non-zero coefficient to fit,
+                  // skip this direction
+                  if (x.size() < 2)
+                    continue;
+
+                  const std::pair<double, double> fit =
+                    FESeries::linear_regression(x, y);
+
+                  // decay corresponds to negative slope
+                  // take the lesser negative slope along each direction
+                  k_v = std::min(k_v, -fit.first);
+                }
+
               smoothness_indicators(cell->active_cell_index()) =
-                numbers::signaling_nan<float>();
-          }
+                static_cast<float>(k_v);
+            }
+          else
+            smoothness_indicators(cell->active_cell_index()) =
+              numbers::signaling_nan<float>();
+        }
     }
diff --git a/tests/matrix_free/laplace_operator_03.cc b/tests/matrix_free/laplace_operator_03.cc
index f3e7cbf4a0..59f6bd2a65 100644
--- a/tests/matrix_free/laplace_operator_03.cc
+++ b/tests/matrix_free/laplace_operator_03.cc
@@ -55,10 +55,10 @@ test()
   parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
   GridGenerator::hyper_cube(tria);
   tria.refine_global(1);
-  for (const auto &cell : tria.active_cell_iterators())
-    if (cell->is_locally_owned())
-      if (cell->center().norm() < 0.2)
-        cell->set_refine_flag();
+  for (const auto &cell :
+       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    if (cell->center().norm() < 0.2)
+      cell->set_refine_flag();
   tria.execute_coarsening_and_refinement();
   if (dim < 3 && fe_degree < 2)
     tria.refine_global(2);
diff --git a/tests/mpi/error_prediction_01.cc b/tests/mpi/error_prediction_01.cc
index 1f9a311ba5..1c4a920c84 100644
--- a/tests/mpi/error_prediction_01.cc
+++ b/tests/mpi/error_prediction_01.cc
@@ -102,23 +102,23 @@ test()
 
   // ----- verify ------
   deallog << "pre_adaptation" << std::endl;
-  for (const auto &cell : dh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        deallog << " cell:" << cell->id().to_string()
-                << " fe_deg:" << cell->get_fe().degree
-                << " error:" << error_indicators[cell->active_cell_index()];
+  for (const auto &cell :
+       dh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      deallog << " cell:" << cell->id().to_string()
+              << " fe_deg:" << cell->get_fe().degree
+              << " error:" << error_indicators[cell->active_cell_index()];
 
-        if (cell->coarsen_flag_set())
-          deallog << " coarsening";
-        else if (cell->refine_flag_set())
-          deallog << " refining";
+      if (cell->coarsen_flag_set())
+        deallog << " coarsening";
+      else if (cell->refine_flag_set())
+        deallog << " refining";
 
-        if (cell->future_fe_index_set())
-          deallog << " future_fe_deg:" << fes[cell->future_fe_index()].degree;
+      if (cell->future_fe_index_set())
+        deallog << " future_fe_deg:" << fes[cell->future_fe_index()].degree;
 
-        deallog << std::endl;
-      }
+      deallog << std::endl;
+    }
 
   // ----- execute adaptation -----
   parallel::distributed::CellDataTransfer>
diff --git a/tests/mpi/error_prediction_02.cc b/tests/mpi/error_prediction_02.cc
index c03f34f729..f7ca2994bf 100644
--- a/tests/mpi/error_prediction_02.cc
+++ b/tests/mpi/error_prediction_02.cc
@@ -128,23 +128,23 @@ test()
 
   // ----- verify ------
   deallog << "pre_adaptation" << std::endl;
-  for (const auto &cell : dh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        deallog << " cell:" << cell->id().to_string()
-                << " fe_deg:" << cell->get_fe().degree
-                << " error:" << error_indicators[cell->active_cell_index()];
-
-        if (cell->coarsen_flag_set())
-          deallog << " coarsening";
-        else if (cell->refine_flag_set())
-          deallog << " refining";
-
-        if (cell->future_fe_index_set())
-          deallog << " future_fe_deg:" << fes[cell->future_fe_index()].degree;
-
-        deallog << std::endl;
-      }
+  for (const auto &cell :
+       dh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      deallog << " cell:" << cell->id().to_string()
+              << " fe_deg:" << cell->get_fe().degree
+              << " error:" << error_indicators[cell->active_cell_index()];
+
+      if (cell->coarsen_flag_set())
+        deallog << " coarsening";
+      else if (cell->refine_flag_set())
+        deallog << " refining";
+
+      if (cell->future_fe_index_set())
+        deallog << " future_fe_deg:" << fes[cell->future_fe_index()].degree;
+
+      deallog << std::endl;
+    }
 
   // ----- execute adaptation -----
   parallel::distributed::CellDataTransfer>
diff --git a/tests/mpi/limit_p_level_difference_01.cc b/tests/mpi/limit_p_level_difference_01.cc
index 38d935c1b9..a99393782a 100644
--- a/tests/mpi/limit_p_level_difference_01.cc
+++ b/tests/mpi/limit_p_level_difference_01.cc
@@ -94,9 +94,9 @@ test(const unsigned int fes_size, const unsigned int max_difference)
 
   // display number of cells for each FE index
   std::vector<unsigned int> count(fes.size(), 0);
-  for (const auto &cell : dofh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      count[cell->active_fe_index()]++;
+  for (const auto &cell :
+       dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    count[cell->active_fe_index()]++;
   Utilities::MPI::sum(count, tria.get_communicator(), count);
   deallog << "fe count:" << count << std::endl;
 
diff --git a/tests/mpi/limit_p_level_difference_02.cc b/tests/mpi/limit_p_level_difference_02.cc
index 6858278a6f..6ff3b5aee3 100644
--- a/tests/mpi/limit_p_level_difference_02.cc
+++ b/tests/mpi/limit_p_level_difference_02.cc
@@ -100,9 +100,9 @@ test(const unsigned int fes_size, const unsigned int max_difference)
 
       // display number of cells for each FE index
       std::vector<unsigned int> count(fes.size(), 0);
-      for (const auto &cell : dofh.active_cell_iterators())
-        if (cell->is_locally_owned())
-          count[cell->active_fe_index()]++;
+      for (const auto &cell :
+           dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+        count[cell->active_fe_index()]++;
       Utilities::MPI::sum(count, tria.get_communicator(), count);
       deallog << "cycle:" << i << ", fe count:" << count << std::endl;
     }
diff --git a/tests/mpi/p_refinement_and_coarsening.cc b/tests/mpi/p_refinement_and_coarsening.cc
index 21be8ccb7b..41f87d49f8 100644
--- a/tests/mpi/p_refinement_and_coarsening.cc
+++ b/tests/mpi/p_refinement_and_coarsening.cc
@@ -52,15 +52,15 @@ test()
 
   // set future_fe_indices
   unsigned int future_feidx = 0;
-  for (const auto &cell : dh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        // check if cell is initialized correctly
-        Assert(cell->active_fe_index() == 0, ExcInternalError());
-
-        cell->set_future_fe_index(future_feidx);
-        future_feidx = ((future_feidx + 1) < fe.size()) ? future_feidx + 1 : 0;
-      }
+  for (const auto &cell :
+       dh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      // check if cell is initialized correctly
+      Assert(cell->active_fe_index() == 0, ExcInternalError());
+
+      cell->set_future_fe_index(future_feidx);
+      future_feidx = ((future_feidx + 1) < fe.size()) ? future_feidx + 1 : 0;
+    }
 
   dh.distribute_dofs(fe);
   tria.execute_coarsening_and_refinement();
diff --git a/tests/mpi/refine_and_coarsen_fixed_fraction_08.cc b/tests/mpi/refine_and_coarsen_fixed_fraction_08.cc
index 37974b3167..465e54dac8 100644
--- a/tests/mpi/refine_and_coarsen_fixed_fraction_08.cc
+++ b/tests/mpi/refine_and_coarsen_fixed_fraction_08.cc
@@ -65,15 +65,15 @@ test()
 
   Vector indicator(tria.n_active_cells());
   // assign each cell a globally unique cellid
-  for (const auto &cell : tria.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        const std::string cellid = cell->id().to_string();
-        const unsigned int fine_cellid =
-          std::stoul(cellid.substr(cellid.find(':') + 1, std::string::npos));
+  for (const auto &cell :
+       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      const std::string cellid = cell->id().to_string();
+      const unsigned int fine_cellid =
+        std::stoul(cellid.substr(cellid.find(':') + 1, std::string::npos));
 
-        indicator[cell->active_cell_index()] = fine_cellid + 1;
-      }
+      indicator[cell->active_cell_index()] = fine_cellid + 1;
+    }
 
   deallog << "l1-norm: ";
   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
diff --git a/tests/mpi/refine_and_coarsen_fixed_fraction_09.cc b/tests/mpi/refine_and_coarsen_fixed_fraction_09.cc
index cd10057e4c..79d3369072 100644
--- a/tests/mpi/refine_and_coarsen_fixed_fraction_09.cc
+++ b/tests/mpi/refine_and_coarsen_fixed_fraction_09.cc
@@ -65,16 +65,16 @@ test()
 
   Vector indicator(tria.n_active_cells());
   // assign each cell a globally unique cellid
-  for (const auto &cell : tria.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        const std::string cellid = cell->id().to_string();
-        const unsigned int fine_cellid =
-          std::stoul(cellid.substr(cellid.find(':') + 1, std::string::npos));
+  for (const auto &cell :
+       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      const std::string cellid = cell->id().to_string();
+      const unsigned int fine_cellid =
+        std::stoul(cellid.substr(cellid.find(':') + 1, std::string::npos));
 
-        Testing::srand(fine_cellid);
-        indicator[cell->active_cell_index()] = random_value();
-      }
+      Testing::srand(fine_cellid);
+      indicator[cell->active_cell_index()] = random_value();
+    }
 
   deallog << "l1-norm: ";
   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
diff --git a/tests/mpi/renumber_cuthill_mckee_03.cc b/tests/mpi/renumber_cuthill_mckee_03.cc
index 33ae828745..b27b1264a6 100644
--- a/tests/mpi/renumber_cuthill_mckee_03.cc
+++ b/tests/mpi/renumber_cuthill_mckee_03.cc
@@ -62,20 +62,20 @@ test()
   dofh.distribute_dofs(fe);
 
   deallog << "Before:" << std::endl;
-  for (const auto &cell : dofh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        deallog << "locally owned cell: " << cell << std::endl;
-        deallog << " dof indices: ";
-
-        std::vector<types::global_dof_index> cell_dofs(
-          cell->get_fe().dofs_per_cell);
-        cell->get_dof_indices(cell_dofs);
-
-        for (auto i : cell_dofs)
-          deallog << i << ' ';
-        deallog << std::endl;
-      }
+  for (const auto &cell :
+       dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      deallog << "locally owned cell: " << cell << std::endl;
+      deallog << " dof indices: ";
+
+      std::vector<types::global_dof_index> cell_dofs(
+        cell->get_fe().dofs_per_cell);
+      cell->get_dof_indices(cell_dofs);
+
+      for (auto i : cell_dofs)
+        deallog << i << ' ';
+      deallog << std::endl;
+    }
 
   std::set<types::global_dof_index> starting_indices;
   for (const auto &cell :
diff --git a/tests/multigrid-global-coarsening/mg_transfer_a_03.cc b/tests/multigrid-global-coarsening/mg_transfer_a_03.cc
index b971aa2a3c..78e0464561 100644
--- a/tests/multigrid-global-coarsening/mg_transfer_a_03.cc
+++ b/tests/multigrid-global-coarsening/mg_transfer_a_03.cc
@@ -80,9 +80,9 @@ do_test(const FiniteElement<dim> &fe_fine, const FiniteElement<dim> &fe_coarse)
 
   // setup dof-handlers
   DoFHandler<dim> dof_handler_fine(tria_fine);
-  for (const auto &cell : dof_handler_fine.active_cell_iterators())
-    if (cell->is_locally_owned())
-      cell->set_active_fe_index(0);
+  for (const auto &cell : dof_handler_fine.active_cell_iterators() |
+                          IteratorFilters::LocallyOwnedCell())
+    cell->set_active_fe_index(0);
   dof_handler_fine.distribute_dofs(fe);
 
   DoFHandler<dim> dof_handler_coarse(tria_coarse);
diff --git a/tests/sharedtria/cell_data_transfer_01.cc b/tests/sharedtria/cell_data_transfer_01.cc
index 24a1679ada..be3674274e 100644
--- a/tests/sharedtria/cell_data_transfer_01.cc
+++ b/tests/sharedtria/cell_data_transfer_01.cc
@@ -63,14 +63,14 @@ test()
   // ----- gather -----
   // store parent id of all locally owned cells
   Vector cell_ids_pre(tria.n_active_cells());
-  for (const auto &cell : tria.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        const std::string parent_cellid = cell->parent()->id().to_string();
-        const unsigned int parent_coarse_cell_id =
-          static_cast<unsigned int>(std::stoul(parent_cellid));
-        cell_ids_pre(cell->active_cell_index()) = parent_coarse_cell_id;
-      }
+  for (const auto &cell :
+       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      const std::string parent_cellid = cell->parent()->id().to_string();
+      const unsigned int parent_coarse_cell_id =
+        static_cast<unsigned int>(std::stoul(parent_cellid));
+      cell_ids_pre(cell->active_cell_index()) = parent_coarse_cell_id;
+    }
 
   // distribute local vector (as presented in step-18)
   PETScWrappers::MPI::Vector distributed_cell_ids_pre(
diff --git a/tests/sharedtria/cell_data_transfer_02.cc b/tests/sharedtria/cell_data_transfer_02.cc
index 67089821a0..7948875f40 100644
--- a/tests/sharedtria/cell_data_transfer_02.cc
+++ b/tests/sharedtria/cell_data_transfer_02.cc
@@ -63,14 +63,14 @@ test()
   // ----- gather -----
   // store parent id of all locally owned cells
   Vector cell_ids_pre(tria.n_active_cells());
-  for (const auto &cell : tria.active_cell_iterators())
-    if (cell->is_locally_owned())
-      {
-        const std::string parent_cellid = cell->parent()->id().to_string();
-        const unsigned int parent_coarse_cell_id =
-          static_cast<unsigned int>(std::stoul(parent_cellid));
-        cell_ids_pre(cell->active_cell_index()) = parent_coarse_cell_id;
-      }
+  for (const auto &cell :
+       tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    {
+      const std::string parent_cellid = cell->parent()->id().to_string();
+      const unsigned int parent_coarse_cell_id =
+        static_cast<unsigned int>(std::stoul(parent_cellid));
+      cell_ids_pre(cell->active_cell_index()) = parent_coarse_cell_id;
+    }
 
   // distribute local vector (as presented in step-18)
   PETScWrappers::MPI::Vector distributed_cell_ids_pre(
diff --git a/tests/sharedtria/limit_p_level_difference_01.cc b/tests/sharedtria/limit_p_level_difference_01.cc
index cf363d4a4e..c84625d138 100644
--- a/tests/sharedtria/limit_p_level_difference_01.cc
+++ b/tests/sharedtria/limit_p_level_difference_01.cc
@@ -91,9 +91,9 @@ test(const unsigned int fes_size,
 
   // display number of cells for each FE index
   std::vector<unsigned int> count(fes.size(), 0);
-  for (const auto &cell : dofh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      count[cell->active_fe_index()]++;
+  for (const auto &cell :
+       dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    count[cell->active_fe_index()]++;
   Utilities::MPI::sum(count, tria.get_communicator(), count);
   deallog << "fe count:" << count << std::endl;
 
diff --git a/tests/sharedtria/limit_p_level_difference_02.cc b/tests/sharedtria/limit_p_level_difference_02.cc
index 1221ccf5bf..d2ec2029dd 100644
--- a/tests/sharedtria/limit_p_level_difference_02.cc
+++ b/tests/sharedtria/limit_p_level_difference_02.cc
@@ -96,9 +96,9 @@ test(const unsigned int fes_size,
 
       // display number of cells for each FE index
       std::vector<unsigned int> count(fes.size(), 0);
-      for (const auto &cell : dofh.active_cell_iterators())
-        if (cell->is_locally_owned())
-          count[cell->active_fe_index()]++;
+      for (const auto &cell :
+           dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+        count[cell->active_fe_index()]++;
       Utilities::MPI::sum(count, tria.get_communicator(), count);
       deallog << "cycle:" << i << ", fe count:" << count << std::endl;
     }
diff --git a/tests/sharedtria/limit_p_level_difference_04.cc b/tests/sharedtria/limit_p_level_difference_04.cc
index 5a935b020b..5dc01deae7 100644
--- a/tests/sharedtria/limit_p_level_difference_04.cc
+++ b/tests/sharedtria/limit_p_level_difference_04.cc
@@ -96,10 +96,10 @@ test(const unsigned int max_difference, const bool allow_artificial_cells)
   Assert(fe_indices_changed, ExcInternalError());
 
   deallog << "future FE indices before adaptation:" << std::endl;
-  for (const auto &cell : dofh.active_cell_iterators())
-    if (cell->is_locally_owned())
-      deallog << " " << cell->id().to_string() << " " << cell->future_fe_index()
-              << std::endl;
+  for (const auto &cell :
+       dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+    deallog << " " << cell->id().to_string() << " " << cell->future_fe_index()
+            << std::endl;
 
   tria.execute_coarsening_and_refinement();
 
diff --git a/tests/simplex/step-18.cc b/tests/simplex/step-18.cc
index 99d377637d..efe3253dac 100644
--- a/tests/simplex/step-18.cc
+++ b/tests/simplex/step-18.cc
@@ -951,12 +951,12 @@ namespace Step18
                                     quadrature_formula.size());
 
     unsigned int history_index = 0;
-    for (auto &cell : triangulation.active_cell_iterators())
-      if (cell->is_locally_owned())
-        {
-          cell->set_user_pointer(&quadrature_point_history[history_index]);
-          history_index += quadrature_formula.size();
-        }
+    for (auto &cell : triangulation.active_cell_iterators() |
+                      IteratorFilters::LocallyOwnedCell())
+      {
+        cell->set_user_pointer(&quadrature_point_history[history_index]);
+        history_index += quadrature_formula.size();
+      }
 
     Assert(history_index == quadrature_point_history.size(),
            ExcInternalError());
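Most hunks above filter the loop body; a few replace counting loops outright
(shared_tria.cc and tria_base.cc via std::count_if,
repartitioning_policy_tools.cc via std::distance over the filtered range).
A sketch of both counting styles under the same assumptions as the sketch
after the commit message (the function name is again illustrative):

#include <deal.II/base/exceptions.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/tria.h>

#include <algorithm>
#include <iterator>

using namespace dealii;

template <int dim>
unsigned int
n_owned_active_cells_via_algorithms(const Triangulation<dim> &tria)
{
  // Style 1: std::count_if over plain iterators, as in the shared_tria.cc
  // hunk. end() returns a cell_iterator, so it is converted to an
  // active_cell_iterator to match begin_active(); the predicate receives
  // the dereferenced accessor, hence '.' rather than '->'.
  const auto n_counted =
    std::count_if(tria.begin_active(),
                  typename Triangulation<dim>::active_cell_iterator(
                    tria.end()),
                  [](const auto &cell) { return cell.is_locally_owned(); });

  // Style 2: std::distance over the filtered range, as in the
  // repartitioning_policy_tools.cc hunk.
  const auto owned =
    tria.active_cell_iterators() | IteratorFilters::LocallyOwnedCell();
  const auto n_distance = std::distance(owned.begin(), owned.end());

  Assert(n_counted == n_distance, ExcInternalError());
  return static_cast<unsigned int>(n_distance);
}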