From bcd0c3ad97f575df2fab3947825eed92b7c76c48 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Thu, 12 Apr 2018 13:37:27 +0200
Subject: [PATCH] Add methods to compute the face data in MappingInfo.

---
 include/deal.II/matrix_free/mapping_info.h  |  30 +-
 .../matrix_free/mapping_info.templates.h    | 780 +++++++++++++++++-
 .../matrix_free/matrix_free.templates.h     |  16 +-
 3 files changed, 820 insertions(+), 6 deletions(-)

diff --git a/include/deal.II/matrix_free/mapping_info.h b/include/deal.II/matrix_free/mapping_info.h
index ed4f4ad8d1..37bc22c860 100644
--- a/include/deal.II/matrix_free/mapping_info.h
+++ b/include/deal.II/matrix_free/mapping_info.h
@@ -25,6 +25,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 

@@ -289,10 +290,14 @@ namespace internal
      */
     void initialize (const dealii::Triangulation &tria,
                      const std::vector > &cells,
+                     const FaceInfo::n_array_elements> &faces,
                      const std::vector &active_fe_index,
                      const Mapping &mapping,
                      const std::vector > &quad,
-                     const UpdateFlags update_flags_cells);
+                     const UpdateFlags update_flags_cells,
+                     const UpdateFlags update_flags_boundary_faces,
+                     const UpdateFlags update_flags_inner_faces,
+                     const UpdateFlags update_flags_faces_by_cells);

     /**
      * Return the type of a given cell as detected during initialization.

@@ -361,6 +366,29 @@ namespace internal
                            const std::vector > &quad,
                            const UpdateFlags update_flags_cells);

+    /**
+     * Computes the information on the given faces, called within
+     * initialize.
+     */
+    void initialize_faces (const dealii::Triangulation &tria,
+                           const std::vector > &cells,
+                           const std::vector::n_array_elements> > &faces,
+                           const Mapping &mapping,
+                           const std::vector > &quad,
+                           const UpdateFlags update_flags_boundary_faces,
+                           const UpdateFlags update_flags_inner_faces);
+
+    /**
+     * Computes the face information on a per-cell basis (the data stored in
+     * face_data_by_cells), called within initialize.
+     */
+    void initialize_faces_by_cells
+    (const dealii::Triangulation &tria,
+     const std::vector > &cells,
+     const Mapping &mapping,
+     const std::vector > &quad,
+     const UpdateFlags update_flags_faces_by_cells);
+
     /**
      * Helper function to determine which update flags must be set in the
      * internal functions to initialize all data as requested by the user.
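The new initialize() entry point consumes the vectorized face topology (FaceInfo) in addition to the cell list. The definition of FaceInfo is not part of this patch (it comes from the newly included header), but the fields used later in initialize_face_range -- cells_interior, cells_exterior, interior_face_no, exterior_face_no, subface_index -- suggest roughly the following per-face-batch layout. This is a hypothetical standalone sketch for orientation only, not the actual deal.II type:

// Hypothetical sketch of the per-face batch data consumed by the new
// initialize() path. One object describes n_lanes faces processed together;
// a lane whose exterior cell index is invalid marks a boundary face. This
// mirrors the checks in initialize_face_range but is not the deal.II struct.
#include <array>
#include <limits>

constexpr unsigned int invalid_unsigned_int =
  std::numeric_limits<unsigned int>::max();

template <int n_lanes>
struct FaceBatchSketch
{
  // Index of the cell on each side of the face, one entry per SIMD lane.
  std::array<unsigned int, n_lanes> cells_interior;
  std::array<unsigned int, n_lanes> cells_exterior;

  // Local face number within the interior/exterior cell.
  unsigned char interior_face_no;
  unsigned char exterior_face_no;

  // For adaptive meshes: which child face of the coarser neighbor this face
  // represents; a value beyond max_children marks a regular face.
  unsigned char subface_index;

  bool is_boundary() const
  {
    // initialize_face_range uses lane 0 of cells_exterior for this check.
    return cells_exterior[0] == invalid_unsigned_int;
  }
};

int main()
{
  FaceBatchSketch<4> face{};
  face.cells_interior = {0, 1, 2, 3};
  face.cells_exterior = {invalid_unsigned_int, 0, 0, 0};
  face.interior_face_no = 2;
  return face.is_boundary() ? 0 : 1; // lane 0 has no neighbor -> boundary face
}

In particular, a boundary face is recognized by an invalid exterior cell index in lane 0, which is exactly the check the face initialization in the implementation below performs.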
diff --git a/include/deal.II/matrix_free/mapping_info.templates.h b/include/deal.II/matrix_free/mapping_info.templates.h
index 8bd1f58e5f..796e2e9c87 100644
--- a/include/deal.II/matrix_free/mapping_info.templates.h
+++ b/include/deal.II/matrix_free/mapping_info.templates.h
@@ -261,16 +261,23 @@ namespace internal
     MappingInfo::initialize (const dealii::Triangulation &tria,
                              const std::vector > &cells,
+                             const FaceInfo::n_array_elements> &face_info,
                              const std::vector &active_fe_index,
                              const Mapping &mapping,
                              const std::vector > &quad,
-                             const UpdateFlags update_flags_cells)
+                             const UpdateFlags update_flags_cells,
+                             const UpdateFlags update_flags_boundary_faces,
+                             const UpdateFlags update_flags_inner_faces,
+                             const UpdateFlags update_flags_faces_by_cells)
     {
       clear();

       // Could call these functions in parallel, but not useful because the
       // work inside is nicely split up already
       initialize_cells(tria, cells, active_fe_index, mapping, quad, update_flags_cells);
+      initialize_faces(tria, cells, face_info.faces, mapping, quad,
+                       update_flags_boundary_faces, update_flags_inner_faces);
+      initialize_faces_by_cells(tria, cells, mapping, quad, update_flags_faces_by_cells);
     }

@@ -1069,6 +1076,777 @@ namespace internal

+    /* ------------------------- initialization of faces ------------------- */
+
+    // Anonymous namespace with implementation of extraction of values on face
+    // range
+    namespace
+    {
+      template
+      struct CompressedFaceData
+      {
+        // Constructor. As a scaling factor for the FPArrayComparator, we
+        // select the inverse of the Jacobian (not the Jacobian as in the
+        // CompressedCellData) and add another factor of 512 to account for
+        // some roundoff effects.
+        CompressedFaceData(const Number jacobian_size)
+          :
+          data(FPArrayComparator(512./jacobian_size)),
+          jacobian_size(jacobian_size)
+        {}
+
+        // Store the Jacobians on both sides of a face (2*(dim*dim) entries),
+        // the normal vector (dim entries), and the Jacobian determinant on
+        // the face (first tensor) for each entry in the vectorized array
+        // (inner tensor). We cannot choose a VectorizedArray type directly
+        // because std::map does not provide the necessary alignment upon
+        // memory allocation.
+        std::map::n_array_elements,Number> >
+                 , unsigned int, FPArrayComparator > data;
+
+        // Store the scaling factor
+        const Number jacobian_size;
+      };
+
+
+      // We always put the derivative normal to the face in the last slot for
+      // simpler unit cell gradient computations. This function reorders the
+      // indices of the Jacobian appropriately.
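CompressedFaceData keys a std::map with the geometric data of a face and compares keys with an FPArrayComparator whose tolerance is scaled by the Jacobian size, so that all faces whose Jacobians, normals, and determinants agree up to roundoff share a single stored entry. The following self-contained sketch (hypothetical names, plain std::array keys instead of the Tensor-of-VectorizedArray key used above) illustrates the mechanism of such tolerance-based compression; it is not the actual FPArrayComparator:

// Sketch: a std::map whose comparator treats floating-point arrays that
// agree up to a tolerance as equal, so faces with the same affine geometry
// collapse onto one stored entry.
#include <array>
#include <iostream>
#include <map>

template <std::size_t N>
struct FuzzyArrayLess
{
  double tolerance; // in the real code: scaled by the typical Jacobian size

  bool operator()(const std::array<double, N> &a,
                  const std::array<double, N> &b) const
  {
    // Lexicographic comparison that ignores differences below the tolerance.
    for (std::size_t i = 0; i < N; ++i)
      {
        if (a[i] < b[i] - tolerance)
          return true;
        if (a[i] > b[i] + tolerance)
          return false;
      }
    return false; // arrays considered equal
  }
};

int main()
{
  // Key: flattened geometric data of one face; value: compressed data index.
  std::map<std::array<double, 3>, unsigned int, FuzzyArrayLess<3>>
    compressed(FuzzyArrayLess<3>{1e-12});

  const std::array<double, 3> face_a = {{1.0, 0.0, 0.25}};
  const std::array<double, 3> face_b = {{1.0, 1e-14, 0.25}}; // equal up to roundoff

  compressed.insert({face_a, 0u});
  const auto result = compressed.insert({face_b, 1u});

  // The second face maps onto the already stored entry: no new data is added.
  std::cout << "new entry inserted: " << std::boolalpha << result.second
            << ", index used: " << result.first->second << std::endl;
  return 0;
}

The second insert finds the existing entry, so only the compressed index needs to be stored per face; the factor 512/jacobian_size passed to the FPArrayComparator in the constructor above plays the role of the tolerance scaling.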
+ template + unsigned int + reorder_face_derivative_indices(const unsigned int face_no, + const unsigned int index) + { + Assert(index < dim, ExcInternalError()); + if (dim == 3) + { + unsigned int table [3][3] = {{1, 2, 0}, {2, 0, 1}, {0, 1, 2}}; + return table[face_no/2][index]; + } + else if (dim == 2) + { + unsigned int table [2][2] = {{1, 0}, {0, 1}}; + return table[face_no/2][index]; + } + else if (dim == 1) + return 0; + else + Assert(false, ExcNotImplemented("Not possible in dim=" + std::to_string(dim))); + } + + + + template + void + initialize_face_range + (const std::pair face_range, + const dealii::Triangulation &tria, + const std::vector > &cells, + const std::vector::n_array_elements> > &faces, + const Mapping &mapping, + const UpdateFlags update_flags_boundary, + const UpdateFlags update_flags_inner, + MappingInfo &mapping_info, + std::pair >, + CompressedFaceData > &data) + { + FE_Nothing dummy_fe; + + std::vector > > > + fe_face_values_container(mapping_info.face_data.size()); + for (unsigned int my_q=0; my_q > > > + fe_boundary_face_values_container(mapping_info.face_data.size()); + for (unsigned int my_q=0; my_q > > > + fe_subface_values_container(mapping_info.face_data.size()); + for (unsigned int my_q=0; my_q face_data (get_jacobian_size(tria)); + + const unsigned int end_face = + std::min(std::size_t(face_range.second), faces.size()); + for (unsigned int face=face_range.first; face &quadrature = + mapping_info.face_data[my_q].descriptor[0].quadrature; + + const bool is_boundary_face = + faces[face].cells_exterior[0] == numbers::invalid_unsigned_int; + + if (is_boundary_face && + fe_boundary_face_values_container[my_q][0].get() == 0) + fe_boundary_face_values_container[my_q][0].reset + (new FEFaceValues (mapping, dummy_fe, quadrature, + update_flags_boundary)); + else if (fe_face_values_container[my_q][0].get() == 0) + fe_face_values_container[my_q][0].reset + (new FEFaceValues (mapping, dummy_fe, quadrature, + update_flags_inner)); + + FEFaceValues &fe_face_values = is_boundary_face ? + *fe_boundary_face_values_container[my_q][0] : + *fe_face_values_container[my_q][0]; + const unsigned int n_q_points = fe_face_values.n_quadrature_points; + face_data.resize(n_q_points); + + bool normal_is_similar = true; + bool JxW_is_similar = true; + bool cell_is_cartesian = true; + for (unsigned int v=0; v::n_array_elements; ++v) + { + Tensor<2,dim> jacobian_0; + double compare_norm_jac = 1.; + if (faces[face].cells_interior[v] != numbers::invalid_unsigned_int) + { + typename dealii::Triangulation::cell_iterator + cell_it (&tria, cells[faces[face].cells_interior[v]].first, + cells[faces[face].cells_interior[v]].second); + + fe_face_values.reinit(cell_it, faces[face].interior_face_no); + + for (unsigned int q=0; q 2048. * std::numeric_limits::epsilon() * + fe_face_values.JxW(0)*quadrature.weight(q)) + JxW_is_similar = false; + face_data.JxW_values[q][v] = fe_face_values.JxW(q); + + DerivativeForm<1,dim,dim> inv_jac = fe_face_values.jacobian(q).covariant_form(); + Tensor<1,dim> normal = fe_face_values.normal_vector(q); + + // Filter out very small values in normal. No need + // to re-normalize because these values cannot enter + // the norm significantly: Total size is 1 but 1e-13 + // squared is 1e-26. + for (unsigned int d=0; d::epsilon()) + normal[d] = 0.; + + if (q==0) + { + jacobian_0 = inv_jac; + compare_norm_jac = jacobian_0.norm(); + } + for (unsigned int d=0; d 2048. 
* std::numeric_limits::epsilon() * + compare_norm_jac) + JxW_is_similar = false; + const unsigned int ee= + reorder_face_derivative_indices(faces[face].interior_face_no,e); + face_data.general_jac[q][d][e][v] = inv_jac[d][ee]; + } + + for (unsigned int d=0; d 1024. * + std::numeric_limits::epsilon()) + normal_is_similar = false; + } + if (std::abs(std::abs(normal[faces[face].interior_face_no/2])- + 1.) > 1024. * std::numeric_limits::epsilon()) + cell_is_cartesian = false; + for (unsigned int d=0; d + 2048*std::numeric_limits::epsilon() * + compare_norm_jac) + cell_is_cartesian = false; + + if (fe_face_values.get_update_flags() & update_quadrature_points) + for (unsigned int d=0; d::cell_iterator + cell_it (&tria, cells[faces[face].cells_exterior[v]].first, + cells[faces[face].cells_exterior[v]].second); + + const FEValuesBase *actual_fe_face_values = 0; + if (faces[face].subface_index >= + GeometryInfo::max_children_per_cell) + { + fe_face_values.reinit(cell_it, faces[face].exterior_face_no); + actual_fe_face_values = &fe_face_values; + } + else + { + if (fe_subface_values_container[my_q][0].get() == 0) + fe_subface_values_container[my_q][0].reset + (new FESubfaceValues (mapping, dummy_fe, quadrature, + update_flags_inner)); + fe_subface_values_container[my_q][0]-> + reinit(cell_it, faces[face].exterior_face_no, + faces[face].subface_index); + actual_fe_face_values = fe_subface_values_container[my_q][0].get(); + } + for (unsigned int q=0; q inv_jac = actual_fe_face_values->jacobian(q).covariant_form(); + if (q==0) + { + jacobian_0 = inv_jac; + compare_norm_jac = jacobian_0.norm(); + } + for (unsigned int d=0; d 2048. * std::numeric_limits::epsilon() * + compare_norm_jac) + JxW_is_similar = false; + const unsigned int ee= + reorder_face_derivative_indices(faces[face].exterior_face_no,e); + face_data.general_jac[n_q_points+q][d][e][v] = inv_jac[d][ee]; + } + for (unsigned int d=0; d + 2048*std::numeric_limits::epsilon() * + compare_norm_jac) + cell_is_cartesian = false; + } + } + // Fill up with 'known' values + else if (is_boundary_face == false) + { + Assert(faces[face].cells_exterior[0] != numbers::invalid_unsigned_int, + ExcInternalError()); + for (unsigned int q=0; q::n_array_elements,Number> VEC_ARRAY; + unsigned int insert_position = data.first[my_q].JxW_values.size(); + + // Fill in JxW values, apply compression + if (mapping_info.face_type[face] <= affine) + { + if (my_q == 0) + { + // find out if we already had the same JxW values before + std::pair,unsigned int> new_entry; + new_entry.second = data.second.data.size(); + for (unsigned int v=0; v::n_array_elements; ++v) + new_entry.first[2*dim*dim+dim][v] = face_data.JxW_values[0][v]/quadrature.weight(0)/ + Utilities::fixed_power(face_data.jac_size); + + new_entry.second = data.second.data.size(); + for (unsigned int d=0; d::n_array_elements; ++v) + new_entry.first[d*dim+e][v] = face_data.general_jac[0][d][e][v]; + if (is_boundary_face == false) + for (unsigned int d=0; d::n_array_elements; ++v) + new_entry.first[dim*dim+d*dim+e][v] = face_data.general_jac[n_q_points][d][e][v]; + // we need to add the normal vector here because we + // store both the inverse jacobian and the normal vector + // times the jacobian; of course, there will be + // different values in their product for normal vectors + // oriented in different ways (the memory saving is + // still significant); we need to divide by the jacobian + // size to get the right scaling + for (unsigned int d=0; d::n_array_elements; ++v) + new_entry.first[2*dim*dim+d][v] = 
face_data.normal_vectors[0][d][v] / face_data.jac_size; + + insert_position = data.second.data.insert(new_entry).first->second; + } + else + insert_position = data.first[0].data_index_offsets[face]; + } + data.first[my_q].data_index_offsets.push_back(insert_position); + if (mapping_info.face_type[face] > affine) + { + for (unsigned int q=0; q + void + compute_normal_times_jacobian (const unsigned int first_face, + const unsigned int last_face, + const std::vector &face_type, + const std::vector::n_array_elements> > &faces, + MappingInfoStorage &data_faces) + { + for (unsigned int face = first_face; face < last_face; ++face) + { + const bool is_boundary_face = faces[face].cells_exterior[0] == numbers::invalid_unsigned_int; + const unsigned int n_q_points_work = + face_type[face] > affine ? data_faces.descriptor[0].n_q_points : 1; + const unsigned int offset = data_faces.data_index_offsets[face]; + + for (unsigned int q=0; q + void + MappingInfo::initialize_faces + (const dealii::Triangulation &tria, + const std::vector > &cells, + const std::vector::n_array_elements> > &faces, + const Mapping &mapping, + const std::vector > &quad, + const UpdateFlags update_flags_boundary_faces, + const UpdateFlags update_flags_inner_faces) + { + face_type.resize(faces.size(), general); + face_data.resize (quad.size()); + + // We currently always set the same flags on both inner and boundary + // faces to the same value. At some point, we might want to separate the + // two. + UpdateFlags update_flags_compute_boundary = + ((update_flags_inner_faces | update_flags_boundary_faces) & update_quadrature_points ? + update_quadrature_points : + update_default) + | update_normal_vectors | update_JxW_values | update_jacobians; + UpdateFlags update_flags_compute_inner = + ((update_flags_inner_faces | update_flags_boundary_faces) & update_quadrature_points ? + update_quadrature_points : + update_default) + | update_normal_vectors | update_JxW_values | update_jacobians; + UpdateFlags update_flags_common = update_flags_inner_faces | update_flags_boundary_faces; + + for (unsigned int my_q=0; my_q >, + CompressedFaceData > > data_faces_local; + // Reserve enough space to avoid re-allocation (which would destroy the + // references passed to the tasks!) 
+ data_faces_local.reserve(MultithreadInfo::n_threads()); + + { + Threads::TaskGroup<> tasks; + std::pair face_range(0U, work_per_chunk); + while (face_range.first < faces.size()) + { + data_faces_local.push_back + (std::make_pair (std::vector >(quad.size()), + CompressedFaceData(get_jacobian_size(tria)))); + tasks += Threads::new_task(&initialize_face_range, + face_range, tria, cells, faces, mapping, + update_flags_compute_boundary, + update_flags_compute_inner, + *this, data_faces_local.back()); + face_range.first = face_range.second; + face_range.second += work_per_chunk; + } + + tasks.join_all(); + } + + // Fill in each thread's constant data (normals, JxW, normals x + // Jacobian) into the data of the zeroth chunk in serial + std::vector > indices_compressed(data_faces_local.size()); + for (unsigned int i=0; i > shift(data_faces_local.size()); + shift[0][0] = n_constant_data; + shift[0][1] = 0; + for (unsigned int i=1; i tasks; + for (unsigned int i=0; i, + work_per_chunk * i, shift[i], + indices_compressed[i], face_type, + data_faces_local[i].first[my_q], + face_data[my_q]); + + // fill the constant data fields (in parallel to the loop above) + if (my_q == 0) + { + const Number jac_size = get_jacobian_size(tria); + for (auto &it : data_faces_local[0].second.data) + { + // JxW values; invert previously applied scaling + for (unsigned int v=0; v::n_array_elements; ++v) + face_data[my_q].JxW_values[it.second][v] + = it.first[2*dim*dim+dim][v]* + Utilities::fixed_power(jac_size); + + // inverse Jacobians + for (unsigned int i=0; i<2; ++i) + for (unsigned int d=0; d::n_array_elements; ++v) + face_data[my_q].jacobians[i][it.second][d][e][v] + = it.first[i*dim*dim+d*dim+e][v]; + + // normal vectors; invert previously applied scaling + for (unsigned int d=0; d::n_array_elements; ++v) + face_data[my_q].normal_vectors[it.second][d][v] + = it.first[2*dim*dim+d][v] * jac_size; + } + } + else + { + for (unsigned int i=0; i, + work_per_chunk * i, + std::min(work_per_chunk * (i+1), + (unsigned int)faces.size()), + face_type, faces, face_data[my_q]); + tasks.join_all(); + } + } + + + + template + void + MappingInfo::initialize_faces_by_cells + (const dealii::Triangulation &tria, + const std::vector > &cells, + const Mapping &mapping, + const std::vector > &quad, + const UpdateFlags update_flags_faces_by_cells) + { + if (update_flags_faces_by_cells == update_default) + return; + + face_data_by_cells.resize(quad.size()); + const unsigned int n_quads = quad.size(); + const unsigned int vectorization_width = + VectorizedArray::n_array_elements; + UpdateFlags update_flags = + (update_flags_faces_by_cells & update_quadrature_points ? + update_quadrature_points : + update_default) + | update_normal_vectors | update_JxW_values | update_jacobians; + + for (unsigned int my_q=0; my_q::faces_per_cell); + if (update_flags & update_quadrature_points) + face_data_by_cells[my_q].quadrature_point_offsets. 
+ resize(cell_type.size()*GeometryInfo::faces_per_cell); + std::size_t storage_length = 0; + for (unsigned int i=0; i::faces_per_cell; ++face) + { + if (cell_type[i] <= affine) + { + face_data_by_cells[my_q].data_index_offsets + [i*GeometryInfo::faces_per_cell+face] = storage_length; + ++storage_length; + } + else + { + face_data_by_cells[my_q].data_index_offsets + [i*GeometryInfo::faces_per_cell+face] = storage_length; + storage_length += face_data_by_cells[my_q].descriptor[0].n_q_points; + } + if (update_flags & update_quadrature_points) + face_data_by_cells[my_q].quadrature_point_offsets + [i*GeometryInfo::faces_per_cell+face] = + (i*GeometryInfo::faces_per_cell+face)* + face_data_by_cells[my_q].descriptor[0].n_q_points; + } + face_data_by_cells[my_q].JxW_values.resize_fast(storage_length * + GeometryInfo::faces_per_cell); + face_data_by_cells[my_q].jacobians[0].resize_fast(storage_length * + GeometryInfo::faces_per_cell); + if (update_flags & update_normal_vectors) + face_data_by_cells[my_q].normal_vectors.resize_fast(storage_length * + GeometryInfo::faces_per_cell); + if (update_flags & update_normal_vectors && + update_flags & update_jacobians) + face_data_by_cells[my_q].normals_times_jacobians[0].resize_fast(storage_length * + GeometryInfo::faces_per_cell); + if (update_flags & update_jacobian_grads) + face_data_by_cells[my_q].jacobian_gradients[0].resize_fast(storage_length * + GeometryInfo::faces_per_cell); + + if (update_flags & update_quadrature_points) + face_data_by_cells[my_q].quadrature_points. + resize_fast(cell_type.size()*GeometryInfo::faces_per_cell* + face_data_by_cells[my_q].descriptor[0].n_q_points); + } + + FE_Nothing dummy_fe; + // currently no hp-indices implemented + const unsigned int fe_index = 0; + std::vector > > > + fe_face_values (face_data_by_cells.size()); + for (unsigned int i=0; i::faces_per_cell; ++face) + { + if (fe_face_values[my_q][fe_index].get() == nullptr) + fe_face_values[my_q][fe_index].reset + (new dealii::FEFaceValues (mapping, dummy_fe, face_data_by_cells[my_q]. + descriptor[fe_index].quadrature, + update_flags)); + dealii::FEFaceValues &fe_val = *fe_face_values[my_q][fe_index]; + const unsigned int offset = + face_data_by_cells[my_q].data_index_offsets + [cell*GeometryInfo::faces_per_cell+face]; + + for (unsigned int v=0; v::cell_iterator + cell_it (&tria, cells[cell*vectorization_width+v].first, + cells[cell*vectorization_width+v].second); + fe_val.reinit(cell_it, face); + + // copy data for affine data type + if (cell_type[cell] <= affine) + { + if (update_flags & update_JxW_values) + face_data_by_cells[my_q].JxW_values[offset][v] + = fe_val.JxW(0) / face_data_by_cells[my_q]. 
+ descriptor[fe_index].quadrature.weight(0); + if (update_flags & update_jacobians) + { + DerivativeForm<1,dim,dim> inv_jac = fe_val.jacobian(0).covariant_form(); + for (unsigned int d=0; d(face,e); + face_data_by_cells[my_q].jacobians[0][offset][d][e][v] + = inv_jac[d][ee]; + } + } + if (update_flags & update_jacobian_grads) + { + Assert(false, ExcNotImplemented()); + } + if (update_flags & update_normal_vectors) + for (unsigned int d=0; d inv_jac = fe_val.jacobian(q).covariant_form(); + for (unsigned int d=0; d(face,e); + face_data_by_cells[my_q].jacobians[0][offset+q][d][e][v] + = inv_jac[d][ee]; + } + } + if (update_flags & update_jacobian_grads) + { + Assert(false, ExcNotImplemented()); + } + if (update_flags & update_normal_vectors) + for (unsigned int q=0; q::faces_per_cell+face]+q][d][v] = fe_val.quadrature_point(q)[d]; + } + if (update_flags & update_normal_vectors && + update_flags & update_jacobians) + for (unsigned int q=0; q<(cell_type[cell]<=affine ? 1 : + fe_val.n_quadrature_points); ++q) + face_data_by_cells[my_q].normals_times_jacobians[0][offset+q] + = face_data_by_cells[my_q].normal_vectors[offset+q] * + face_data_by_cells[my_q].jacobians[0][offset+q]; + } + } + + + template std::size_t MappingInfo::memory_consumption() const { diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index 390f0143c7..475386de79 100644 --- a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -31,6 +31,7 @@ #include #include #include +#include DEAL_II_NAMESPACE_OPEN @@ -188,9 +189,13 @@ internal_reinit(const Mapping &mapping, // general case? if (additional_data.initialize_mapping == true) { + std::vector dummy; mapping_info.initialize (dof_handler[0]->get_triangulation(), cell_level_index, - dof_info[0].cell_active_fe_index, mapping, quad, - additional_data.mapping_update_flags); + internal::MatrixFreeFunctions:: FaceInfo + ::n_array_elements>(), + dummy, mapping, + quad, additional_data.mapping_update_flags, + update_default, update_default, update_default); mapping_is_initialized = true; } @@ -316,8 +321,11 @@ internal_reinit(const Mapping &mapping, if (additional_data.initialize_mapping == true) { mapping_info.initialize (dof_handler[0]->get_triangulation(), cell_level_index, - dof_info[0].cell_active_fe_index, mapping, quad, - additional_data.mapping_update_flags); + internal::MatrixFreeFunctions::FaceInfo + ::n_array_elements>(), + dof_info[0].cell_active_fe_index, mapping, + quad, additional_data.mapping_update_flags, + update_default, update_default, update_default); mapping_is_initialized = true; } -- 2.39.5
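Both initialize_faces and initialize_faces_by_cells finish by storing normals_times_jacobians, i.e. the face normal contracted with the (inverse transposed) Jacobian. The point is that the normal derivative at a quadrature point then costs a single dot product with the unit-cell gradient, rather than first transforming the full gradient to real space. A minimal standalone check of that identity (plain arrays, dim = 2; illustrative only, not deal.II types):

// Check of the identity exploited by the precomputed field:
//   n . (J^{-T} grad_unit u)  ==  (n^T J^{-T}) . grad_unit u
// so the normal derivative needs only one dot product per quadrature point
// once n^T J^{-T} has been stored.
#include <array>
#include <cassert>
#include <cmath>

using Vec2 = std::array<double, 2>;
using Mat2 = std::array<std::array<double, 2>, 2>; // row-major

Vec2 mat_vec(const Mat2 &A, const Vec2 &x)
{
  return {A[0][0] * x[0] + A[0][1] * x[1],
          A[1][0] * x[0] + A[1][1] * x[1]};
}

Vec2 vec_mat(const Vec2 &n, const Mat2 &A) // (n^T A)_j = sum_i n_i A_ij
{
  return {n[0] * A[0][0] + n[1] * A[1][0],
          n[0] * A[0][1] + n[1] * A[1][1]};
}

double dot(const Vec2 &a, const Vec2 &b) { return a[0] * b[0] + a[1] * b[1]; }

int main()
{
  const Mat2 inv_jac_T = {{{2.0, 0.3}, {0.1, 1.5}}}; // stands in for J^{-T}
  const Vec2 normal    = {0.6, 0.8};                 // unit face normal
  const Vec2 grad_unit = {0.25, -1.0};               // unit-cell gradient

  // Transform the full gradient first, then take the normal component ...
  const double direct = dot(normal, mat_vec(inv_jac_T, grad_unit));

  // ... or contract once with the precomputed normal-times-Jacobian.
  const Vec2   n_times_jac = vec_mat(normal, inv_jac_T);
  const double cached      = dot(n_times_jac, grad_unit);

  assert(std::abs(direct - cached) < 1e-14);
  return 0;
}

For Cartesian and affine faces only a single such product per face is stored, which is why the loops in compute_normal_times_jacobian and in the faces-by-cells path run over one quadrature point in that case and over all points only for general faces.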