From: Daniel Arndt
Date: Sat, 5 Jan 2019 23:37:48 +0000 (+0100)
Subject: Fix include/matrix_free
X-Git-Tag: v9.1.0-rc1~425^2~4
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=881cce36a9fc3921fc1d973462ab3cf2862ad09e;p=dealii.git

Fix include/matrix_free
---

diff --git a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
index ed1b18b657..d1ae2f745a 100644
--- a/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
+++ b/include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
@@ -224,7 +224,7 @@ namespace CUDAWrappers
         line_to_inactive_cells(n_raw_lines);
 
       // First add active and inactive cells to their lines:
-      for (auto cell : dof_handler.cell_iterators())
+      for (const auto &cell : dof_handler.cell_iterators())
         {
           for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell;
                ++line)
@@ -261,7 +261,7 @@ namespace CUDAWrappers
                     child->line(neighbor_line)->index();
 
                   // Now add all active cells
-                  for (auto cl : line_to_cells[line_idx])
+                  for (const auto cl : line_to_cells[line_idx])
                     line_to_cells[child_line_idx].push_back(cl);
                 }
             }
@@ -514,7 +514,7 @@ namespace CUDAWrappers
             {
               // For each cell which share that edge
               const unsigned int line = cell->line(local_line)->index();
-              for (auto edge_neighbor : line_to_cells[line])
+              for (const auto edge_neighbor : line_to_cells[line])
                 {
                   // If one of them is coarser than us
                   const cell_iterator neighbor_cell = edge_neighbor.first;
diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h
index 2987edf8cd..153e6a8236 100644
--- a/include/deal.II/matrix_free/dof_info.templates.h
+++ b/include/deal.II/matrix_free/dof_info.templates.h
@@ -365,10 +365,8 @@ namespace internal
           vector_partitioner->local_range().first);
       const std::size_t n_ghosts = ghost_dofs.size();
 #ifdef DEBUG
-      for (std::vector<unsigned int>::iterator dof = dof_indices.begin();
-           dof != dof_indices.end();
-           ++dof)
-        AssertIndexRange(*dof, n_owned + n_ghosts);
+      for (const auto dof_index : dof_indices)
+        AssertIndexRange(dof_index, n_owned + n_ghosts);
 #endif
 
       const unsigned int n_components = start_components.back();
@@ -631,8 +629,8 @@ namespace internal
         (vector_partitioner->local_range().second -
          vector_partitioner->local_range().first) +
         vector_partitioner->ghost_indices().n_elements();
-      for (std::size_t i = 0; i < dof_indices.size(); ++i)
-        AssertIndexRange(dof_indices[i], index_range);
+      for (const auto dof_index : dof_indices)
+        AssertIndexRange(dof_index, index_range);
 
       // sanity check 2: for the constraint indicators, the first index should
       // be smaller than the number of indices in the row, and the second
@@ -1201,9 +1199,9 @@ namespace internal
             }
         }
       // ensure that all indices are touched at least during the last round
-      for (auto &i : touched_by)
-        if (i == numbers::invalid_unsigned_int)
-          i = task_info.cell_partition_data.back() - 1;
+      for (auto &index : touched_by)
+        if (index == numbers::invalid_unsigned_int)
+          index = task_info.cell_partition_data.back() - 1;
 
       vector_zero_range_list_index.resize(
         1 + task_info
@@ -1221,7 +1219,7 @@ namespace internal
           auto it = chunk_must_zero_vector.find(chunk);
           if (it != chunk_must_zero_vector.end())
             {
-              for (unsigned int i : it->second)
+              for (const auto i : it->second)
                 vector_zero_range_list.push_back(i);
               vector_zero_range_list_index[chunk + 1] =
                 vector_zero_range_list.size();
@@ -1519,13 +1517,13 @@ namespace internal
         }
 
       AssertIndexRange(counter, local_size + 1);
-      for (std::size_t i = 0; i < renumbering.size(); ++i)
-        if (renumbering[i] == numbers::invalid_dof_index)
-          renumbering[i] = counter++;
+      for (types::global_dof_index &dof_index : renumbering)
+        if (dof_index == numbers::invalid_dof_index)
+          dof_index = counter++;
 
       // transform indices to global index space
-      for (std::size_t i = 0; i < renumbering.size(); ++i)
-        renumbering[i] = vector_partitioner->local_to_global(renumbering[i]);
+      for (types::global_dof_index &dof_index : renumbering)
+        dof_index = vector_partitioner->local_to_global(dof_index);
 
       AssertDimension(counter, renumbering.size());
     }
diff --git a/include/deal.II/matrix_free/face_setup_internal.h b/include/deal.II/matrix_free/face_setup_internal.h
index a1d90a7e96..5ff3798770 100644
--- a/include/deal.II/matrix_free/face_setup_internal.h
+++ b/include/deal.II/matrix_free/face_setup_internal.h
@@ -174,10 +174,10 @@ namespace internal
 # ifdef DEBUG
       // safety check
       if (use_active_cells)
-        for (unsigned int i = 0; i < cell_levels.size(); ++i)
+        for (const auto &cell_level : cell_levels)
           {
             typename dealii::Triangulation<dim>::cell_iterator dcell(
-              &triangulation, cell_levels[i].first, cell_levels[i].second);
+              &triangulation, cell_level.first, cell_level.second);
             Assert(dcell->active(), ExcInternalError());
           }
 # endif
@@ -273,19 +273,16 @@ namespace internal
       // sort the cell ids related to each neighboring processor. This
       // algorithm is symmetric so every processor combination should
      // arrive here and no deadlock should be possible
-      for (std::map<unsigned int, FaceIdentifier>::iterator it =
-             inner_faces_at_proc_boundary.begin();
-           it != inner_faces_at_proc_boundary.end();
-           ++it)
+      for (auto &inner_face : inner_faces_at_proc_boundary)
         {
-          Assert(it->first != my_domain,
+          Assert(inner_face.first != my_domain,
                  ExcInternalError("Should not send info to myself"));
-          std::sort(it->second.shared_faces.begin(),
-                    it->second.shared_faces.end());
-          it->second.shared_faces.erase(
-            std::unique(it->second.shared_faces.begin(),
-                        it->second.shared_faces.end()),
-            it->second.shared_faces.end());
+          std::sort(inner_face.second.shared_faces.begin(),
+                    inner_face.second.shared_faces.end());
+          inner_face.second.shared_faces.erase(
+            std::unique(inner_face.second.shared_faces.begin(),
+                        inner_face.second.shared_faces.end()),
+            inner_face.second.shared_faces.end());
 
           // safety check: both involved processors should see the same list
           // because the pattern of ghosting is symmetric. We test this by
@@ -298,46 +295,46 @@ namespace internal
                 comm = ptria->get_communicator();
 
               MPI_Status status;
-              unsigned int mysize = it->second.shared_faces.size();
+              unsigned int mysize = inner_face.second.shared_faces.size();
               unsigned int othersize = numbers::invalid_unsigned_int;
               MPI_Sendrecv(&mysize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            600 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           600 + it->first,
+                           inner_face.first,
+                           600 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(mysize, othersize);
-              mysize = it->second.n_hanging_faces_smaller_subdomain;
+              mysize = inner_face.second.n_hanging_faces_smaller_subdomain;
               MPI_Sendrecv(&mysize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            700 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           700 + it->first,
+                           inner_face.first,
+                           700 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(mysize, othersize);
-              mysize = it->second.n_hanging_faces_larger_subdomain;
+              mysize = inner_face.second.n_hanging_faces_larger_subdomain;
               MPI_Sendrecv(&mysize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            800 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           800 + it->first,
+                           inner_face.first,
+                           800 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(mysize, othersize);
@@ -359,11 +356,11 @@ namespace internal
               // contained in `shared_faces`, whereas we need a copy that we
               // sort differently for the other way around.
               std::vector<std::tuple<CellId, CellId, unsigned int>> other_range(
-                it->second.shared_faces.size());
+                inner_face.second.shared_faces.size());
               for (unsigned int i = 0; i < other_range.size(); ++i)
                 other_range[i] =
-                  std::make_tuple(it->second.shared_faces[i].second,
-                                  it->second.shared_faces[i].first,
+                  std::make_tuple(inner_face.second.shared_faces[i].second,
+                                  inner_face.second.shared_faces[i].first,
                                   i);
               std::sort(other_range.begin(), other_range.end());
 
@@ -375,15 +372,16 @@
               // be made in an arbitrary way.
               unsigned int n_faces_lower_proc = 0, n_faces_higher_proc = 0;
               std::vector assignment(other_range.size(), 0);
-              if (it->second.shared_faces.size() > 0)
+              if (inner_face.second.shared_faces.size() > 0)
                 {
                   // identify faces that go to the processor with the higher
                   // rank
                   unsigned int count = 0;
-                  for (unsigned int i = 1; i < it->second.shared_faces.size();
+                  for (unsigned int i = 1;
+                       i < inner_face.second.shared_faces.size();
                        ++i)
-                    if (it->second.shared_faces[i].first ==
-                        it->second.shared_faces[i - 1 - count].first)
+                    if (inner_face.second.shared_faces[i].first ==
+                        inner_face.second.shared_faces[i - 1 - count].first)
                       ++count;
                     else
                       {
@@ -413,11 +411,11 @@
                         {
                           for (unsigned int k = 0; k <= count; ++k)
                             {
-                              Assert(it->second
+                              Assert(inner_face.second
                                          .shared_faces[std::get<2>(
                                            other_range[i - 1])]
                                          .second ==
-                                       it->second
+                                       inner_face.second
                                          .shared_faces[std::get<2>(
                                            other_range[i - 1 - k])]
                                          .second,
@@ -444,13 +442,13 @@
               // faces that always get assigned to one side, and the faces we
               // have already assigned due to the criterion above
               n_faces_lower_proc +=
-                it->second.n_hanging_faces_smaller_subdomain;
+                inner_face.second.n_hanging_faces_smaller_subdomain;
               n_faces_higher_proc +=
-                it->second.n_hanging_faces_larger_subdomain;
+                inner_face.second.n_hanging_faces_larger_subdomain;
               const unsigned int n_total_faces_at_proc_boundary =
-                (it->second.shared_faces.size() +
-                 it->second.n_hanging_faces_smaller_subdomain +
-                 it->second.n_hanging_faces_larger_subdomain);
+                (inner_face.second.shared_faces.size() +
+                 inner_face.second.n_hanging_faces_smaller_subdomain +
+                 inner_face.second.n_hanging_faces_larger_subdomain);
               unsigned int split_index = n_total_faces_at_proc_boundary / 2;
               if (split_index < n_faces_lower_proc)
                 split_index = 0;
@@ -466,39 +464,39 @@
               MPI_Sendrecv(&split_index,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            900 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           900 + it->first,
+                           inner_face.first,
+                           900 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(split_index, othersize);
               MPI_Sendrecv(&n_faces_lower_proc,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            1000 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           1000 + it->first,
+                           inner_face.first,
+                           1000 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(n_faces_lower_proc, othersize);
               MPI_Sendrecv(&n_faces_higher_proc,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
+                           inner_face.first,
                            1100 + my_domain,
                            &othersize,
                            1,
                            MPI_UNSIGNED,
-                           it->first,
-                           1100 + it->first,
+                           inner_face.first,
+                           1100 + inner_face.first,
                            comm,
                            &status);
               AssertDimension(n_faces_higher_proc, othersize);
@@ -509,11 +507,13 @@
                 owned_faces_higher;
               for (unsigned int i = 0; i < assignment.size(); ++i)
                 if (assignment[i] < 0)
-                  owned_faces_lower.push_back(it->second.shared_faces[i]);
+                  owned_faces_lower.push_back(
+                    inner_face.second.shared_faces[i]);
                 else if (assignment[i] > 0)
-                  owned_faces_higher.push_back(it->second.shared_faces[i]);
+                  owned_faces_higher.push_back(
+                    inner_face.second.shared_faces[i]);
 
               AssertIndexRange(split_index,
-                               it->second.shared_faces.size() + 1 -
+                               inner_face.second.shared_faces.size() + 1 -
                                  owned_faces_lower.size() -
                                  owned_faces_higher.size());
@@ -521,13 +521,15 @@
               for (; i < assignment.size() && c < split_index; ++i)
                 if (assignment[i] == 0)
                   {
-                    owned_faces_lower.push_back(it->second.shared_faces[i]);
+                    owned_faces_lower.push_back(
+                      inner_face.second.shared_faces[i]);
                     ++c;
                   }
               for (; i < assignment.size(); ++i)
                 if (assignment[i] == 0)
                   {
-                    owned_faces_higher.push_back(it->second.shared_faces[i]);
+                    owned_faces_higher.push_back(
+                      inner_face.second.shared_faces[i]);
                   }
 
 # ifdef DEBUG
@@ -541,20 +543,20 @@
                                  owned_faces_higher.end());
               std::sort(check_faces.begin(), check_faces.end());
               AssertDimension(check_faces.size(),
-                              it->second.shared_faces.size());
+                              inner_face.second.shared_faces.size());
               for (unsigned int i = 0; i < check_faces.size(); ++i)
-                Assert(check_faces[i] == it->second.shared_faces[i],
+                Assert(check_faces[i] == inner_face.second.shared_faces[i],
                        ExcInternalError());
 # endif
 
               // now only set half of the faces as the ones to keep
-              if (my_domain < it->first)
-                it->second.shared_faces.swap(owned_faces_lower);
+              if (my_domain < inner_face.first)
+                inner_face.second.shared_faces.swap(owned_faces_lower);
               else
-                it->second.shared_faces.swap(owned_faces_higher);
+                inner_face.second.shared_faces.swap(owned_faces_higher);
 
-              std::sort(it->second.shared_faces.begin(),
-                        it->second.shared_faces.end());
+              std::sort(inner_face.second.shared_faces.begin(),
+                        inner_face.second.shared_faces.end());
             }
         }
@@ -717,11 +719,8 @@ namespace internal
 
       // step 2: append the ghost cells at the end of the locally owned
      // cells
-      for (std::set<std::pair<unsigned int, unsigned int>>::iterator it =
-             ghost_cells.begin();
-           it != ghost_cells.end();
-           ++it)
-        cell_levels.push_back(*it);
+      for (const auto &ghost_cell : ghost_cells)
+        cell_levels.push_back(ghost_cell);
 
       // step 3: clean up the cells close to the boundary
       std::sort(cells_close_to_boundary.begin(),
                 cells_close_to_boundary.end());
@@ -1108,13 +1107,13 @@ namespace internal
       // to the face type
       for (unsigned int face = face_start; face < face_end; ++face)
         {
-          for (unsigned int type = 0; type < faces_type.size(); ++type)
+          for (auto &face_type : faces_type)
            {
              // Compare current face with first face of type type
-              if (compare_faces_for_vectorization(
-                    faces_in[face], faces_in[faces_type[type][0]]))
+              if (compare_faces_for_vectorization(faces_in[face],
+                                                  faces_in[face_type[0]]))
                {
-                  faces_type[type].push_back(face);
+                  face_type.push_back(face);
                  goto face_found;
                }
            }
@@ -1127,17 +1126,16 @@
           std::set, FaceComparator> new_faces;
-          for (unsigned int type = 0; type < faces_type.size(); ++type)
+          for (const auto &face_type : faces_type)
             {
               macro_face.interior_face_no =
-                faces_in[faces_type[type][0]].interior_face_no;
+                faces_in[face_type[0]].interior_face_no;
               macro_face.exterior_face_no =
-                faces_in[faces_type[type][0]].exterior_face_no;
-              macro_face.subface_index =
-                faces_in[faces_type[type][0]].subface_index;
+                faces_in[face_type[0]].exterior_face_no;
+              macro_face.subface_index = faces_in[face_type[0]].subface_index;
               macro_face.face_orientation =
-                faces_in[faces_type[type][0]].face_orientation;
-              unsigned int no_faces = faces_type[type].size();
+                faces_in[face_type[0]].face_orientation;
+              unsigned int no_faces = face_type.size();
               std::vector touched(no_faces, 0);
 
               // do two passes through the data. The first is to identify
@@ -1146,7 +1144,7 @@
               // all the rest
               unsigned int n_vectorized = 0;
               for (unsigned int f = 0; f < no_faces; ++f)
-                if (faces_in[faces_type[type][f]].cells_interior[0] %
+                if (faces_in[face_type[f]].cells_interior[0] %
                       vectorization_width ==
                     0)
                   {
@@ -1155,23 +1153,20 @@
                       is_contiguous = false;
                     else
                       for (unsigned int v = 1; v < vectorization_width; ++v)
-                        if (faces_in[faces_type[type][f + v]]
-                              .cells_interior[0] !=
-                            faces_in[faces_type[type][f]].cells_interior[0] + v)
+                        if (faces_in[face_type[f + v]].cells_interior[0] !=
+                            faces_in[face_type[f]].cells_interior[0] + v)
                           is_contiguous = false;
                     if (is_contiguous)
                       {
                         AssertIndexRange(f,
-                                         faces_type[type].size() -
+                                         face_type.size() -
                                            vectorization_width + 1);
                         for (unsigned int v = 0; v < vectorization_width; ++v)
                           {
                             macro_face.cells_interior[v] =
-                              faces_in[faces_type[type][f + v]]
-                                .cells_interior[0];
+                              faces_in[face_type[f + v]].cells_interior[0];
                             macro_face.cells_exterior[v] =
-                              faces_in[faces_type[type][f + v]]
-                                .cells_exterior[0];
+                              faces_in[face_type[f + v]].cells_exterior[0];
                             touched[f + v] = 1;
                           }
                         new_faces.insert(macro_face);
@@ -1186,12 +1181,12 @@
                 if (touched[f] == 0)
                   untouched.push_back(f);
               unsigned int v = 0;
-              for (auto f : untouched)
+              for (const auto f : untouched)
                 {
                   macro_face.cells_interior[v] =
-                    faces_in[faces_type[type][f]].cells_interior[0];
+                    faces_in[face_type[f]].cells_interior[0];
                   macro_face.cells_exterior[v] =
-                    faces_in[faces_type[type][f]].cells_exterior[0];
+                    faces_in[face_type[f]].cells_exterior[0];
                   ++v;
                   if (v == vectorization_width)
                     {
@@ -1220,8 +1215,7 @@
                       // postpone to the next partition
                       std::vector untreated(v);
                       for (unsigned int f = 0; f < v; ++f)
-                        untreated[f] =
-                          faces_type[type][*(untouched.end() - 1 - f)];
+                        untreated[f] = face_type[*(untouched.end() - 1 - f)];
                       new_faces_type.push_back(untreated);
                     }
                 }
@@ -1238,8 +1232,8 @@
 # ifdef DEBUG
       // final safety checks
-      for (unsigned int i = 0; i < faces_type.size(); ++i)
-        AssertDimension(faces_type[i].size(), 0U);
+      for (const auto &face_type : faces_type)
+        AssertDimension(face_type.size(), 0U);
 
       AssertDimension(faces_out.size(), face_partition_data.back());
       unsigned int nfaces = 0;
@@ -1252,9 +1246,9 @@
       AssertDimension(nfaces, faces_in.size());
 
       std::vector<std::pair<unsigned int, unsigned int>> in_faces, out_faces;
-      for (unsigned int i = 0; i < faces_in.size(); ++i)
-        in_faces.emplace_back(faces_in[i].cells_interior[0],
-                              faces_in[i].cells_exterior[0]);
+      for (const auto &face_in : faces_in)
+        in_faces.emplace_back(face_in.cells_interior[0],
+                              face_in.cells_exterior[0]);
       for (unsigned int i = face_partition_data[0];
            i < face_partition_data.back();
            ++i)
diff --git a/include/deal.II/matrix_free/mapping_info.templates.h b/include/deal.II/matrix_free/mapping_info.templates.h
index e64815cd18..8da87d9807 100644
--- a/include/deal.II/matrix_free/mapping_info.templates.h
+++ b/include/deal.II/matrix_free/mapping_info.templates.h
@@ -1133,7 +1133,7 @@ namespace internal
         // other tasks can already start copying the non-constant data)
         if (my_q == 0)
           {
-            for (auto &it : data_cells_local[0].second.data)
+            for (const auto &it : data_cells_local[0].second.data)
               {
                 Tensor<2, dim, VectorizedArray<Number>> jac;
                 for (unsigned int d = 0; d < dim; ++d)
@@ -1809,7 +1809,7 @@ namespace internal
           {
             const Number jac_size =
               ExtractCellHelper::get_jacobian_size(tria);
-            for (auto &it : data_faces_local[0].second.data)
+            for (const auto &it : data_faces_local[0].second.data)
               {
                 // JxW values; invert previously applied scaling
                 for (unsigned int v = 0;
diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h
index 4d097a575d..d790a74b8c 100644
--- a/include/deal.II/matrix_free/matrix_free.templates.h
+++ b/include/deal.II/matrix_free/matrix_free.templates.h
@@ -1038,8 +1038,8 @@ MatrixFree<dim, Number>::initialize_indices(
                             .dofs_per_cell);
                     cell->neighbor_or_periodic_neighbor(f)
                       ->get_mg_dof_indices(dof_indices);
-                    for (unsigned int i = 0; i < dof_indices.size(); ++i)
-                      dof_info[no].ghost_dofs.push_back(dof_indices[i]);
+                    for (const auto dof_index : dof_indices)
+                      dof_info[no].ghost_dofs.push_back(dof_index);
                   }
               }
           dof_info[no].assign_ghosts(cells_with_ghosts);
@@ -1138,9 +1138,8 @@ MatrixFree<dim, Number>::initialize_indices(
       counter = 0;
       for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++)
         {
-          for (unsigned int jj = 0; jj < renumbering_fe_index[j].size();
-               jj++)
-            renumbering[counter++] = renumbering_fe_index[j][jj];
+          for (const auto jj : renumbering_fe_index[j])
+            renumbering[counter++] = jj;
           irregular_cells[renumbering_fe_index[j].size() /
                             vectorization_length +
                           n_macro_cells_before] =
@@ -1164,9 +1163,8 @@ MatrixFree<dim, Number>::initialize_indices(
       counter = start_nonboundary * vectorization_length;
       for (unsigned int j = 0; j < dof_info[0].max_fe_index; j++)
        {
-          for (unsigned int jj = 0; jj < renumbering_fe_index[j].size();
-               jj++)
-            renumbering[counter++] = renumbering_fe_index[j][jj];
+          for (const auto jj : renumbering_fe_index[j])
+            renumbering[counter++] = jj;
          irregular_cells[renumbering_fe_index[j].size() /
                            vectorization_length +
                          n_macro_cells_before] =
@@ -1302,12 +1300,12 @@ MatrixFree<dim, Number>::initialize_indices(
   constraint_pool_data.reserve(length);
   constraint_pool_row_index.reserve(constraint_values.constraints.size() + 1);
   constraint_pool_row_index.resize(1, 0);
-  for (unsigned int i = 0; i < constraints.size(); ++i)
+  for (const auto &constraint : constraints)
     {
-      Assert(constraints[i] != nullptr, ExcInternalError());
+      Assert(constraint != nullptr, ExcInternalError());
       constraint_pool_data.insert(constraint_pool_data.end(),
-                                  constraints[i]->begin(),
-                                  constraints[i]->end());
+                                  constraint->begin(),
+                                  constraint->end());
       constraint_pool_row_index.push_back(constraint_pool_data.size());
     }
@@ -1337,8 +1335,9 @@ MatrixFree<dim, Number>::initialize_indices(
       task_info.face_partition_data.size())
     hard_vectorization_boundary[task_info.partition_row_index[2]] = true;
   else
-    for (unsigned int i = 0; i < hard_vectorization_boundary.size(); ++i)
-      hard_vectorization_boundary[i] = true;
+    std::fill(hard_vectorization_boundary.begin(),
+              hard_vectorization_boundary.end(),
+              true);
 
   internal::MatrixFreeFunctions::collect_faces_vectorization(
     face_setup.inner_faces,
diff --git a/include/deal.II/matrix_free/operators.h b/include/deal.II/matrix_free/operators.h
index f09d56a138..eb35e1ebe9 100644
--- a/include/deal.II/matrix_free/operators.h
+++ b/include/deal.II/matrix_free/operators.h
@@ -1288,10 +1288,10 @@ namespace MatrixFreeOperators
         edge_constrained_values[j].resize(interface_indices.size());
         const IndexSet &locally_owned =
           data->get_dof_handler(selected_rows[j]).locally_owned_mg_dofs(level);
-        for (unsigned int i = 0; i < interface_indices.size(); ++i)
-          if (locally_owned.is_element(interface_indices[i]))
+        for (const auto interface_index : interface_indices)
+          if (locally_owned.is_element(interface_index))
             edge_constrained_indices[j].push_back(
-              locally_owned.index_within_set(interface_indices[i]));
+              locally_owned.index_within_set(interface_index));
 
         have_interface_matrices |= Utilities::MPI::max(
          static_cast<unsigned int>(edge_constrained_indices[j].size()),
@@ -1309,8 +1309,8 @@ namespace MatrixFreeOperators
       {
         const std::vector<unsigned int> &constrained_dofs =
           data->get_constrained_dofs(selected_rows[j]);
-        for (unsigned int i = 0; i < constrained_dofs.size(); ++i)
-          BlockHelper::subblock(dst, j).local_element(constrained_dofs[i]) = 1.;
+        for (const auto constrained_dof : constrained_dofs)
+          BlockHelper::subblock(dst, j).local_element(constrained_dof) = 1.;
         for (unsigned int i = 0; i < edge_constrained_indices[j].size(); ++i)
           BlockHelper::subblock(dst, j).local_element(
             edge_constrained_indices[j][i]) = 1.;
@@ -1442,9 +1442,9 @@ namespace MatrixFreeOperators
       {
         const std::vector<unsigned int> &constrained_dofs =
           data->get_constrained_dofs(selected_rows[j]);
-        for (unsigned int i = 0; i < constrained_dofs.size(); ++i)
-          BlockHelper::subblock(dst, j).local_element(constrained_dofs[i]) +=
-            BlockHelper::subblock(src, j).local_element(constrained_dofs[i]);
+        for (const auto constrained_dof : constrained_dofs)
+          BlockHelper::subblock(dst, j).local_element(constrained_dof) +=
+            BlockHelper::subblock(src, j).local_element(constrained_dof);
       }
 
     // reset edge constrained values, multiply by unit matrix and add into
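
The hunks above all apply the same loop-modernization pattern: index- and iterator-based loops become range-based for loops, with const references for heavyweight elements, by-value const copies for cheap integer indices, mutable references where elements are written, and std::fill where a loop only assigned a constant. A minimal, self-contained sketch of that pattern (not taken from deal.II; the containers and values below are made up for illustration) is:

    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    int main()
    {
      // Heavyweight elements: iterate by const reference to avoid copies,
      // mirroring the change from `for (auto cell : ...)` to
      // `for (const auto &cell : ...)`.
      const std::vector<std::string> names{"velocity", "pressure"};
      for (const auto &name : names)
        assert(!name.empty());

      // Cheap-to-copy elements: a plain `const auto` copy is enough,
      // mirroring `for (const auto dof_index : dof_indices)`.
      std::vector<unsigned int> indices{2, 0, 1};
      for (const auto index : indices)
        assert(index < indices.size());

      // Elements that must be modified: iterate by non-const reference,
      // mirroring the renumbering loops in dof_info.templates.h.
      for (auto &index : indices)
        index += 1;

      // A loop that only assigns one value becomes std::fill,
      // mirroring the hard_vectorization_boundary change in
      // matrix_free.templates.h.
      std::vector<bool> flags(4, false);
      std::fill(flags.begin(), flags.end(), true);

      return 0;
    }

The `const auto &` form is the safe default; the by-value form is only preferable for small, trivially copyable element types such as the integer indices used here.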