# include <deal.II/arborx/access_traits.h>
# include <ArborX_LinearBVH.hpp>
+
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# include <Kokkos_Core.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
DEAL_II_NAMESPACE_OPEN
# include <deal.II/arborx/access_traits.h>
# include <ArborX_DistributedTree.hpp>
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# include <Kokkos_Core.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
DEAL_II_NAMESPACE_OPEN
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_kokkos_h
+#define dealii_kokkos_h
+
+#include <deal.II/base/config.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ /**
+ * Records if Kokkos has been initialized by deal.II. The value stored is only
+ * meaningful after ensure_kokkos_initialized() has been called.
+ */
+ extern bool dealii_initialized_kokkos;
+
+ /**
+ * Makes sure that Kokkos is initialized. Sets dealii_initialized_kokkos.
+ */
+ void
+ ensure_kokkos_initialized();
+} // namespace internal
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
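
A usage sketch for this new header (the helper function is hypothetical, not part of this patch): code that may construct Kokkos objects before the user had a chance to call Kokkos::initialize() simply calls ensure_kokkos_initialized() first.

#include <deal.II/base/kokkos.h>

#include <Kokkos_Core.hpp>

#include <cstddef>

// Hypothetical helper: it is safe to create Views here whether or not
// user code has already initialized Kokkos itself.
Kokkos::View<double *, Kokkos::HostSpace>
make_host_view(const std::size_t n)
{
  dealii::internal::ensure_kokkos_initialized();
  return Kokkos::View<double *, Kokkos::HostSpace>("tmp", n);
}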
#include <deal.II/base/config.h>
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <Kokkos_Core.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
+
DEAL_II_NAMESPACE_OPEN
/**
* Structure describing Host memory space.
*/
struct Host
- {};
-
+ {
+ using kokkos_space = ::Kokkos::HostSpace;
+ };
+ /**
+ * Structure describing the default memory space. If Kokkos was configured
+ * with a GPU backend, the default memory space is the one corresponding to
+ * that backend. Otherwise, the default memory space is the same as the Host
+ * memory space.
+ */
+ struct Default
+ {
+ using kokkos_space = ::Kokkos::DefaultExecutionSpace::memory_space;
+ };
/**
* Structure describing CUDA memory space.
*/
- struct CUDA
- {};
+ // FIXME Only enable if CUDA is enabled in deal.II.
+ using CUDA = Default;
} // namespace MemorySpace
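
To make the mapping concrete, a minimal sketch; it assumes the header edited here is deal.II's include/deal.II/base/memory_space.h and that Kokkos was configured without a GPU backend (with, e.g., a CUDA backend, Default::kokkos_space would instead be Kokkos::CudaSpace).

#include <deal.II/base/memory_space.h>

#include <Kokkos_Core.hpp>

#include <type_traits>

// Host always maps to Kokkos::HostSpace.
static_assert(std::is_same<dealii::MemorySpace::Host::kokkos_space,
                           Kokkos::HostSpace>::value,
              "Host data lives in Kokkos::HostSpace");

// In a host-only build the default execution space is a host space, so
// Default (and thus the CUDA alias above) also maps to Kokkos::HostSpace.
static_assert(std::is_same<dealii::MemorySpace::Default::kokkos_space,
                           Kokkos::HostSpace>::value,
              "host-only build: Default coincides with Host");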
#include <deal.II/base/cuda.h>
#include <deal.II/base/exceptions.h>
+#include <deal.II/base/kokkos.h>
+
+DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
+#include <Kokkos_Core.hpp>
+DEAL_II_ENABLE_EXTRA_DIAGNOSTICS
#include <functional>
#include <memory>
namespace MemorySpace
{
/**
- * Data structure
+ * Structure which stores data on the host or the device depending on the
+ * template parameter @p MemorySpace. Valid choices are MemorySpace::Host,
+ * MemorySpace::Default, and MemorySpace::CUDA (if CUDA was enabled in
+ * deal.II). The data is copied into the structure, which then owns the data
+ * and will release the memory when the destructor is called.
*/
- template <typename Number, typename MemorySpace>
+ template <typename T, typename MemorySpace>
struct MemorySpaceData
{
- MemorySpaceData()
- {
- static_assert(std::is_same<MemorySpace, Host>::value ||
- std::is_same<MemorySpace, CUDA>::value,
- "MemorySpace should be Host or CUDA");
- }
+ MemorySpaceData();
/**
- * Copy the active data (values for Host and values_dev for CUDA) to @p begin.
+ * Copy the class member values to @p begin.
* If the data is on the device it is moved to the host.
*/
void
- copy_to(Number *begin, std::size_t n_elements)
- {
- (void)begin;
- (void)n_elements;
- }
+ copy_to(T *begin, const std::size_t n_elements);
/**
- * Copy the data in @p begin to the active data of the structure (values for
- * Host and values_dev for CUDA). The pointer @p begin must be on the host.
+ * Copy the data in @p begin to the class member values.
+ * The pointer @p begin must be on the host.
*/
void
- copy_from(Number *begin, std::size_t n_elements)
- {
- (void)begin;
- (void)n_elements;
- }
+ copy_from(const T *begin, const std::size_t n_elements);
+
+ /**
+ * Kokkos View owning a host buffer used for MPI communication.
+ */
+ // FIXME Should we move this somewhere else?
+ Kokkos::View<T *, Kokkos::HostSpace> values_host_buffer;
/**
- * Pointer to data on the host.
+ * Kokkos View owning the data on the device (unless @p values_sm_ptr is used).
*/
- std::unique_ptr<Number[], std::function<void(Number *)>> values;
+ Kokkos::View<T *, typename MemorySpace::kokkos_space> values;
/**
- * Pointer to data on the device.
+ * Pointer to data on the host. The pointer points to the same data as
+ * @p values when using shared memory and the memory space is
+ * MemorySpace::Host. Otherwise it is not set.
*/
- std::unique_ptr<Number[]> values_dev;
+ // This is a shared pointer so that MemorySpaceData can be copied and
+ // MemorySpaceData::values can be used in Kokkos::parallel_for. This
+ // pointer owns the data when using shared memory with MPI. In this case,
+ // the Kokkos::View @p values is non-owning. When shared memory with MPI is
+ // not used, @p values_sm_ptr is unused.
+ std::shared_ptr<T> values_sm_ptr;
/**
* Pointers to the data of the processes sharing the same memory.
*/
- std::vector<ArrayView<const Number>> values_sm;
+ std::vector<ArrayView<const T>> values_sm;
};
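
A minimal usage sketch of the structure on the host (hypothetical; it assumes the header edited here is included and uses only members declared above):

#include <Kokkos_Core.hpp>

void
round_trip()
{
  using HostData =
    dealii::MemorySpace::MemorySpaceData<double, dealii::MemorySpace::Host>;

  HostData data;                  // constructor ensures Kokkos is initialized
  Kokkos::resize(data.values, 4); // storage owned by the View

  const double src[4] = {1., 2., 3., 4.};
  data.copy_from(src, 4); // host pointer -> owned storage

  double dst[4] = {};
  data.copy_to(dst, 4); // owned storage -> host pointer
}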
/**
* Swap function similar to std::swap.
*/
- template <typename Number, typename MemorySpace>
+ template <typename T, typename MemorySpace>
inline void
- swap(MemorySpaceData<Number, MemorySpace> &,
- MemorySpaceData<Number, MemorySpace> &)
- {
- static_assert(std::is_same<MemorySpace, Host>::value ||
- std::is_same<MemorySpace, CUDA>::value,
- "MemorySpace should be Host or CUDA");
- }
-
-#ifndef DOXYGEN
+ swap(MemorySpaceData<T, MemorySpace> &u, MemorySpaceData<T, MemorySpace> &v);
- template <typename Number>
- struct MemorySpaceData<Number, Host>
- {
- MemorySpaceData()
- : values(nullptr, &std::free)
- {}
-
- void
- copy_to(Number *begin, std::size_t n_elements)
- {
- std::copy(values.get(), values.get() + n_elements, begin);
- }
-
- void
- copy_from(Number *begin, std::size_t n_elements)
- {
- std::copy(begin, begin + n_elements, values.get());
- }
-
- std::unique_ptr<Number[], std::function<void(Number *)>> values;
- // This is not used but it allows to simplify the code until we start using
- // CUDA-aware MPI.
- std::unique_ptr<Number[]> values_dev;
+#ifndef DOXYGEN
- std::vector<ArrayView<const Number>> values_sm;
- };
+ template <typename T, typename MemorySpace>
+ MemorySpaceData<T, MemorySpace>::MemorySpaceData()
+ : values_host_buffer(
+ (dealii::internal::ensure_kokkos_initialized(),
+ Kokkos::View<T *, Kokkos::HostSpace>("host buffer", 0)))
+ , values(Kokkos::View<T *, typename MemorySpace::kokkos_space>(
+ "memoryspace data",
+ 0))
+ {}
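
The member initializer above uses the comma operator so that ensure_kokkos_initialized() is guaranteed to run before the first Kokkos::View is constructed. The same idiom in isolation, with hypothetical stand-in names:

#include <vector>

void
prepare()
{
  // Stands in for dealii::internal::ensure_kokkos_initialized().
}

std::vector<int>
make_member()
{
  // Stands in for constructing the Kokkos::View member.
  return std::vector<int>(8);
}

struct Holder
{
  Holder()
    // The comma operator evaluates prepare() first, discards its result,
    // and only then evaluates make_member() to initialize the member.
    : member((prepare(), make_member()))
  {}

  std::vector<int> member;
};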
- template <typename Number>
- inline void
- swap(MemorySpaceData<Number, Host> &u, MemorySpaceData<Number, Host> &v)
+ template <typename T, typename MemorySpace>
+ void
+ MemorySpaceData<T, MemorySpace>::copy_to(T * begin,
+ const std::size_t n_elements)
{
- std::swap(u.values, v.values);
+ Assert(n_elements <= values.extent(0),
+ ExcMessage(
+ "n_elements is greater than the size of MemorySpaceData."));
+ using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;
+ Kokkos::
+ View<T *, Kokkos::HostSpace, Kokkos::MemoryTraits<Kokkos::Unmanaged>>
+ begin_view(begin, n_elements);
+ Kokkos::deep_copy(
+ ExecutionSpace{},
+ begin_view,
+ Kokkos::subview(values, Kokkos::make_pair(std::size_t(0), n_elements)));
+ ExecutionSpace{}.fence();
}
-# ifdef DEAL_II_COMPILER_CUDA_AWARE
-
- template <typename Number>
- struct MemorySpaceData<Number, CUDA>
+ template <typename T, typename MemorySpace>
+ void
+ MemorySpaceData<T, MemorySpace>::copy_from(const T * begin,
+ const std::size_t n_elements)
{
- MemorySpaceData()
- : values(nullptr, &std::free)
- , values_dev(nullptr, Utilities::CUDA::delete_device_data<Number>)
- {}
-
- void
- copy_to(Number *begin, std::size_t n_elements)
- {
- const cudaError_t cuda_error_code =
- cudaMemcpy(begin,
- values_dev.get(),
- n_elements * sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error_code);
- }
-
- void
- copy_from(Number *begin, std::size_t n_elements)
- {
- const cudaError_t cuda_error_code =
- cudaMemcpy(values_dev.get(),
- begin,
- n_elements * sizeof(Number),
- cudaMemcpyHostToDevice);
- AssertCuda(cuda_error_code);
- }
-
- std::unique_ptr<Number[], std::function<void(Number *)>> values;
- std::unique_ptr<Number[], void (*)(Number *)> values_dev;
-
- /**
- * This is currently not used.
- */
- std::vector<ArrayView<const Number>> values_sm;
- };
+ Assert(n_elements <= values.extent(0),
+ ExcMessage(
+ "n_elements is greater than the size of MemorySpaceData."));
+ using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;
+ Kokkos::View<const T *,
+ Kokkos::HostSpace,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>
+ begin_view(begin, n_elements);
+ Kokkos::deep_copy(
+ ExecutionSpace{},
+ Kokkos::subview(values, Kokkos::make_pair(std::size_t(0), n_elements)),
+ begin_view);
+ ExecutionSpace{}.fence();
+ }
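
Both copy functions rely on the same Kokkos idiom: wrap the raw host pointer in an unmanaged (non-owning) View, deep_copy between it and a subview of the owning View, and fence because the copy was issued on an asynchronous execution space instance. The idiom in isolation (a sketch, assuming the default execution space):

#include <Kokkos_Core.hpp>

#include <cstddef>

void
fill_first_n(double *raw, const std::size_t n)
{
  // Non-owning view over caller-provided host memory.
  Kokkos::View<double *,
               Kokkos::HostSpace,
               Kokkos::MemoryTraits<Kokkos::Unmanaged>>
    raw_view(raw, n);

  // Owning (possibly device-resident) storage.
  Kokkos::View<double *> owned("owned", 2 * n);

  // Copy into the first n entries of the owning view, then fence the
  // execution space instance the copy was issued on.
  Kokkos::DefaultExecutionSpace exec;
  Kokkos::deep_copy(exec,
                    Kokkos::subview(owned,
                                    Kokkos::make_pair(std::size_t(0), n)),
                    raw_view);
  exec.fence();
}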
- template <typename Number>
+ /**
+ * Swap function similar to std::swap.
+ */
+ template <typename T, typename MemorySpace>
inline void
- swap(MemorySpaceData<Number, CUDA> &u, MemorySpaceData<Number, CUDA> &v)
+ swap(MemorySpaceData<T, MemorySpace> &u, MemorySpaceData<T, MemorySpace> &v)
{
+ std::swap(u.values_host_buffer, v.values_host_buffer);
std::swap(u.values, v.values);
- std::swap(u.values_dev, v.values_dev);
+ std::swap(u.values_sm_ptr, v.values_sm_ptr);
}
-# endif
-
#endif
} // namespace MemorySpace
#ifndef DOXYGEN
- namespace internal
- {
- template <typename Number, typename MemorySpace>
- struct Policy
- {
- static inline typename Vector<Number, MemorySpace>::iterator
- begin(::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
- {
- return nullptr;
- }
-
- static inline typename Vector<Number, MemorySpace>::const_iterator
- begin(
- const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
- {
- return nullptr;
- }
-
- static inline Number *
- get_values(
- ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace> &)
- {
- return nullptr;
- }
- };
-
-
-
- template <typename Number>
- struct Policy<Number, ::dealii::MemorySpace::Host>
- {
- static inline
- typename Vector<Number, ::dealii::MemorySpace::Host>::iterator
- begin(::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
- {
- return data.values.get();
- }
-
- static inline
- typename Vector<Number, ::dealii::MemorySpace::Host>::const_iterator
- begin(const ::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
- {
- return data.values.get();
- }
-
- static inline Number *
- get_values(::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
- {
- return data.values.get();
- }
- };
-
-
-
- template <typename Number>
- struct Policy<Number, ::dealii::MemorySpace::CUDA>
- {
- static inline
- typename Vector<Number, ::dealii::MemorySpace::CUDA>::iterator
- begin(::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
- {
- return data.values_dev.get();
- }
-
- static inline
- typename Vector<Number, ::dealii::MemorySpace::CUDA>::const_iterator
- begin(const ::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
- {
- return data.values_dev.get();
- }
-
- static inline Number *
- get_values(::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
- {
- return data.values_dev.get();
- }
- };
- } // namespace internal
-
-
template <typename Number, typename MemorySpace>
inline bool
Vector<Number, MemorySpace>::has_ghost_elements() const
inline typename Vector<Number, MemorySpace>::iterator
Vector<Number, MemorySpace>::begin()
{
- return internal::Policy<Number, MemorySpace>::begin(data);
+ return data.values.data();
}
inline typename Vector<Number, MemorySpace>::const_iterator
Vector<Number, MemorySpace>::begin() const
{
- return internal::Policy<Number, MemorySpace>::begin(data);
+ return data.values.data();
}
inline typename Vector<Number, MemorySpace>::iterator
Vector<Number, MemorySpace>::end()
{
- return internal::Policy<Number, MemorySpace>::begin(data) +
- partitioner->locally_owned_size();
+ return data.values.data() + partitioner->locally_owned_size();
}
inline typename Vector<Number, MemorySpace>::const_iterator
Vector<Number, MemorySpace>::end() const
{
- return internal::Policy<Number, MemorySpace>::begin(data) +
- partitioner->locally_owned_size();
+ return data.values.data() + partitioner->locally_owned_size();
}
inline Number *
Vector<Number, MemorySpace>::get_values() const
{
- return internal::Policy<Number, MemorySpace>::get_values(data);
+ return data.values.data();
}
{
if (comm_shared == MPI_COMM_SELF)
{
- Number *new_val;
- Utilities::System::posix_memalign(
- reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * new_alloc_size);
- data.values = {new_val, [](Number *data) { std::free(data); }};
+ Kokkos::resize(data.values, new_alloc_size);
allocated_size = new_alloc_size;
data.values_sm = {
- ArrayView<const Number>(data.values.get(), new_alloc_size)};
+ ArrayView<const Number>(data.values.data(), new_alloc_size)};
}
else
{
data.values_sm[i] =
ArrayView<const Number>(others[i], new_alloc_sizes[i]);
- data.values = {ptr_aligned, [mpi_window](Number *) mutable {
- // note: we are creating here a copy of the
- // window other approaches led to segmentation
- // faults
- const auto ierr = MPI_Win_free(&mpi_window);
- AssertThrowMPI(ierr);
- }};
+ data.values =
+ Kokkos::View<Number *,
+ Kokkos::HostSpace,
+ Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
+ ptr_aligned, new_alloc_size);
+
+ // Kokkos will not free the memory because the memory is
+ // unmanaged. Instead we use a shared pointer to take care of
+ // that.
+ data.values_sm_ptr = {ptr_aligned,
+ [mpi_window](Number *) mutable {
+ // note: we create a copy of the
+ // window here; other approaches
+ // led to segmentation faults
+ const auto ierr =
+ MPI_Win_free(&mpi_window);
+ AssertThrowMPI(ierr);
+ }};
+
#else
Assert(false, ExcInternalError());
#endif
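
The shared-memory branch above splits ownership: the unmanaged View gives Kokkos access to the MPI-window memory without ever freeing it, while the shared_ptr's custom deleter is the single owner responsible for releasing the window. The pattern in isolation, with hypothetical acquire()/release() functions standing in for the MPI window allocation and MPI_Win_free:

#include <Kokkos_Core.hpp>

#include <cstddef>
#include <memory>

// Hypothetical stand-ins for the MPI shared-memory window calls above.
double *
acquire(const std::size_t n);
void
release(double *p);

struct SharedStorage
{
  Kokkos::View<double *,
               Kokkos::HostSpace,
               Kokkos::MemoryTraits<Kokkos::Unmanaged>>
                          view;  // non-owning: Kokkos never frees this memory
  std::shared_ptr<double> owner; // owning: its deleter releases the memory
};

SharedStorage
make_storage(const std::size_t n)
{
  double *raw = acquire(n);
  return {Kokkos::View<double *,
                       Kokkos::HostSpace,
                       Kokkos::MemoryTraits<Kokkos::Unmanaged>>(raw, n),
          std::shared_ptr<double>(raw, [](double *p) { release(p); })};
}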
if (new_alloc_size > allocated_size)
{
- Assert(((allocated_size > 0 && data.values_dev != nullptr) ||
- data.values_dev == nullptr),
+ Assert(((allocated_size > 0 && data.values.size() != 0) ||
+ data.values.size() == 0),
ExcInternalError());
- Number *new_val_dev;
- Utilities::CUDA::malloc(new_val_dev, new_alloc_size);
- data.values_dev.reset(new_val_dev);
+ Kokkos::resize(data.values, new_alloc_size);
allocated_size = new_alloc_size;
}
else if (new_alloc_size == 0)
{
- data.values_dev.reset();
+ Kokkos::resize(data.values, 0);
allocated_size = 0;
}
}
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated<
Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
indices_dev,
- data.values_dev.get(),
+ data.values.data(),
tmp_vector.begin(),
tmp_n_elements);
else
::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<
Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
indices_dev,
- data.values_dev.get(),
+ data.values.data(),
tmp_vector.begin(),
tmp_n_elements);
Number,
::dealii::LinearAlgebra::CUDAWrappers::kernel::LInfty<Number>>
<<<dim3(n_blocks, 1), dim3(::dealii::CUDAWrappers::block_size)>>>(
- result_device, data.values_dev.get(), size);
+ result_device, data.values.data(), size);
// Copy the result back to the host
error_code = cudaMemcpy(&result,
resize_val(size, comm_sm);
// delete previous content in import data
- import_data.values.reset();
- import_data.values_dev.reset();
+ Kokkos::resize(import_data.values_host_buffer, 0);
+ Kokkos::resize(import_data.values, 0);
// set partitioner to serial version
partitioner = std::make_shared<Utilities::MPI::Partitioner>(size);
resize_val(local_size + ghost_size, comm_sm);
// delete previous content in import data
- import_data.values.reset();
- import_data.values_dev.reset();
+ Kokkos::resize(import_data.values_host_buffer, 0);
+ Kokkos::resize(import_data.values, 0);
// create partitioner
partitioner = std::make_shared<Utilities::MPI::Partitioner>(local_size,
// is only used as temporary storage for compress() and
// update_ghost_values, and we might have vectors where we never
// call these methods and hence do not need to have the storage.
- import_data.values.reset();
- import_data.values_dev.reset();
+ Kokkos::resize(import_data.values_host_buffer, 0);
+ Kokkos::resize(import_data.values, 0);
thread_loop_partitioner = v.thread_loop_partitioner;
}
// is only used as temporary storage for compress() and
// update_ghost_values, and we might have vectors where we never
// call these methods and hence do not need to have the storage.
- import_data.values.reset();
- import_data.values_dev.reset();
+ Kokkos::resize(import_data.values_host_buffer, 0);
+ Kokkos::resize(import_data.values, 0);
vector_is_ghosted = false;
}
void
Vector<Number, MemorySpaceType>::zero_out_ghost_values() const
{
- if (data.values != nullptr)
- std::fill_n(data.values.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices(),
- Number());
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
- if (data.values_dev != nullptr)
+ if (data.values.size() != 0)
{
- const cudaError_t cuda_error_code =
- cudaMemset(data.values_dev.get() +
- partitioner->locally_owned_size(),
- 0,
- partitioner->n_ghost_indices() * sizeof(Number));
- AssertCuda(cuda_error_code);
- }
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+ {
+ const cudaError_t cuda_error_code =
+ cudaMemset(data.values.data() +
+ partitioner->locally_owned_size(),
+ 0,
+ partitioner->n_ghost_indices() * sizeof(Number));
+ AssertCuda(cuda_error_code);
+ }
+ else
#endif
+ {
+ std::fill_n(data.values.data() +
+ partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices(),
+ Number());
+ }
+ }
vector_is_ghosted = false;
}
if (partitioner->n_import_indices() > 0)
{
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
{
- if (import_data.values_dev == nullptr)
- import_data.values_dev.reset(
- Utilities::CUDA::allocate_device_data<Number>(
- partitioner->n_import_indices()));
+ if (import_data.values_host_buffer.size() == 0)
+ Kokkos::resize(import_data.values_host_buffer,
+ partitioner->n_import_indices());
}
else
# endif
std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
"This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
# endif
- if (import_data.values == nullptr)
- {
- Number *new_val;
- Utilities::System::posix_memalign(
- reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * partitioner->n_import_indices());
- import_data.values.reset(new_val);
- }
+ if (import_data.values.size() == 0)
+ Kokkos::resize(import_data.values,
+ partitioner->n_import_indices());
}
}
// device. We use values_host_buffer to store the elements because the
// function uses a view of the array and thus we need the data on the
// host to outlive the scope of the function.
- Number *new_val;
- Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * allocated_size);
-
- data.values = {new_val, [](Number *data) { std::free(data); }};
-
- cudaError_t cuda_error_code =
- cudaMemcpy(data.values.get(),
- data.values_dev.get(),
- allocated_size * sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error_code);
- }
-# endif
-
-# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
- {
+ data.values_host_buffer =
+ Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{},
+ data.values);
partitioner->import_from_ghosted_array_start(
operation,
communication_channel,
- ArrayView<Number, MemorySpace::CUDA>(
- data.values_dev.get() + partitioner->locally_owned_size(),
+ ArrayView<Number, MemorySpace::Host>(
+ data.values_host_buffer.data() +
+ partitioner->locally_owned_size(),
partitioner->n_ghost_indices()),
- ArrayView<Number, MemorySpace::CUDA>(
- import_data.values_dev.get(), partitioner->n_import_indices()),
+ ArrayView<Number, MemorySpace::Host>(
+ import_data.values_host_buffer.data(),
+ partitioner->n_import_indices()),
compress_requests);
}
else
partitioner->import_from_ghosted_array_start(
operation,
communication_channel,
- ArrayView<Number, MemorySpace::Host>(
- data.values.get() + partitioner->locally_owned_size(),
+ ArrayView<Number, MemorySpaceType>(
+ data.values.data() + partitioner->locally_owned_size(),
partitioner->n_ghost_indices()),
- ArrayView<Number, MemorySpace::Host>(
- import_data.values.get(), partitioner->n_import_indices()),
+ ArrayView<Number, MemorySpaceType>(import_data.values.data(),
+ partitioner->n_import_indices()),
compress_requests);
}
#else
// make this function thread safe
std::lock_guard<std::mutex> lock(mutex);
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
Assert(partitioner->n_import_indices() == 0 ||
- import_data.values_dev != nullptr,
- ExcNotInitialized());
- partitioner
- ->import_from_ghosted_array_finish<Number, MemorySpace::CUDA>(
- operation,
- ArrayView<const Number, MemorySpace::CUDA>(
- import_data.values_dev.get(), partitioner->n_import_indices()),
- ArrayView<Number, MemorySpace::CUDA>(
- data.values_dev.get(), partitioner->locally_owned_size()),
- ArrayView<Number, MemorySpace::CUDA>(
- data.values_dev.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices()),
- compress_requests);
- }
- else
-# endif
- {
- Assert(partitioner->n_import_indices() == 0 ||
- import_data.values != nullptr,
+ import_data.values_host_buffer.size() != 0,
ExcNotInitialized());
partitioner
->import_from_ghosted_array_finish<Number, MemorySpace::Host>(
operation,
ArrayView<const Number, MemorySpace::Host>(
- import_data.values.get(), partitioner->n_import_indices()),
+ import_data.values_host_buffer.data(),
+ partitioner->n_import_indices()),
ArrayView<Number, MemorySpace::Host>(
- data.values.get(), partitioner->locally_owned_size()),
+ data.values_host_buffer.data(),
+ partitioner->locally_owned_size()),
ArrayView<Number, MemorySpace::Host>(
- data.values.get() + partitioner->locally_owned_size(),
+ data.values_host_buffer.data() +
+ partitioner->locally_owned_size(),
partitioner->n_ghost_indices()),
compress_requests);
- }
-# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
- // The communication is done on the host, so we need to
- // move the data back to the device.
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
- {
+ // The communication is done on the host, so we need to
+ // move the data back to the device.
cudaError_t cuda_error_code =
- cudaMemcpy(data.values_dev.get(),
- data.values.get(),
+ cudaMemcpy(data.values.data(),
+ data.values_host_buffer.data(),
allocated_size * sizeof(Number),
cudaMemcpyHostToDevice);
AssertCuda(cuda_error_code);
- data.values.reset();
+ Kokkos::resize(data.values_host_buffer, 0);
}
+ else
# endif
+ {
+ Assert(partitioner->n_import_indices() == 0 ||
+ import_data.values.size() != 0,
+ ExcNotInitialized());
+ partitioner
+ ->import_from_ghosted_array_finish<Number, MemorySpaceType>(
+ operation,
+ ArrayView<const Number, MemorySpaceType>(
+ import_data.values.data(), partitioner->n_import_indices()),
+ ArrayView<Number, MemorySpaceType>(
+ data.values.data(), partitioner->locally_owned_size()),
+ ArrayView<Number, MemorySpaceType>(
+ data.values.data() + partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices()),
+ compress_requests);
+ }
+
#else
(void)operation;
#endif
if (partitioner->n_import_indices() > 0)
{
# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- Assert(
- (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
- ExcMessage(
- "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
- if (import_data.values_dev == nullptr)
- import_data.values_dev.reset(
- Utilities::CUDA::allocate_device_data<Number>(
- partitioner->n_import_indices()));
-# else
-# ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
- static_assert(
- std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
- "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
-# endif
- if (import_data.values == nullptr)
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
{
- Number *new_val;
- Utilities::System::posix_memalign(
- reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * partitioner->n_import_indices());
- import_data.values.reset(new_val);
+ if (import_data.values_host_buffer.size() == 0)
+ Kokkos::resize(import_data.values_host_buffer,
+ partitioner->n_import_indices());
}
+ else
# endif
+ {
+# ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
+ static_assert(
+ std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
+ "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
+# endif
+ if (import_data.values.size() == 0)
+ Kokkos::resize(import_data.values,
+ partitioner->n_import_indices());
+ }
}
# if defined DEAL_II_COMPILER_CUDA_AWARE && \
!defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
- // Move the data to the host and then move it back to the
- // device. We use values to store the elements because the function
- // uses a view of the array and thus we need the data on the host to
- // outlive the scope of the function.
- Number *new_val;
- Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * allocated_size);
-
- data.values = {new_val, [](Number *data) { std::free(data); }};
-
- cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
- data.values_dev.get(),
- allocated_size * sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error_code);
-# endif
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+ {
+ // Move the data to the host and then move it back to the
+ // device. We use values_host_buffer to store the elements
+ // because the function uses a view of the array and thus we
+ // need the data on the host to outlive the scope of the
+ // function.
+ data.values_host_buffer =
+ Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{},
+ data.values);
-# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
- partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
- communication_channel,
- ArrayView<const Number, MemorySpace::Host>(
- data.values.get(), partitioner->locally_owned_size()),
- ArrayView<Number, MemorySpace::Host>(import_data.values.get(),
- partitioner->n_import_indices()),
- ArrayView<Number, MemorySpace::Host>(
- data.values.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices()),
- update_ghost_values_requests);
-# else
- partitioner->export_to_ghosted_array_start<Number, MemorySpace::CUDA>(
- communication_channel,
- ArrayView<const Number, MemorySpace::CUDA>(
- data.values_dev.get(), partitioner->locally_owned_size()),
- ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.get(),
- partitioner->n_import_indices()),
- ArrayView<Number, MemorySpace::CUDA>(
- data.values_dev.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices()),
- update_ghost_values_requests);
+ partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
+ communication_channel,
+ ArrayView<const Number, MemorySpace::Host>(
+ data.values_host_buffer.data(),
+ partitioner->locally_owned_size()),
+ ArrayView<Number, MemorySpace::Host>(
+ import_data.values_host_buffer.data(),
+ partitioner->n_import_indices()),
+ ArrayView<Number, MemorySpace::Host>(
+ data.values_host_buffer.data() +
+ partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices()),
+ update_ghost_values_requests);
+ }
+ else
# endif
+ {
+ partitioner->export_to_ghosted_array_start<Number, MemorySpaceType>(
+ communication_channel,
+ ArrayView<const Number, MemorySpaceType>(
+ data.values.data(), partitioner->locally_owned_size()),
+ ArrayView<Number, MemorySpaceType>(import_data.values.data(),
+ partitioner->n_import_indices()),
+ ArrayView<Number, MemorySpaceType>(
+ data.values.data() + partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices()),
+ update_ghost_values_requests);
+ }
#else
(void)communication_channel;
// make this function thread safe
std::lock_guard<std::mutex> lock(mutex);
-# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
- defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
- partitioner->export_to_ghosted_array_finish(
- ArrayView<Number, MemorySpace::Host>(
- data.values.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices()),
- update_ghost_values_requests);
-# else
- partitioner->export_to_ghosted_array_finish(
- ArrayView<Number, MemorySpace::CUDA>(
- data.values_dev.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices()),
- update_ghost_values_requests);
+# if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+ !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+ if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+ {
+ partitioner->export_to_ghosted_array_finish(
+ ArrayView<Number, MemorySpace::Host>(
+ data.values_host_buffer.data() +
+ partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices()),
+ update_ghost_values_requests);
+
+ // The communication is done on the host, so we need to
+ // move the data back to the device.
+ cudaError_t cuda_error_code =
+ cudaMemcpy(data.values.data() +
+ partitioner->locally_owned_size(),
+ data.values_host_buffer.data() +
+ partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices() * sizeof(Number),
+ cudaMemcpyHostToDevice);
+ AssertCuda(cuda_error_code);
+
+ Kokkos::resize(data.values_host_buffer, 0);
+ }
+ else
# endif
+ {
+ partitioner->export_to_ghosted_array_finish(
+ ArrayView<Number, MemorySpaceType>(
+ data.values.data() + partitioner->locally_owned_size(),
+ partitioner->n_ghost_indices()),
+ update_ghost_values_requests);
+ }
}
-# if defined DEAL_II_COMPILER_CUDA_AWARE && \
- !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
- // The communication is done on the host, so we need to
- // move the data back to the device.
- if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
- {
- cudaError_t cuda_error_code =
- cudaMemcpy(data.values_dev.get() +
- partitioner->locally_owned_size(),
- data.values.get() + partitioner->locally_owned_size(),
- partitioner->n_ghost_indices() * sizeof(Number),
- cudaMemcpyHostToDevice);
- AssertCuda(cuda_error_code);
-
- data.values.reset();
- }
-# endif
-
#endif
vector_is_ghosted = true;
}
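
When the MPI library is not CUDA-aware, both communication paths stage the data through a host copy created with Kokkos::create_mirror_view_and_copy. The staging idiom in isolation (a sketch; in the patch the cudaMemcpy plays the role of the final deep_copy):

#include <Kokkos_Core.hpp>

void
communicate_via_host(Kokkos::View<double *> device_data)
{
  // Allocate (and fill) a HostSpace copy of the possibly device-resident
  // data; on a host-only build this is an alias, not a copy.
  auto host_copy =
    Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, device_data);

  // ... hand host_copy.data() to the MPI calls here ...

  // Move the received values back to the device.
  Kokkos::deep_copy(device_data, host_copy);
}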
if (partitioner.use_count() > 0)
memory +=
partitioner->memory_consumption() / partitioner.use_count() + 1;
- if (import_data.values != nullptr || import_data.values_dev != nullptr)
+ if (import_data.values_host_buffer.size() != 0 ||
+ import_data.values.size() != 0)
memory += (static_cast<std::size_t>(partitioner->n_import_indices()) *
sizeof(Number));
return memory;
#include <deal.II/base/config.h>
+#include <deal.II/base/kokkos.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/lac/vector_memory.h>
typename GrowingVectorMemory<VectorType>::Pool &
GrowingVectorMemory<VectorType>::get_pool()
{
- static GrowingVectorMemory<VectorType>::Pool pool;
+ // Kokkos needs to be initialized before constructing the static Pool for
+ // vector types that use Kokkos.
+ // If Kokkos is initialized by deal.II, this makes sure that it is
+ // finalized only after the Pool has been destroyed.
+ // If Kokkos is not initialized by deal.II, we assume that Kokkos might be
+ // finalized before the static variables are destroyed at program end, and
+ // we therefore need to empty the Pool when finalizing Kokkos so that the
+ // destruction of the Pool doesn't call Kokkos functions.
+ internal::ensure_kokkos_initialized();
+ static auto pool = []() {
+ if (!internal::dealii_initialized_kokkos)
+ Kokkos::push_finalize_hook(
+ GrowingVectorMemory<VectorType>::release_unused_memory);
+ return GrowingVectorMemory<VectorType>::Pool{};
+ }();
return pool;
}
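
The two cases rely on well-defined teardown ordering: if deal.II called Kokkos::initialize(), the std::atexit(Kokkos::finalize) registered in ensure_kokkos_initialized() runs only after later-constructed statics such as the pool have been destroyed; if the user initialized Kokkos, the finalize hook empties the pool while Kokkos is still alive. The hook mechanism in isolation (a sketch using only the public Kokkos API):

#include <Kokkos_Core.hpp>

#include <iostream>

int
main()
{
  Kokkos::initialize();

  // Hooks run inside Kokkos::finalize(), last registered first, while
  // Kokkos is still usable; this is where the Pool can be emptied safely.
  Kokkos::push_finalize_hook(
    [] { std::cout << "releasing pooled vectors" << std::endl; });

  Kokkos::finalize();
}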
const size_type /*size*/,
real_type & /*sum*/,
Number * /*values*/,
- Number * /*values_dev*/)
+ Number * /*values*/)
{}
template <typename real_type>
::dealii::MemorySpace::Host>
&data)
{
- Vector_copy<Number, Number2> copier(v_data.values.get(),
- data.values.get());
+ Vector_copy<Number, Number2> copier(v_data.values.data(),
+ data.values.data());
parallel_for(copier, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vector_set<Number> setter(s, data.values.get());
+ Vector_set<Number> setter(s, data.values.data());
parallel_for(setter, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_add_v<Number> vector_add(data.values.get(),
- v_data.values.get());
+ Vectorization_add_v<Number> vector_add(data.values.data(),
+ v_data.values.data());
parallel_for(vector_add, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
- v_data.values.get());
+ Vectorization_subtract_v<Number> vector_subtract(data.values.data(),
+ v_data.values.data());
parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_add_factor<Number> vector_add(data.values.get(), a);
+ Vectorization_add_factor<Number> vector_add(data.values.data(), a);
parallel_for(vector_add, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_add_av<Number> vector_add(data.values.get(),
- v_data.values.get(),
+ Vectorization_add_av<Number> vector_add(data.values.data(),
+ v_data.values.data(),
a);
parallel_for(vector_add, 0, size, thread_loop_partitioner);
}
&data)
{
Vectorization_add_avpbw<Number> vector_add(
- data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
+ data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
parallel_for(vector_add, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
- v_data.values.get(),
+ Vectorization_sadd_xv<Number> vector_sadd(data.values.data(),
+ v_data.values.data(),
x);
parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
- v_data.values.get(),
+ Vectorization_sadd_xav<Number> vector_sadd(data.values.data(),
+ v_data.values.data(),
a,
x);
parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_sadd_xavbw<Number> vector_sadd(
- data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
+ Vectorization_sadd_xavbw<Number> vector_sadd(data.values.data(),
+ v_data.values.data(),
+ w_data.values.data(),
+ x,
+ a,
+ b);
parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
- factor);
+ Vectorization_multiply_factor<Number> vector_multiply(
+ data.values.data(), factor);
parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_scale<Number> vector_scale(data.values.get(),
- v_data.values.get());
+ Vectorization_scale<Number> vector_scale(data.values.data(),
+ v_data.values.data());
parallel_for(vector_scale, 0, size, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- Vectorization_equ_au<Number> vector_equ(data.values.get(),
- v_data.values.get(),
+ Vectorization_equ_au<Number> vector_equ(data.values.data(),
+ v_data.values.data(),
a);
parallel_for(vector_equ, 0, size, thread_loop_partitioner);
}
&data)
{
Vectorization_equ_aubv<Number> vector_equ(
- data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
+ data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
parallel_for(vector_equ, 0, size, thread_loop_partitioner);
}
{
Number sum;
dealii::internal::VectorOperations::Dot<Number, Number2> dot(
- data.values.get(), v_data.values.get());
+ data.values.data(), v_data.values.data());
dealii::internal::VectorOperations::parallel_reduce(
dot, 0, size, sum, thread_loop_partitioner);
AssertIsFinite(sum);
::dealii::MemorySpace::Host>
&data)
{
- Norm2<Number, real_type> norm2(data.values.get());
+ Norm2<Number, real_type> norm2(data.values.data());
parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
}
MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
{
Number sum;
- MeanValue<Number> mean(data.values.get());
+ MeanValue<Number> mean(data.values.data());
parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
return sum;
::dealii::MemorySpace::Host>
&data)
{
- Norm1<Number, real_type> norm1(data.values.get());
+ Norm1<Number, real_type> norm1(data.values.data());
parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
}
::dealii::MemorySpace::Host>
&data)
{
- NormP<Number, real_type> normp(data.values.get(), p);
+ NormP<Number, real_type> normp(data.values.data(), p);
parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
}
&data)
{
Number sum;
- AddAndDot<Number> adder(data.values.get(),
- v_data.values.get(),
- w_data.values.get(),
+ AddAndDot<Number> adder(data.values.data(),
+ v_data.values.data(),
+ w_data.values.data(),
a);
parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
{
if (operation == VectorOperation::insert)
{
- cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
- v_data.values_dev.get(),
+ cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+ v_data.values.data(),
size * sizeof(Number),
cudaMemcpyDeviceToHost);
AssertCuda(cuda_error_code);
::dealii::MemorySpace::CUDA>
&data)
{
- cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
- v_data.values_dev.get(),
+ cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+ v_data.values.data(),
size * sizeof(Number),
cudaMemcpyDeviceToDevice);
AssertCuda(cuda_error_code);
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::set<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
+ <<<n_blocks, block_size>>>(data.values.data(), s, size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(),
+ <<<n_blocks, block_size>>>(data.values.data(),
1.,
- v_data.values_dev.get(),
+ v_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(),
+ <<<n_blocks, block_size>>>(data.values.data(),
-1.,
- v_data.values_dev.get(),
+ v_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
+ <<<n_blocks, block_size>>>(data.values.data(), a, size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(),
+ <<<n_blocks, block_size>>>(data.values.data(),
a,
- v_data.values_dev.get(),
+ v_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
- <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
+ <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values.data(),
a,
- v_data.values_dev.get(),
+ v_data.values.data(),
b,
- w_data.values_dev.get(),
+ w_data.values.data(),
size);
AssertCudaKernel();
}
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(
- x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
+ x, data.values.data(), 1., v_data.values.data(), size);
AssertCudaKernel();
}
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(
- x, data.values_dev.get(), a, v_data.values_dev.get(), size);
+ x, data.values.data(), a, v_data.values.data(), size);
AssertCudaKernel();
}
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(x,
- data.values_dev.get(),
+ data.values.data(),
a,
- v_data.values_dev.get(),
+ v_data.values.data(),
b,
- w_data.values_dev.get(),
+ w_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
- <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
+ <<<n_blocks, block_size>>>(data.values.data(), factor, size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
- <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
- v_data.values_dev.get(),
+ <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values.data(),
+ v_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
- <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
+ <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values.data(),
a,
- v_data.values_dev.get(),
+ v_data.values.data(),
size);
AssertCudaKernel();
}
{
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
- <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values_dev.get(),
+ <<<dim3(n_blocks, 1), dim3(block_size)>>>(data.values.data(),
a,
- v_data.values_dev.get(),
+ v_data.values.data(),
b,
- w_data.values_dev.get(),
+ w_data.values.data(),
size);
AssertCudaKernel();
}
Number,
::dealii::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
- data.values_dev.get(),
- v_data.values_dev.get(),
+ data.values.data(),
+ v_data.values.data(),
static_cast<unsigned int>(
size));
AssertCudaKernel();
Number,
::dealii::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
- data.values_dev.get(),
+ data.values.data(),
size);
// Copy the result back to the host
Number,
::dealii::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
- data.values_dev.get(),
+ data.values.data(),
size);
// Copy the result back to the host
const int n_blocks = 1 + size / (chunk_size * block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
<<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
- data.values_dev.get(),
- v_data.values_dev.get(),
- w_data.values_dev.get(),
+ data.values.data(),
+ v_data.values.data(),
+ w_data.values.data(),
a,
size);
{
if (operation == VectorOperation::insert)
{
- cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
- v_data.values.get(),
+ cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+ v_data.values.data(),
size * sizeof(Number),
cudaMemcpyHostToDevice);
AssertCuda(cuda_error_code);
job_identifier.cc
logstream.cc
hdf5.cc
+ kokkos.cc
mpi.cc
mpi_compute_index_owner_internal.cc
mpi_noncontiguous_partitioner.cc
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/kokkos.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/vector_memory.h>
+
+#include <Kokkos_Core.hpp>
+
+#include <cstdlib>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+ bool dealii_initialized_kokkos = false;
+
+ void
+ ensure_kokkos_initialized()
+ {
+ if (!Kokkos::is_initialized())
+ {
+ dealii_initialized_kokkos = true;
+ Kokkos::initialize();
+ std::atexit(Kokkos::finalize);
+ }
+ }
+} // namespace internal
+DEAL_II_NAMESPACE_CLOSE
}
#endif
-// There is a similar issue with CUDA: The destructor of static objects might
-// run after the CUDA driver is unloaded. Hence, also release all memory
-// related to CUDA vectors.
-#ifdef DEAL_II_WITH_CUDA
- GrowingVectorMemory<
- LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
- release_unused_memory();
- GrowingVectorMemory<
- LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
- release_unused_memory();
-#endif
-
#ifdef DEAL_II_WITH_P4EST
// now end p4est and libsc
// Note: p4est has no finalize function
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// test that we can initialize and finalize Kokkos in user code.
+
+#include <deal.II/base/kokkos.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/vector_memory.h>
+
+#include "../tests.h"
+
+int
+main()
+{
+ Kokkos::initialize();
+
+ initlog();
+
+ GrowingVectorMemory<
+ LinearAlgebra::distributed::Vector<double, MemorySpace::Host>>{};
+ GrowingVectorMemory<
+ LinearAlgebra::distributed::Vector<float, MemorySpace::Host>>{};
+
+ internal::ensure_kokkos_initialized();
+  deallog << "Kokkos initialized by deal.II: "
+ << internal::dealii_initialized_kokkos << std::endl;
+
+ Kokkos::finalize();
+
+ return 0;
+}
--- /dev/null
+
+DEAL::Kokkos initialized by deal.II: 0