enable MemorySpace in GMG transfer
tjhei committed Nov 14, 2024
1 parent 9e9a721 commit 3974909
Showing 10 changed files with 418 additions and 220 deletions.
9 changes: 2 additions & 7 deletions cmake/config/template-arguments.in
@@ -169,18 +169,13 @@ EXTERNAL_PARALLEL_VECTORS := { @DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_BLOCKVECTOR@
}

// TODO: I don't understand this one. LA::distributed::Vector does not have a
// native matrix type, especially if we don't compile with Trilinos. Currently
// only used: mg_transfer_prebuilt.inst.in and
// mg_level_global_transfer.inst.in
VECTORS_WITH_MATRIX := { Vector<double>;
// Vectors we can do GMG with, excluding LinearAlgebra::distributed::Vector<> (which is handled separately):
VECTORS_WITHOUT_LAVEC := { Vector<double>;
Vector<float> ;

BlockVector<double>;
BlockVector<float>;

LinearAlgebra::distributed::Vector<double>;

@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
@DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE@;
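For context, lists like VECTORS_WITHOUT_LAVEC are consumed by deal.II's *.inst.in instantiation files. A minimal sketch of the pattern (the concrete file contents here are an assumption, not part of this commit):

for (VEC : VECTORS_WITHOUT_LAVEC)
  {
    template class MGLevelGlobalTransfer<VEC>;
  }

LinearAlgebra::distributed::Vector is dropped from this list so that it can be instantiated separately, expanded over both its Number and MemorySpace template parameters.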
135 changes: 129 additions & 6 deletions examples/step-64/step-64.cc
@@ -17,6 +17,7 @@

// First include the necessary files from the deal.II library known from the
// previous tutorials.
#include <boost/iostreams/detail/select.hpp>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/quadrature_lib.h>

@@ -40,6 +41,10 @@
#include <deal.II/matrix_free/portable_fe_evaluation.h>
#include <deal.II/matrix_free/portable_matrix_free.h>
#include <deal.II/matrix_free/operators.h>
#include <deal.II/multigrid/mg_transfer_matrix_free.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>



#include <fstream>

@@ -467,7 +472,8 @@ namespace Step64
// Since all the operations in the `solve()` function are executed on the
// graphics card, it is necessary for the vectors used to store their values
// on the GPU as well. LinearAlgebra::distributed::Vector can be told which
// memory space to use. It might
// memory space to use. There is also LinearAlgebra::CUDAWrappers::Vector
// that always uses GPU memory storage but doesn't work with MPI. It might
// be worth noting that the communication between different MPI processes
// can be improved if the MPI implementation is GPU-aware and the configure
// flag `DEAL_II_MPI_WITH_DEVICE_SUPPORT` is enabled. (The value of this
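As a minimal sketch of the memory-space template parameter discussed above (the index set and communicator names are placeholders for whatever the surrounding program provides):

    // Vector whose storage lives in device memory when deal.II is
    // configured with a device backend:
    LinearAlgebra::distributed::Vector<double, MemorySpace::Default> v_dev;
    v_dev.reinit(locally_owned_dofs, mpi_communicator);

    // Values are staged back to the host through a ReadWriteVector,
    // e.g. for output:
    LinearAlgebra::ReadWriteVector<double> rw_vector(locally_owned_dofs);
    rw_vector.import_elements(v_dev, VectorOperation::insert);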
@@ -610,6 +616,104 @@ namespace Step64
// copy the solution from the device to the host to be able to view its
// values and display it in `output_results()`. This transfer works the same
// as at the end of the previous function.
  // Work in progress: solve with a geometric multigrid preconditioner whose
  // levels are generated by polynomial (p-)coarsening. The commented-out
  // pieces below still need to be ported to the device operator used in this
  // tutorial.
  template <typename VectorType, typename OperatorType, int dim>
  void solve_with_gmg(SolverControl         &solver_control,
                      const OperatorType    &system_matrix,
                      VectorType            &dst,
                      const VectorType      &src,
                      // const hp::MappingCollection<dim> mapping,
                      const DoFHandler<dim> &dof_handler,
                      // const hp::QCollection<dim> &quadrature_collection,
                      int                    fe_degree)
  {
    MGLevelObject<DoFHandler<dim>>                     dof_handlers;
    MGLevelObject<std::unique_ptr<OperatorType>>       operators;
    MGLevelObject<MGTwoLevelTransfer<dim, VectorType>> transfers;

    // Build the sequence of polynomial degrees used on the levels; with
    // decrease_by_one this should yield {1, ..., fe_degree}.
    const MGTransferGlobalCoarseningTools::PolynomialCoarseningSequenceType
      p_sequence = MGTransferGlobalCoarseningTools::
        PolynomialCoarseningSequenceType::decrease_by_one;

    const std::vector<unsigned int> p_levels =
      MGTransferGlobalCoarseningTools::create_polynomial_coarsening_sequence(
        fe_degree, p_sequence);

    const unsigned int n_p_levels = p_levels.size();

    const unsigned int minlevel = 0;
    const unsigned int maxlevel = n_p_levels - 1;

    dof_handlers.resize(minlevel, maxlevel);
    MGLevelObject<AffineConstraints<double>> constraints(minlevel, maxlevel);
    operators.resize(minlevel, maxlevel);
    transfers.resize(minlevel, maxlevel);

    // Set up the levels. The body is adapted from a host-only multigrid
    // setup and is disabled until it works with the device operator:
    for (auto l = minlevel; l <= maxlevel; ++l)
      {
        /*
        auto &dof_handler = dof_handlers[l];
        auto &constraint  = constraints[l];
        auto &op          = operators[l];

        std::unique_ptr<FiniteElement<dim>> fe;
        std::unique_ptr<Quadrature<dim>>    quad;
        std::unique_ptr<Mapping<dim>>       mapping;

        fe      = std::make_unique<FE_Q<dim>>(p_levels[l]);
        quad    = std::make_unique<QGauss<dim>>(p_levels[l] + 1);
        mapping = std::make_unique<MappingFE<dim>>(FE_Q<dim>(1));
        if (l == maxlevel)
          mapping_ = mapping->clone();

        // set up dofhandler
        dof_handler.distribute_dofs(*fe);
        dof_handler.distribute_mg_dofs();

        // set up constraints
        const std::set<types::boundary_id> dirichlet_boundary = {0};
        MGConstrainedDoFs mg_constrained_dofs;
        mg_constrained_dofs.initialize(dof_handler);
        mg_constrained_dofs.make_zero_boundary_constraints(dof_handler,
                                                           dirichlet_boundary);
        const IndexSet relevant_dofs =
          DoFTools::extract_locally_relevant_level_dofs(dof_handler, 0);
        constraint.reinit(relevant_dofs);
        constraint.add_lines(mg_constrained_dofs.get_boundary_indices(0));
        constraint.close();
        constraint.print(std::cout);

        // set up operator
        op.reinit(*mapping, dof_handler, *quad, constraint, 0);
        */
      }

    // Create the two-level transfers between consecutive levels ...
    for (unsigned int level = minlevel; level < maxlevel; ++level)
      transfers[level + 1].reinit(dof_handlers[level + 1],
                                  dof_handlers[level],
                                  constraints[level + 1],
                                  constraints[level]);

    // ... and collect them in a single transfer object that also knows how
    // to initialize vectors on each level.
    MGTransferMF<dim, VectorType> transfer(
      transfers, [&](const auto l, auto &vec) {
        operators[l]->initialize_dof_vector(vec);
      });

    PreconditionIdentity preconditioner;
    SolverCG<VectorType> cg(solver_control);
    // cg.solve(system_matrix, dst, src, preconditioner);
  }
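  // What is still missing above is combining the level operators and the
  // transfer into an actual multigrid preconditioner. A hedged sketch of the
  // usual deal.II pattern follows; level_matrices, mg_coarse, and mg_smoother
  // are hypothetical placeholders and not part of this commit:
  //
  //   mg::Matrix<VectorType> mg_matrix(level_matrices);
  //   Multigrid<VectorType>  mg(
  //     mg_matrix, mg_coarse, transfer, mg_smoother, mg_smoother);
  //
  //   PreconditionMG<dim, VectorType, MGTransferMF<dim, VectorType>>
  //     preconditioner(dof_handler, mg, transfer);
  //
  //   SolverCG<VectorType> cg(solver_control);
  //   cg.solve(system_matrix, dst, src, preconditioner);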


template <int dim, int fe_degree>
void HelmholtzProblem<dim, fe_degree>::solve()
{
@@ -630,9 +734,26 @@

    SolverControl solver_control(system_rhs_dev.size(),
                                 1e-12 * system_rhs_dev.l2_norm());
    SolverCG<LinearAlgebra::distributed::Vector<double, MemorySpace::Default>>
      cg(solver_control);
    cg.solve(*system_matrix_dev, solution_dev, system_rhs_dev, preconditioner);

    if (false)
      solve_with_gmg(solver_control,
                     *system_matrix_dev,
                     solution_dev,
                     system_rhs_dev,
                     // mapping,
                     dof_handler,
                     // quadrature,
                     fe_degree);
    else
      {
        SolverCG<
          LinearAlgebra::distributed::Vector<double, MemorySpace::Default>>
          cg(solver_control);
        cg.solve(*system_matrix_dev,
                 solution_dev,
                 system_rhs_dev,
                 preconditioner);
      }

    pcout << " Solved in " << solver_control.last_step() << " iterations."
          << std::endl;
@@ -696,7 +817,7 @@
template <int dim, int fe_degree>
void HelmholtzProblem<dim, fe_degree>::run()
{
for (unsigned int cycle = 0; cycle < 7 - dim; ++cycle)
for (unsigned int cycle = 0; cycle < 7 - dim - 1; ++cycle)
{
pcout << "Cycle " << cycle << std::endl;

@@ -746,7 +867,9 @@ int main(int argc, char *argv[])

Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

HelmholtzProblem<3, 3> helmholtz_problem;
constexpr int dim = 3;
constexpr int fe_degree = 3;
HelmholtzProblem<dim, fe_degree> helmholtz_problem;
helmholtz_problem.run();
}
catch (std::exception &exc)
47 changes: 27 additions & 20 deletions include/deal.II/multigrid/mg_transfer.h
@@ -407,9 +407,11 @@ class MGLevelGlobalTransfer : public MGTransferBase<VectorType>
* routines as compared to the %parallel vectors in the PETScWrappers and
* TrilinosWrappers namespaces.
*/
template <typename Number>
class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>
: public MGTransferBase<LinearAlgebra::distributed::Vector<Number>>
template <typename Number, typename MemorySpace>
class MGLevelGlobalTransfer<
LinearAlgebra::distributed::Vector<Number, MemorySpace>>
: public MGTransferBase<
LinearAlgebra::distributed::Vector<Number, MemorySpace>>
{
public:
/**
@@ -426,9 +428,10 @@
*/
template <int dim, typename Number2, int spacedim>
void
copy_to_mg(const DoFHandler<dim, spacedim> &dof_handler,
MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &dst,
const LinearAlgebra::distributed::Vector<Number2> &src) const;
copy_to_mg(
const DoFHandler<dim, spacedim> &dof_handler,
MGLevelObject<LinearAlgebra::distributed::Vector<Number, MemorySpace>> &dst,
const LinearAlgebra::distributed::Vector<Number2, MemorySpace> &src) const;

/**
* Transfer from multi-level vector to normal vector.
@@ -440,9 +443,10 @@
template <int dim, typename Number2, int spacedim>
void
copy_from_mg(
const DoFHandler<dim, spacedim> &dof_handler,
LinearAlgebra::distributed::Vector<Number2> &dst,
const MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &src) const;
const DoFHandler<dim, spacedim> &dof_handler,
LinearAlgebra::distributed::Vector<Number2, MemorySpace> &dst,
const MGLevelObject<LinearAlgebra::distributed::Vector<Number, MemorySpace>>
&src) const;

/**
* Add a multi-level vector to a normal vector.
@@ -452,9 +456,10 @@
template <int dim, typename Number2, int spacedim>
void
copy_from_mg_add(
const DoFHandler<dim, spacedim> &dof_handler,
LinearAlgebra::distributed::Vector<Number2> &dst,
const MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &src) const;
const DoFHandler<dim, spacedim> &dof_handler,
LinearAlgebra::distributed::Vector<Number2, MemorySpace> &dst,
const MGLevelObject<LinearAlgebra::distributed::Vector<Number, MemorySpace>>
&src) const;

/**
* If this object operates on BlockVector objects, we need to describe how
@@ -493,10 +498,11 @@
*/
template <int dim, typename Number2, int spacedim>
void
copy_to_mg(const DoFHandler<dim, spacedim> &dof_handler,
MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &dst,
const LinearAlgebra::distributed::Vector<Number2> &src,
const bool solution_transfer) const;
copy_to_mg(
const DoFHandler<dim, spacedim> &dof_handler,
MGLevelObject<LinearAlgebra::distributed::Vector<Number, MemorySpace>> &dst,
const LinearAlgebra::distributed::Vector<Number2, MemorySpace> &src,
const bool solution_transfer) const;

/**
* Internal function to @p fill copy_indices*. Called by derived classes.
@@ -581,26 +587,27 @@
* global vector for inserting into the level vectors. This vector is
* populated with those entries.
*/
mutable LinearAlgebra::distributed::Vector<Number> ghosted_global_vector;
mutable LinearAlgebra::distributed::Vector<Number, MemorySpace>
ghosted_global_vector;

/**
* Same as above but used when working with solution vectors.
*/
mutable LinearAlgebra::distributed::Vector<Number>
mutable LinearAlgebra::distributed::Vector<Number, MemorySpace>
solution_ghosted_global_vector;

/**
* In the function copy_from_mg, we access all level vectors with certain
* ghost entries for inserting the result into a global vector.
*/
mutable MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
mutable MGLevelObject<LinearAlgebra::distributed::Vector<Number, MemorySpace>>
ghosted_level_vector;

/**
* Function to initialize internal level vectors.
*/
std::function<void(const unsigned int,
LinearAlgebra::distributed::Vector<Number> &)>
LinearAlgebra::distributed::Vector<Number, MemorySpace> &)>
initialize_dof_vector;

private:
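For illustration, a hedged sketch of how the widened specialization could be used with a device vector through MGTransferPrebuilt, which derives from this class. The setup of dof_handler, mg_constrained_dofs, and global_vector is assumed, and whether the prebuilt transfer fully supports device memory after this commit is not confirmed by the excerpt:

    using VectorType =
      LinearAlgebra::distributed::Vector<double, MemorySpace::Default>;

    MGTransferPrebuilt<VectorType> transfer(mg_constrained_dofs);
    transfer.build(dof_handler);

    const unsigned int n_levels =
      dof_handler.get_triangulation().n_global_levels();
    MGLevelObject<VectorType> level_vectors(0, n_levels - 1);

    transfer.copy_to_mg(dof_handler, level_vectors, global_vector);
    // ... operate on the level vectors ...
    transfer.copy_from_mg(dof_handler, global_vector, level_vectors);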