DOLFIN-X C++ interface
MPI.h
// Copyright (C) 2007-2014 Magnus Vikstrøm and Garth N. Wells
//
// This file is part of DOLFINX (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <array>
#include <cassert>
#include <complex>
#include <cstdint>
#include <dolfinx/graph/AdjacencyList.h>
#include <iostream>
#include <numeric>
#include <type_traits>
#include <utility>
#include <vector>

#define MPICH_IGNORE_CXX_SEEK 1
#include <mpi.h>
namespace dolfinx
{

/// This class provides utility functions for easy communication with
/// MPI and handles cases when DOLFINX is not configured with MPI
class MPI
{
public:
  /// A duplicate MPI communicator that manages the lifetime of the
  /// duplicated communicator
  class Comm
  {
  public:
    /// Duplicate communicator and wrap duplicate
    explicit Comm(MPI_Comm comm);

    /// Copy constructor
    Comm(const Comm& comm);

    /// Move constructor
    Comm(Comm&& comm) noexcept;

    // Disable copy assignment operator
    Comm& operator=(const Comm& comm) = delete;

    /// Move assignment operator
    Comm& operator=(Comm&& comm);

    /// Destructor (frees wrapped communicator)
    ~Comm();

    /// Return the underlying MPI_Comm object
    MPI_Comm comm() const;

  private:
    // MPI communicator
    MPI_Comm _comm;
  };

  /// Return process rank for the communicator
  static int rank(MPI_Comm comm);

  /// Return size of the group (number of processes) associated with
  /// the communicator
  static int size(MPI_Comm comm);

  /// Send in_values[p0] to process p0 and receive values from process
  /// p1 in out_values[p1]
  template <typename T>
  static graph::AdjacencyList<T>
  all_to_all(MPI_Comm comm, const graph::AdjacencyList<T>& send_data);

  /// Neighbourhood all-to-all. Send data to neighbours using offsets
  /// into a contiguous data array.
  template <typename T>
  static graph::AdjacencyList<T>
  neighbor_all_to_all(MPI_Comm neighbor_comm,
                      const std::vector<int>& send_offsets,
                      const std::vector<T>& send_data);

  /// Return list of neighbours for a neighbourhood communicator
  static std::vector<int> neighbors(MPI_Comm neighbor_comm);

  /// Find global offset (index) (wrapper for MPI_(Ex)Scan with MPI_SUM
  /// as reduction op)
  static std::size_t global_offset(MPI_Comm comm, std::size_t range,
                                   bool exclusive);

  /// Return local range for given process, splitting [0, N - 1] into
  /// size() portions of almost equal size
  static std::array<std::int64_t, 2> local_range(int process, std::int64_t N,
                                                 int size);

  /// Return which process owns index (inverse of local_range)
  static int index_owner(int size, std::size_t index, std::size_t N);

  // Helper so that the static_assert in mpi_type() depends on T and
  // fires only when the unspecialised template is instantiated
  template <typename T>
  struct dependent_false : std::false_type
  {
  };

  /// MPI Type
  template <typename T>
  static MPI_Datatype mpi_type()
  {
    static_assert(dependent_false<T>::value, "Unknown MPI type");
    throw std::runtime_error("MPI data type unknown");
    return MPI_CHAR;
  }
};

// Turn off doxygen for these template specialisations
/// @cond
// Specialisations for MPI_Datatypes
template <>
inline MPI_Datatype MPI::mpi_type<float>()
{
  return MPI_FLOAT;
}
template <>
inline MPI_Datatype MPI::mpi_type<double>()
{
  return MPI_DOUBLE;
}
template <>
inline MPI_Datatype MPI::mpi_type<std::complex<double>>()
{
  return MPI_DOUBLE_COMPLEX;
}
template <>
inline MPI_Datatype MPI::mpi_type<short int>()
{
  return MPI_SHORT;
}
template <>
inline MPI_Datatype MPI::mpi_type<int>()
{
  return MPI_INT;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned int>()
{
  return MPI_UNSIGNED;
}
template <>
inline MPI_Datatype MPI::mpi_type<long int>()
{
  return MPI_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long>()
{
  return MPI_UNSIGNED_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<long long>()
{
  return MPI_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long long>()
{
  return MPI_UNSIGNED_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<bool>()
{
  return MPI_C_BOOL;
}
/// @endcond
//---------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::all_to_all(MPI_Comm comm,
                         const graph::AdjacencyList<T>& send_data)
{
  const Eigen::Array<std::int32_t, Eigen::Dynamic, 1>& send_offsets
      = send_data.offsets();
  const Eigen::Array<T, Eigen::Dynamic, 1>& values_in = send_data.array();

  const int comm_size = MPI::size(comm);
  assert(send_data.num_nodes() == comm_size);

  // Data size per destination rank
  std::vector<int> send_size(comm_size);
  std::adjacent_difference(send_offsets.data() + 1,
                           send_offsets.data() + send_offsets.rows(),
                           send_size.begin());

  // Get received data sizes from each rank
  std::vector<int> recv_size(comm_size);
  MPI_Alltoall(send_size.data(), 1, mpi_type<int>(), recv_size.data(), 1,
               mpi_type<int>(), comm);

  // Compute receive offsets
  Eigen::Array<std::int32_t, Eigen::Dynamic, 1> recv_offset(comm_size + 1);
  recv_offset(0) = 0;
  std::partial_sum(recv_size.begin(), recv_size.end(), recv_offset.data() + 1);

  // Send/receive data
  Eigen::Array<T, Eigen::Dynamic, 1> recv_values(recv_offset(comm_size));
  MPI_Alltoallv(values_in.data(), send_size.data(), send_offsets.data(),
                mpi_type<T>(), recv_values.data(), recv_size.data(),
                recv_offset.data(), mpi_type<T>(), comm);

  return graph::AdjacencyList<T>(std::move(recv_values),
                                 std::move(recv_offset));
}
//-----------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::neighbor_all_to_all(MPI_Comm neighbor_comm,
                                  const std::vector<int>& send_offsets,
                                  const std::vector<T>& send_data)
{
  assert((int)send_data.size() == send_offsets.back());
  assert(send_offsets[0] == 0);

  // Get receive sizes
  std::vector<int> send_sizes(send_offsets.size() - 1, 0);
  std::vector<int> recv_sizes(send_sizes.size());
  std::adjacent_difference(send_offsets.begin() + 1, send_offsets.end(),
                           send_sizes.begin());
  MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI::mpi_type<int>(),
                        recv_sizes.data(), 1, MPI::mpi_type<int>(),
                        neighbor_comm);

  // Work out receive offsets
  Eigen::Array<int, Eigen::Dynamic, 1> recv_offsets(recv_sizes.size() + 1);
  recv_offsets(0) = 0;
  std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                   recv_offsets.data() + 1);

  Eigen::Array<T, Eigen::Dynamic, 1> recv_data(
      recv_offsets(recv_offsets.rows() - 1));
  MPI_Neighbor_alltoallv(
      send_data.data(), send_sizes.data(), send_offsets.data(),
      MPI::mpi_type<T>(), recv_data.data(), recv_sizes.data(),
      recv_offsets.data(), MPI::mpi_type<T>(), neighbor_comm);

  return graph::AdjacencyList<T>(std::move(recv_data), std::move(recv_offsets));
}
//---------------------------------------------------------------------------

} // namespace dolfinx
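
The sketch below shows how the partitioning helpers declared above (rank, size, local_range, index_owner and global_offset) might be used together, with an MPI::Comm wrapper managing a duplicated communicator. It is an illustrative sketch, not part of MPI.h; the include path dolfinx/common/MPI.h is an assumption, and the program must be launched under MPI (e.g. with mpirun).

#include <array>
#include <cstdint>
#include <iostream>
#include <dolfinx/common/MPI.h> // assumed install path of the header above

int main(int argc, char* argv[])
{
  MPI_Init(&argc, &argv);
  {
    // Duplicate and wrap MPI_COMM_WORLD; the duplicate is freed when
    // 'comm' goes out of scope (before MPI_Finalize)
    dolfinx::MPI::Comm comm(MPI_COMM_WORLD);

    const int rank = dolfinx::MPI::rank(comm.comm());
    const int size = dolfinx::MPI::size(comm.comm());

    // Split N global indices into near-equal contiguous blocks
    const std::int64_t N = 1000;
    const std::array<std::int64_t, 2> range
        = dolfinx::MPI::local_range(rank, N, size);

    // Which rank owns a given global index (inverse of local_range)
    const int owner = dolfinx::MPI::index_owner(size, 42, N);

    // Exclusive prefix sum of the local range sizes across ranks
    const std::size_t offset
        = dolfinx::MPI::global_offset(comm.comm(), range[1] - range[0], true);

    std::cout << "rank " << rank << "/" << size << ": range [" << range[0]
              << ", " << range[1] << "), offset " << offset
              << ", index 42 owned by rank " << owner << "\n";
  }
  MPI_Finalize();
  return 0;
}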
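
As a second sketch, all_to_all can be driven by packing per-destination values into a graph::AdjacencyList, constructing it from a values array and an offsets array in the same way the return statement of all_to_all itself does. This is an illustrative example, not part of MPI.h; the include paths and the helper name exchange_ranks are assumptions.

#include <Eigen/Dense>
#include <cstdint>
#include <utility>
#include <dolfinx/common/MPI.h>          // assumed install path
#include <dolfinx/graph/AdjacencyList.h>

// Each rank sends one std::int64_t (its own rank number) to every rank
// and returns the values received, ordered by source rank.
dolfinx::graph::AdjacencyList<std::int64_t> exchange_ranks(MPI_Comm comm)
{
  const int size = dolfinx::MPI::size(comm);
  const int rank = dolfinx::MPI::rank(comm);

  // Node p of the adjacency list holds the values destined for rank p;
  // here exactly one value per destination, so offsets are 0, 1, ..., size
  Eigen::Array<std::int64_t, Eigen::Dynamic, 1> values(size);
  values.setConstant(rank);
  Eigen::Array<std::int32_t, Eigen::Dynamic, 1> offsets(size + 1);
  for (int p = 0; p <= size; ++p)
    offsets(p) = p;
  dolfinx::graph::AdjacencyList<std::int64_t> send_data(std::move(values),
                                                        std::move(offsets));

  // The returned list has one node per source rank; array() holds the
  // received values and offsets() the per-source-rank boundaries
  return dolfinx::MPI::all_to_all(comm, send_data);
}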