DOLFIN-X C++ interface
MPI.h
// Copyright (C) 2007-2014 Magnus Vikstrøm and Garth N. Wells
//
// This file is part of DOLFINX (https://www.fenicsproject.org)
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#pragma once

#include <algorithm>
#include <array>
#include <cassert>
#include <complex>
#include <cstdint>
#include <dolfinx/graph/AdjacencyList.h>
#include <iostream>
#include <numeric>
#include <set>
#include <stdexcept>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

#define MPICH_IGNORE_CXX_SEEK 1
#include <mpi.h>

namespace dolfinx
{

/// This class provides utility functions for easy communication with
/// MPI and handles cases when DOLFINX is not configured with MPI
class MPI
{
public:
  /// Duplicate an MPI communicator and manage the lifetime of the
  /// communicator
  class Comm
  {
  public:
    /// Duplicate communicator and wrap duplicate
    explicit Comm(MPI_Comm comm, bool duplicate = true);

    /// Copy constructor
    Comm(const Comm& comm);

    /// Move constructor
    Comm(Comm&& comm) noexcept;

    // Disable copy assignment operator
    Comm& operator=(const Comm& comm) = delete;

    /// Move assignment operator
    Comm& operator=(Comm&& comm) noexcept;

    /// Destructor (frees wrapped communicator)
    ~Comm();

    /// Return the underlying MPI_Comm object
    MPI_Comm comm() const;

  private:
    // MPI communicator
    MPI_Comm _comm;
  };

  /// Return process rank for the communicator
  static int rank(MPI_Comm comm);

  /// Return size of the group (number of processes) associated with
  /// the communicator
  static int size(MPI_Comm comm);

  /// Send in_values[p0] to process p0 and receive values from process
  /// p1 in out_values[p1]
  template <typename T>
  static graph::AdjacencyList<T>
  all_to_all(MPI_Comm comm, const graph::AdjacencyList<T>& send_data);

  static std::vector<int> compute_graph_edges(MPI_Comm comm,
                                              const std::set<int>& edges);

  /// Neighborhood all-to-all. Send data to neighbors using offsets
  /// into a contiguous data array
  template <typename T>
  static graph::AdjacencyList<T>
  neighbor_all_to_all(MPI_Comm neighbor_comm,
                      const std::vector<int>& send_offsets,
                      const std::vector<T>& send_data);

  static std::tuple<std::vector<int>, std::vector<int>>
  neighbors(MPI_Comm neighbor_comm);

  /// Find global offset (index) (wrapper for MPI_(Ex)Scan with MPI_SUM
  /// as reduction op)
  static std::size_t global_offset(MPI_Comm comm, std::size_t range,
                                   bool exclusive);

  /// Return local range for given process, splitting [0, N - 1] into
  /// size portions of almost equal size
  static std::array<std::int64_t, 2> local_range(int process, std::int64_t N,
                                                 int size);

  /// Return which process owns index (inverse of local_range)
  static int index_owner(int size, std::size_t index, std::size_t N);
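
  // Illustrative example (not part of the original header): with N = 10 and
  // size = 4, and the remainder spread over the lowest ranks, local_range
  // gives [0, 3), [3, 6), [6, 8) and [8, 10), so index_owner(4, 7, 10)
  // returns 2.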

  template <typename T>
  struct dependent_false : std::false_type
  {
  };

  /// MPI Type
  template <typename T>
  static MPI_Datatype mpi_type()
  {
    static_assert(dependent_false<T>::value, "Unknown MPI type");
    throw std::runtime_error("MPI data type unknown");
    return MPI_CHAR;
  }
};

// Turn off doxygen for these template specialisations
// Specialisations for MPI_Datatypes
template <>
inline MPI_Datatype MPI::mpi_type<float>()
{
  return MPI_FLOAT;
}
template <>
inline MPI_Datatype MPI::mpi_type<double>()
{
  return MPI_DOUBLE;
}
template <>
inline MPI_Datatype MPI::mpi_type<std::complex<double>>()
{
  return MPI_DOUBLE_COMPLEX;
}
template <>
inline MPI_Datatype MPI::mpi_type<short int>()
{
  return MPI_SHORT;
}
template <>
inline MPI_Datatype MPI::mpi_type<int>()
{
  return MPI_INT;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned int>()
{
  return MPI_UNSIGNED;
}
template <>
inline MPI_Datatype MPI::mpi_type<long int>()
{
  return MPI_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long>()
{
  return MPI_UNSIGNED_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<long long>()
{
  return MPI_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<unsigned long long>()
{
  return MPI_UNSIGNED_LONG_LONG;
}
template <>
inline MPI_Datatype MPI::mpi_type<bool>()
{
  return MPI_C_BOOL;
}
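// Usage note (illustrative, not in the original header): mpi_type<T>() maps a
// C++ type to the matching MPI_Datatype for raw MPI calls, e.g.
//   MPI_Bcast(x.data(), x.size(), dolfinx::MPI::mpi_type<double>(), 0, comm);
// Unsupported types are rejected at compile time by the static_assert in the
// primary template.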
//---------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::all_to_all(MPI_Comm comm,
                         const graph::AdjacencyList<T>& send_data)
{
  const Eigen::Array<std::int32_t, Eigen::Dynamic, 1>& send_offsets
      = send_data.offsets();
  const Eigen::Array<T, Eigen::Dynamic, 1>& values_in = send_data.array();

  const int comm_size = MPI::size(comm);
  assert(send_data.num_nodes() == comm_size);

  // Data size per destination rank
  std::vector<int> send_size(comm_size);
  std::adjacent_difference(send_offsets.data() + 1,
                           send_offsets.data() + send_offsets.rows(),
                           send_size.begin());

  // Get received data sizes from each rank
  std::vector<int> recv_size(comm_size);
  MPI_Alltoall(send_size.data(), 1, mpi_type<int>(), recv_size.data(), 1,
               mpi_type<int>(), comm);

  // Compute receive offsets
  Eigen::Array<std::int32_t, Eigen::Dynamic, 1> recv_offset(comm_size + 1);
  recv_offset(0) = 0;
  std::partial_sum(recv_size.begin(), recv_size.end(), recv_offset.data() + 1);

  // Send/receive data
  Eigen::Array<T, Eigen::Dynamic, 1> recv_values(recv_offset(comm_size));
  MPI_Alltoallv(values_in.data(), send_size.data(), send_offsets.data(),
                mpi_type<T>(), recv_values.data(), recv_size.data(),
                recv_offset.data(), mpi_type<T>(), comm);

  return graph::AdjacencyList<T>(std::move(recv_values),
                                 std::move(recv_offset));
}
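// Usage note (illustrative, not in the original header): send_data must have
// one node per destination rank (send_data.num_nodes() == MPI::size(comm)),
// with node p holding the values destined for rank p. The returned adjacency
// list has one node per source rank, e.g.
//   auto recv = dolfinx::MPI::all_to_all(comm, send_data);
//   const auto& values = recv.array();    // received values, grouped by source
//   const auto& offsets = recv.offsets(); // offset of each source rank's data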
//-----------------------------------------------------------------------------
template <typename T>
graph::AdjacencyList<T>
dolfinx::MPI::neighbor_all_to_all(MPI_Comm neighbor_comm,
                                  const std::vector<int>& send_offsets,
                                  const std::vector<T>& send_data)
{
  // Get neighbor processes
  int indegree(-1), outdegree(-2), weighted(-1);
  MPI_Dist_graph_neighbors_count(neighbor_comm, &indegree, &outdegree,
                                 &weighted);

  assert((int)send_data.size() == send_offsets.back());
  assert(send_offsets[0] == 0);

  // Get receive sizes
  std::vector<int> send_sizes(outdegree, 0);
  std::vector<int> recv_sizes(indegree);
  std::adjacent_difference(send_offsets.begin() + 1, send_offsets.end(),
                           send_sizes.begin());
  MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI::mpi_type<int>(),
                        recv_sizes.data(), 1, MPI::mpi_type<int>(),
                        neighbor_comm);

  // Work out recv offsets
  Eigen::Array<int, Eigen::Dynamic, 1> recv_offsets(recv_sizes.size() + 1);
  recv_offsets(0) = 0;
  std::partial_sum(recv_sizes.begin(), recv_sizes.end(),
                   recv_offsets.data() + 1);

  Eigen::Array<T, Eigen::Dynamic, 1> recv_data(
      recv_offsets(recv_offsets.rows() - 1));
  MPI_Neighbor_alltoallv(
      send_data.data(), send_sizes.data(), send_offsets.data(),
      MPI::mpi_type<T>(), recv_data.data(), recv_sizes.data(),
      recv_offsets.data(), MPI::mpi_type<T>(), neighbor_comm);

  return graph::AdjacencyList<T>(std::move(recv_data), std::move(recv_offsets));
}
//---------------------------------------------------------------------------

} // namespace dolfinx
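A minimal usage sketch (not part of MPI.h itself), assuming the header is installed as dolfinx/common/MPI.h and that MPI has already been initialised elsewhere:

#include <cstddef>
#include <dolfinx/common/MPI.h>
#include <mpi.h>

void example()
{
  // Wrap (and, by default, duplicate) the world communicator; the duplicate
  // is freed when 'comm' goes out of scope
  dolfinx::MPI::Comm comm(MPI_COMM_WORLD);

  const int rank = dolfinx::MPI::rank(comm.comm());
  const int size = dolfinx::MPI::size(comm.comm());

  // Exclusive prefix sum of the local contribution across ranks
  const std::size_t local_size = 10;
  const std::size_t offset
      = dolfinx::MPI::global_offset(comm.comm(), local_size, true);

  // Ownership of a global index in a range of N = 10 * size entries
  const int owner = dolfinx::MPI::index_owner(size, 5, 10 * size);

  (void)rank;
  (void)offset;
  (void)owner;
}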