// MPI error checking is currently disabled (no-op macro)
#define CheckMPIStatus(A,B) {}
#include <mpi.h>

#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <tuple>
#include <utility>
#include <vector>

#include <dune/common/fvector.hh>
#include <dune/common/hybridutilities.hh>
#include <dune/common/parallel/mpitraits.hh>  // for Dune::MPITraits

#include <dune/geometry/type.hh>
namespace Impl {

/** \brief Trait resolving the MPI_Datatype and the number of MPI entries per element for a C++ type */
template<typename T>
struct MPITypeInfo {};
template<>
struct MPITypeInfo< int >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType() { return MPI_INT; }
};

template<typename K, int N>
struct MPITypeInfo< Dune::FieldVector<K,N> >
{
  static const unsigned int size = N;
  static inline MPI_Datatype getType() { return Dune::MPITraits<K>::getType(); }
};

template<>
struct MPITypeInfo< unsigned int >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType() { return MPI_UNSIGNED; }
};

template<>
struct MPITypeInfo< Dune::GeometryType >
{
  static const unsigned int size = 1;
  static inline MPI_Datatype getType() { return Dune::MPITraits< Dune::GeometryType >::getType(); }
};
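
/* Illustration (not part of the original header): the trait gives, per element, the
   MPI datatype and the number of MPI entries it occupies, e.g. one
   Dune::FieldVector<double,3> is transmitted as three entries of MPITraits<double>'s type. */
static_assert(MPITypeInfo< Dune::FieldVector<double,3> >::size == 3,
              "a FieldVector<double,3> occupies three MPI entries");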
/** \brief Resize a receive buffer to the element count reported by an MPI_Status */
template<typename T>
void MPI_SetVectorSize(
  std::vector<T> & data,
  MPI_Status & status)
{
  typedef MPITypeInfo<T> Info;
  int sz;
  MPI_Get_count(&status, Info::getType(), &sz);
  assert(sz%Info::size == 0);
  data.resize(sz/Info::size);
}
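
/* Worked example (illustrative): for T = Dune::FieldVector<double,3>, a message that
   MPI_Get_count reports as 12 entries holds 12/3 = 4 field vectors, so the buffer is
   resized to 4 elements. */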
/** \brief Post a non-blocking send of data to the right neighbour and a
    non-blocking receive into next from the left neighbour */
template<typename T>
void MPI_SendVectorInRing(
  std::vector<T> & data, std::vector<T> & next,
  int tag, int rightrank, int leftrank, MPI_Comm comm,
  MPI_Request & r_send, MPI_Request & r_recv)
{
  [[maybe_unused]] int result = 0;
  typedef MPITypeInfo<T> Info;
  // receive into the full capacity; the actual size is read from the MPI_Status later
  next.resize(next.capacity());
  result = MPI_Isend(
    &(data[0]), Info::size*data.size(), Info::getType(), rightrank, tag,
    comm, &r_send);
  result = MPI_Irecv(
    &(next[0]), Info::size*next.size(), Info::getType(), leftrank, tag,
    comm, &r_recv);
}
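
/* Usage sketch (illustrative, not part of this header): one ring step for a single
   int buffer.  The name ringStep and its parameters are invented for this example. */
inline void ringStep(std::vector<int> & data, std::vector<int> & next,
                     std::size_t maxSize, int rightrank, int leftrank, MPI_Comm comm)
{
  next.reserve(maxSize);             // capacity must cover the largest possible message
  MPI_Request r_send, r_recv;
  MPI_SendVectorInRing(data, next, /*tag*/ 0, rightrank, leftrank, comm, r_send, r_recv);
  MPI_Status status;
  MPI_Wait(&r_recv, &status);
  MPI_SetVectorSize(next, status);   // shrink to the element count actually received
  MPI_Wait(&r_send, MPI_STATUS_IGNORE);
}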
/* Helper functor: posts the ring send/receive for tuple entry i
   (used instead of a variadic generic lambda) */
template<typename... Args>
struct call_MPI_SendVectorInRing
{
  std::tuple<Args...> & remotedata;
  std::tuple<Args...> & nextdata;
  int & tag;
  int & rightrank;
  int & leftrank;
  MPI_Comm & mpicomm;
  std::array<MPI_Request, sizeof...(Args)> & requests_send;
  std::array<MPI_Request, sizeof...(Args)> & requests_recv;

  template<typename I>
  void operator()(I i)
  {
    MPI_SendVectorInRing(
      std::get<i>(remotedata),
      std::get<i>(nextdata),
      tag,
      rightrank, leftrank, mpicomm,
      requests_send[i],
      requests_recv[i]);
  }
};
/* Helper functor: resizes tuple entry i to the size received in status_recv[i] */
template<typename... Args>
struct call_MPI_SetVectorSize
{
  std::tuple<Args...> & nextdata;
  std::array<MPI_Status, sizeof...(Args)> & status_recv;

  template<typename I>
  void operator()(I i)
  {
    MPI_SetVectorSize(std::get<i>(nextdata), status_recv[i]);
  }
};
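
/* Note (illustrative): Dune::Hybrid::forEach invokes these functors with a
   std::integral_constant index, so `i` converts to a compile-time constant and the
   std::get<i>(...) calls in the operators above are well-formed. */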
template<typename OP, std::size_t... Indices, typename... Args>
void MPI_AllApply_impl(MPI_Comm mpicomm,
  OP && op,
  std::index_sequence<Indices...> indices,
  const Args& ... data)
{
  constexpr std::size_t N = sizeof...(Args);
  int myrank = 0;
  int commsize = 0;
  MPI_Comm_rank(mpicomm, &myrank);
  MPI_Comm_size(mpicomm, &commsize);

  if (commsize > 1)
  {
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " Start Communication, size " << commsize << std::endl;
#endif

    // local data sizes
    std::array<unsigned int, N> size({ ((unsigned int)data.size())... });

    // communicate the global maximum size of every entry
    std::array<unsigned int, N> maxSize;
    MPI_Allreduce(&size, &maxSize,
      size.size(), MPI_UNSIGNED, MPI_MAX, mpicomm);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " maxSize done... " << std::endl;
#endif

    // allocate buffers of maximum size to avoid reallocation during the ring exchange
    std::tuple<Args...> remotedata { Args(maxSize[Indices])... };

    // initially the "remote" data is a copy of our own data
    remotedata = std::tie(data...);

    // second set of buffers for the data arriving in the next step
    std::tuple<Args...> nextdata { Args(maxSize[Indices])... };

    // ring communication: send to the right neighbour, receive from the left neighbour
    int rightrank = (myrank + 1 + commsize) % commsize;
    int leftrank = (myrank - 1 + commsize) % commsize;

    std::cout << myrank << ": size = " << commsize << std::endl;
    std::cout << myrank << ": left = " << leftrank
              << " right = " << rightrank << std::endl;

    // rank whose data currently sits in remotedata
    int remoterank = myrank;

    for (int i=1; i<commsize; i++)
    {
      // in this iteration we receive the data of nextrank
      int nextrank = (myrank - i + commsize) % commsize;

      std::cout << myrank << ": next = " << nextrank << std::endl;

      // post the sends and receives for all buffers
      std::array<MPI_Request,N> requests_send;
      std::array<MPI_Request,N> requests_recv;

      int tag = 0;
      Dune::Hybrid::forEach(indices,
        call_MPI_SendVectorInRing<Args...>({
            remotedata, nextdata, tag,
            rightrank, leftrank, mpicomm,
            requests_send, requests_recv
          }));

      // overlap communication with the local application of the operator
      op(remoterank, std::get<Indices>(remotedata)...);

      // wait until all receives have completed
      std::array<MPI_Status,N> status_send;
      std::array<MPI_Status,N> status_recv;
      MPI_Waitall(N, &requests_recv[0], &status_recv[0]);

      // remotedata will next hold the data received from nextrank
      remoterank = nextrank;

      // shrink the receive buffers to the sizes actually received
      Dune::Hybrid::forEach(indices,
        call_MPI_SetVectorSize<Args...>({
            nextdata, status_recv
          }));

      MPI_Waitall(N, &requests_send[0], &status_send[0]);

      // swap the communication buffers
      std::swap(remotedata, nextdata);
    }

    // apply the operator to the last chunk of remote data
    op(remoterank, std::get<Indices>(remotedata)...);
  }
  else // single rank: apply the operator to the local data only
  {
    op(myrank, data...);
  }
}
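
/* Walkthrough (illustrative): with commsize = 3 and myrank = 0, the operator is applied
   first to rank 0's own data, then (after step i = 1) to the data received from rank 2,
   and finally to the data received from rank 1, so after commsize - 1 ring steps every
   rank has seen the data of every other rank exactly once. */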
} // namespace Impl

/** \brief apply an operator locally to a distributed data set
 *
 *  op is invoked once per rank, as op(remoterank, data...), with that rank's data.
 */
template<typename OP, typename... Args>
void MPI_AllApply(MPI_Comm mpicomm, OP && op, const Args& ... data)
{
  Impl::MPI_AllApply_impl(
    mpicomm,
    std::forward<OP>(op),
    std::make_index_sequence<sizeof...(Args)>(),
    data...);
}
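
/* Usage sketch (illustrative, not part of this header): gather every rank's local
   coordinate list and print how many entries each rank contributed.  The function name
   printRemoteSizes and the buffer name coords are invented for this example. */
inline void printRemoteSizes(MPI_Comm comm,
                             const std::vector< Dune::FieldVector<double,3> > & coords)
{
  MPI_AllApply(comm,
    [](int remoterank, const std::vector< Dune::FieldVector<double,3> > & remoteCoords) {
      std::cout << "rank " << remoterank << " contributed "
                << remoteCoords.size() << " coordinates" << std::endl;
    },
    coords);
}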