My Project
ParallelIstlInformation.hpp
1 /*
2  Copyright 2014, 2015 Dr. Markus Blatt - HPC-Simulation-Software & Services
3  Copyright 2014, 2015 Statoil ASA
4  Copyright 2015 NTNU
5 
6  This file is part of the Open Porous Media project (OPM).
7 
8  OPM is free software: you can redistribute it and/or modify
9  it under the terms of the GNU General Public License as published by
10  the Free Software Foundation, either version 3 of the License, or
11  (at your option) any later version.
12 
13  OPM is distributed in the hope that it will be useful,
14  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  GNU General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with OPM. If not, see <http://www.gnu.org/licenses/>.
20 */
21 #ifndef OPM_PARALLELISTLINFORMTION_HEADER_INCLUDED
22 #define OPM_PARALLELISTLINFORMTION_HEADER_INCLUDED
23 
24 
25 #include <opm/grid/UnstructuredGrid.h>
26 #include <opm/common/ErrorMacros.hpp>
27 #include <any>
28 #include <exception>
29 
30 #include <algorithm>
31 #include <functional>
32 #include <limits>
33 #include <numeric>
34 #include <type_traits>
35 #include <vector>
36 
37 #if HAVE_MPI && HAVE_DUNE_ISTL
38 
39 #include <opm/common/utility/platform_dependent/disable_warnings.h>
40 #include <mpi.h>
41 #include <dune/istl/owneroverlapcopy.hh>
42 #include <dune/common/parallel/interface.hh>
43 #include <dune/common/parallel/communicator.hh>
44 #include <dune/common/enumset.hh>
45 #include <opm/common/utility/platform_dependent/reenable_warnings.h>
46 
47 #include <opm/simulators/utils/ParallelCommunication.hpp>
48 
49 namespace Opm
50 {
namespace
{
    // NOTE(review): an anonymous namespace in a header gives every
    // translation unit its own copy of this trait; a named detail
    // namespace would be preferable, but renaming would break the
    // unqualified uses below.

    /// \brief Trait that is std::true_type iff T is a std::tuple specialization.
    template<class T>
    struct is_tuple
        : std::false_type
    {};

    template<typename... T>
    struct is_tuple<std::tuple<T...> >
        : std::true_type
    {};
}
63 
/// \brief Class that encapsulates the parallelization information needed by the
///        ISTL solvers: the parallel index set, the remote indices and the
///        MPI communicator.
class ParallelISTLInformation
{
public:
    /// \brief The type of the parallel index set used for the local-to-global mapping.
    typedef Dune::OwnerOverlapCopyCommunication<int, int>::ParallelIndexSet ParallelIndexSet;
    /// \brief The type of the remote indices describing the parallel overlap.
    typedef Dune::OwnerOverlapCopyCommunication<int, int>::RemoteIndices RemoteIndices;

    /// \brief Constructs an empty parallel information object on MPI_COMM_WORLD.
    ParallelISTLInformation()
        : indexSet_(new ParallelIndexSet),
          remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, MPI_COMM_WORLD)),
          communicator_(MPI_COMM_WORLD)
    {}

    /// \brief Constructs an empty parallel information object on a given communicator.
    /// \param communicator The MPI communicator to use.
    ParallelISTLInformation(MPI_Comm communicator)
        : indexSet_(new ParallelIndexSet),
          remoteIndices_(new RemoteIndices(*indexSet_, *indexSet_, communicator)),
          communicator_(communicator)
    {}

    /// \brief Constructs from an already set up index set and remote indices.
    /// \param indexSet      The index set with the local-to-global mapping.
    /// \param remoteIndices The remote indices describing the overlap.
    /// \param communicator  The MPI communicator to use.
    ParallelISTLInformation(const std::shared_ptr<ParallelIndexSet>& indexSet,
                            const std::shared_ptr<RemoteIndices>& remoteIndices,
                            MPI_Comm communicator)
        : indexSet_(indexSet), remoteIndices_(remoteIndices), communicator_(communicator)
    {}

    /// \brief Copy constructor; shares (does not deep-copy) the index set and
    ///        remote indices of the other object.
    ParallelISTLInformation(const ParallelISTLInformation& other)
        : indexSet_(other.indexSet_), remoteIndices_(other.remoteIndices_),
          communicator_(other.communicator_)
    {}

    /// \brief Returns the index set with the local-to-global mapping.
    std::shared_ptr<ParallelIndexSet> indexSet() const
    {
        return indexSet_;
    }

    /// \brief Returns the remote indices describing the parallel overlap.
    std::shared_ptr<RemoteIndices> remoteIndices() const
    {
        return remoteIndices_;
    }

    /// \brief Returns the communication object (by value).
    Parallel::Communication communicator() const
    {
        return communicator_;
    }

    /// \brief Copies the internally stored index information into the given
    ///        index set and remote indices, optionally replicated for several
    ///        components.
    /// \param indexSet             The index set to fill.
    /// \param remoteIndices        The remote indices rebuilt from indexSet.
    /// \param local_component_size The local stride between two components
    ///                             (number of local entries per component).
    /// \param num_components       How many times each index is replicated.
    void copyValuesTo(ParallelIndexSet& indexSet, RemoteIndices& remoteIndices,
                      std::size_t local_component_size = 0, std::size_t num_components = 1) const
    {
        // Global stride between components. Only needed when replicating;
        // it is one past the largest global index over all processes.
        ParallelIndexSet::GlobalIndex global_component_size = local_component_size;
        if ( num_components > 1 )
        {
            ParallelIndexSet::GlobalIndex max_gi = 0;
            // compute the max global index
            for( auto i = indexSet_->begin(), end = indexSet_->end(); i != end; ++i )
            {
                max_gi = std::max(max_gi, i->global());
            }
            global_component_size = max_gi+1;
            global_component_size = communicator_.max(global_component_size);
        }
        indexSet.beginResize();
        IndexSetInserter<ParallelIndexSet> inserter(indexSet, global_component_size,
                                                    local_component_size, num_components);
        std::for_each(indexSet_->begin(), indexSet_->end(), inserter);
        indexSet.endResize();
        remoteIndices.rebuild<false>();
    }

    /// \brief Communicates the values of owner entries to the corresponding
    ///        overlap/copy entries on the other processes.
    /// \tparam T An indexable container type supported by Dune::CommPolicy.
    /// \param source The container holding the owner values to send.
    /// \param dest   The container receiving the communicated values.
    template<class T>
    void copyOwnerToAll (const T& source, T& dest) const
    {
        // Attribute sets selecting which entries participate: we send from
        // owner entries and receive into owner, overlap and copy entries.
        typedef Dune::Combine<Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::owner>,Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::overlap>,Dune::OwnerOverlapCopyAttributeSet::AttributeSet> OwnerOverlapSet;
        typedef Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::owner> OwnerSet;
        typedef Dune::Combine<OwnerOverlapSet, Dune::EnumItem<Dune::OwnerOverlapCopyAttributeSet::AttributeSet,Dune::OwnerOverlapCopyAttributeSet::copy>,Dune::OwnerOverlapCopyAttributeSet::AttributeSet> AllSet;
        OwnerSet sourceFlags;
        AllSet destFlags;
        Dune::Interface interface(communicator_);
        // The communication interface can only be built from synced remote indices.
        if( !remoteIndices_->isSynced() )
        {
            remoteIndices_->rebuild<false>();
        }
        interface.build(*remoteIndices_,sourceFlags,destFlags);
        Dune::BufferedCommunicator communicator;
        communicator.template build<T>(interface);
        communicator.template forward<CopyGatherScatter<T> >(source,dest);
        communicator.free();
    }

    /// \brief (Re-)computes the cached owner mask if the container size changed.
    ///
    /// The mask contains 1.0 for entries owned by this process and 0.0 for
    /// overlap/copy entries, so multiplying by it removes ghost contributions.
    /// \param container The container whose size the mask must match.
    /// \return The up-to-date owner mask.
    /// \throw std::runtime_error If no parallel index set is available.
    template<class T>
    const std::vector<double>& updateOwnerMask(const T& container) const
    {
        if( ! indexSet_ )
        {
            OPM_THROW(std::runtime_error, "Trying to update owner mask without parallel information!");
        }
        // Only rebuild when the cached mask does not match the container size.
        if( static_cast<std::size_t>(container.size())!= ownerMask_.size() )
        {
            ownerMask_.resize(container.size(), 1.);
            for( auto i=indexSet_->begin(), end=indexSet_->end(); i!=end; ++i )
            {
                if (i->local().attribute()!=Dune::OwnerOverlapCopyAttributeSet::owner)
                {
                    ownerMask_[i->local().local()] = 0.;
                }
            }
        }
        return ownerMask_;
    }

    /// \brief Returns the cached owner mask (1.0 for owned entries, 0.0 otherwise).
    ///
    /// Only meaningful after a previous call to updateOwnerMask() or
    /// computeReduction() has filled the cache.
    const std::vector<double>& getOwnerMask() const
    {
        return ownerMask_;
    }

    /// \brief Computes a global reduction over a container (or a tuple of
    ///        containers reduced simultaneously).
    ///
    /// Each entry is combined with its owner-mask value by the operator, the
    /// per-process results are gathered on all processes and combined so that
    /// every process ends up with the same global value(s).
    /// \param container      A container, or a std::tuple of containers.
    /// \param binaryOperator A reduction operator (or tuple of operators),
    ///                       e.g. one created in namespace Reduction.
    /// \param value          In: the initial value; out: the reduction result.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value) const
    {
        // Tag-dispatch on whether Container is a std::tuple of containers.
        computeReduction(container, binaryOperator, value, is_tuple<Container>());
    }
private:
    /// \brief Dispatch target for tuples of containers: reduce all of them at once.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value, std::integral_constant<bool,true>) const
    {
        computeTupleReduction(container, binaryOperator, value);
    }
    /// \brief Dispatch target for a single container: wrap everything in
    ///        one-element tuples and unwrap the result afterwards.
    template<typename Container, typename BinaryOperator, typename T>
    void computeReduction(const Container& container, BinaryOperator binaryOperator,
                          T& value, std::integral_constant<bool,false>) const
    {
        std::tuple<const Container&> containers=std::tuple<const Container&>(container);
        auto values=std::make_tuple(value);
        auto operators=std::make_tuple(binaryOperator);
        computeTupleReduction(containers, operators, values);
        value=std::get<0>(values);
    }
    /// \brief Performs the local masked reductions, gathers every process'
    ///        results and recombines them into the global values.
    template<typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    void computeTupleReduction(const std::tuple<Containers...>& containers,
                               std::tuple<BinaryOperators...>& operators,
                               std::tuple<ReturnValues...>& values) const
    {
        static_assert(std::tuple_size<std::tuple<Containers...> >::value==
                      std::tuple_size<std::tuple<BinaryOperators...> >::value,
                      "We need the same number of containers and binary operators");
        static_assert(std::tuple_size<std::tuple<Containers...> >::value==
                      std::tuple_size<std::tuple<ReturnValues...> >::value,
                      "We need the same number of containers and return values");
        if( std::tuple_size<std::tuple<Containers...> >::value==0 )
        {
            return;
        }
        // Copy the initial values.
        std::tuple<ReturnValues...> init=values;
        // NOTE(review): the owner mask is sized from the first container only;
        // the local reduction indexes every container with the mask positions,
        // so all containers are assumed to share that layout.
        updateOwnerMask(std::get<0>(containers));
        computeLocalReduction(containers, operators, values);
        // Gather every process' local results and fold them all into the
        // initial values so each process computes the same global result.
        std::vector<std::tuple<ReturnValues...> > receivedValues(communicator_.size());
        communicator_.allgather(&values, 1, &(receivedValues[0]));
        values=init;
        for( auto rvals=receivedValues.begin(), endvals=receivedValues.end(); rvals!=endvals;
             ++rvals )
        {
            computeGlobalReduction(*rvals, operators, values);
        }
    }
    /// \brief Recursion end of the global reduction over the tuple entries.
    template<int I=0, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I == sizeof...(BinaryOperators), void>::type
    computeGlobalReduction(const std::tuple<ReturnValues...>&,
                           std::tuple<BinaryOperators...>&,
                           std::tuple<ReturnValues...>&) const
    {}
    /// \brief Folds the I-th received value into the I-th global value using
    ///        the I-th operator's (unmasked) local operator, then recurses.
    template<int I=0, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I !=sizeof...(BinaryOperators), void>::type
    computeGlobalReduction(const std::tuple<ReturnValues...>& receivedValues,
                           std::tuple<BinaryOperators...>& operators,
                           std::tuple<ReturnValues...>& values) const
    {
        auto& val=std::get<I>(values);
        val = std::get<I>(operators).localOperator()(val, std::get<I>(receivedValues));
        computeGlobalReduction<I+1>(receivedValues, operators, values);
    }
    /// \brief Recursion end of the local reduction over the tuple entries.
    template<int I=0, typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I==sizeof...(Containers), void>::type
    computeLocalReduction(const std::tuple<Containers...>&,
                          std::tuple<BinaryOperators...>&,
                          std::tuple<ReturnValues...>&) const
    {}
    /// \brief Computes the local masked reduction of the I-th container into
    ///        the I-th value, then recurses to the next tuple entry.
    template<int I=0, typename... Containers, typename... BinaryOperators, typename... ReturnValues>
    typename std::enable_if<I!=sizeof...(Containers), void>::type
    computeLocalReduction(const std::tuple<Containers...>& containers,
                          std::tuple<BinaryOperators...>& operators,
                          std::tuple<ReturnValues...>& values) const
    {
        const auto& container = std::get<I>(containers);
        if( container.size() )
        {
            auto& reduceOperator = std::get<I>(operators);
            // Eigen:Block does not support STL iterators!!!!
            // Therefore we need to rely on the harder random-access
            // property of the containers. But this should be save, too.
            // Just commenting out code in the hope that Eigen might improve
            // in this regard in the future.
            //auto newVal = container.begin();
            auto mask = ownerMask_.begin();
            auto& value = std::get<I>(values);
            value = reduceOperator.getInitialValue();

            // Iterate over the mask positions and index the container with
            // them (random access instead of iterators, see note above).
            for( auto endVal=ownerMask_.end(); mask!=endVal;
                 /*++newVal,*/ ++mask )
            {
                value = reduceOperator(value, container[mask-ownerMask_.begin()], *mask);
            }
        }
        computeLocalReduction<I+1>(containers, operators, values);
    }
    /// \brief Gather/scatter functor that simply copies indexed entries.
    template<typename T>
    struct CopyGatherScatter
    {
        typedef typename Dune::CommPolicy<T>::IndexedType V;

        static V gather(const T& a, std::size_t i)
        {
            return a[i];
        }

        static void scatter(T& a, V v, std::size_t i)
        {
            a[i] = v;
        }
    };
    /// \brief Functor inserting the pairs of one index set into another,
    ///        replicated once per component with shifted indices.
    template<class T>
    class IndexSetInserter
    {
    public:
        typedef T ParallelIndexSet;
        typedef typename ParallelIndexSet::LocalIndex LocalIndex;
        typedef typename ParallelIndexSet::GlobalIndex GlobalIndex;

        /// \param indexSet             The index set to insert into.
        /// \param component_size       Global stride between two components.
        /// \param local_component_size Local stride between two components.
        /// \param num_components       Number of replications per index pair.
        IndexSetInserter(ParallelIndexSet& indexSet, const GlobalIndex& component_size,
                         std::size_t local_component_size, std::size_t num_components)
            : indexSet_(&indexSet), component_size_(component_size),
              local_component_size_(local_component_size),
              num_components_(num_components)
        {}
        /// \brief Inserts one pair per component with shifted global and
        ///        local indices but the original attribute.
        void operator()(const typename ParallelIndexSet::IndexPair& pair)
        {
            for(std::size_t i = 0; i < num_components_; i++)
                indexSet_->add(i * component_size_ + pair.global(),
                               LocalIndex(i * local_component_size_ + pair.local(),
                                          pair.local().attribute()));
        }
    private:
        ParallelIndexSet* indexSet_;
        // The global stride between two components.
        GlobalIndex component_size_;
        // The local stride between two components.
        std::size_t local_component_size_;
        // The number of components each index is replicated for.
        std::size_t num_components_;
    };
    // The index set with the local-to-global mapping (shared between copies).
    std::shared_ptr<ParallelIndexSet> indexSet_;
    // The remote indices describing the parallel overlap.
    std::shared_ptr<RemoteIndices> remoteIndices_;
    // The collective communication wrapper around the MPI communicator.
    Dune::CollectiveCommunication<MPI_Comm> communicator_;
    // Cached owner mask (1.0 for owned entries, 0.0 otherwise); mutable so
    // that the const reduction methods can refresh it lazily.
    mutable std::vector<double> ownerMask_;
};
382 
383  namespace Reduction
384  {
 389  // An operator for parallel reductions that masks ghost entries away before handing them to the wrapped binary operator.
/// \brief An operator for parallel reductions that multiplies every entry by
///        its mask value before handing it to the wrapped binary operator.
///
/// Entries with mask 0 are multiplied away and therefore cannot influence
/// reductions whose neutral element is zero (e.g. sums).
template<typename BinaryOperator>
struct MaskIDOperator
{
    /// The result type stripped of const and reference qualifiers;
    /// std::numeric_limits and value construction need the bare type.
    using Result = typename std::remove_cv<
        typename std::remove_reference<typename BinaryOperator::result_type>::type
        >::type;

    /// \brief Combine an accumulated value with the next masked entry.
    /// \param t1   The value accumulated so far.
    /// \param t2   The next entry.
    /// \param mask The mask of the entry (0 hides it, 1 keeps it).
    template<class T, class T1>
    T operator()(const T& t1, const T& t2, const T1& mask)
    {
        return op_(t1, maskValue(t2, mask));
    }

    /// \brief Apply the mask to a value by multiplication.
    template<class T, class T1>
    T maskValue(const T& t, const T1& mask)
    {
        return t * mask;
    }

    /// \brief Access the operator used to combine per-process results.
    BinaryOperator& localOperator()
    {
        return op_;
    }

    /// \brief The starting value of the reduction (value-initialized Result).
    Result getInitialValue()
    {
        return Result();
    }

private:
    BinaryOperator op_;
};
425 
/// \brief Functor for computing a parallel inner product of a container with
///        itself: each masked entry contributes its square.
///
/// Ghost entries carry mask 0 and therefore contribute nothing.
template<class T>
struct InnerProductFunctor
{
    /// \brief Add the square of the masked entry to the running sum.
    /// \param t1   The sum accumulated so far.
    /// \param t2   The next entry.
    /// \param mask The mask of the entry (0 or 1).
    template<class T1>
    T operator()(const T& t1, const T& t2, const T1& mask)
    {
        const T masked = maskValue(t2, mask);
        return t1 + masked * masked;
    }

    /// \brief Apply the mask to a value by multiplication.
    template<class T1>
    T maskValue(const T& t, const T1& mask)
    {
        return t * mask;
    }

    /// \brief The operator merging per-process results: plain addition.
    std::plus<T> localOperator()
    {
        return std::plus<T>{};
    }

    /// \brief The neutral element of the sum (value-initialized T).
    T getInitialValue()
    {
        return T{};
    }
};
456 
 461  // An operator for maximum-like parallel reductions that maps ghost entries to the lowest representable value so they cannot win the reduction.
/// \brief An operator for maximum-like parallel reductions.
///
/// Masked (ghost) entries are replaced by the lowest representable value of
/// the result type, so they can never be selected by a maximum reduction.
template<typename BinaryOperator>
struct MaskToMinOperator
{
    /// The result type with reference and cv-qualifiers stripped;
    /// std::numeric_limits rejects const- or reference-qualified types.
    /// Note: the reference must be removed *before* const (remove_const on
    /// a `const T&` is a no-op), matching MaskToMaxOperator below.
    typedef typename std::remove_cv<
        typename std::remove_reference<typename BinaryOperator::result_type>::type
        >::type Result;

    /// \param b The binary operator used to combine values.
    MaskToMinOperator(BinaryOperator b)
        : b_(b)
    {}

    /// \brief Combine an accumulated value with the next masked entry.
    /// \param t1   The value accumulated so far.
    /// \param t2   The next entry.
    /// \param mask The mask of the entry (0 hides it, nonzero keeps it).
    template<class T, class T1>
    T operator()(const T& t1, const T& t2, const T1& mask)
    {
        return b_(t1, maskValue(t2, mask));
    }

    /// \brief Returns the value itself when the mask is set, otherwise the
    ///        neutral (lowest) value.
    template<class T, class T1>
    T maskValue(const T& t, const T1& mask)
    {
        if( mask )
        {
            return t;
        }
        else
        {
            return getInitialValue();
        }
    }

    /// \brief The neutral element: the lowest finite value of Result.
    ///
    /// std::numeric_limits<Result>::lowest() (C++11) equals min() for
    /// integral types and -max() for IEEE-754 floating point, which makes
    /// the old manual branch on std::is_integral unnecessary.
    Result getInitialValue()
    {
        return std::numeric_limits<Result>::lowest();
    }

    /// \brief Access the operator used to combine per-process results.
    BinaryOperator& localOperator()
    {
        return b_;
    }
private:
    BinaryOperator b_;
};
522 
/// \brief An operator for minimum-like parallel reductions.
///
/// Masked (ghost) entries are replaced by the largest representable value,
/// so they can never be selected by a minimum reduction.
template<typename BinaryOperator>
struct MaskToMaxOperator
{
    /// The result type with cv- and reference qualifiers stripped, as
    /// required by std::numeric_limits.
    using Result = typename std::remove_cv<
        typename std::remove_reference<typename BinaryOperator::result_type>::type
        >::type;

    /// \param b The binary operator used to combine values.
    MaskToMaxOperator(BinaryOperator b)
        : b_(b)
    {}

    /// \brief Combine an accumulated value with the next masked entry.
    /// \param t1   The value accumulated so far.
    /// \param t2   The next entry.
    /// \param mask The mask of the entry (0 hides it, nonzero keeps it).
    template<class T, class T1>
    T operator()(const T& t1, const T& t2, const T1& mask)
    {
        return b_(t1, maskValue(t2, mask));
    }

    /// \brief Returns the value itself when the mask is set, otherwise the
    ///        largest representable value.
    template<class T, class T1>
    T maskValue(const T& t, const T1& mask)
    {
        return mask ? t : std::numeric_limits<T>::max();
    }

    /// \brief Access the operator used to combine per-process results.
    BinaryOperator& localOperator()
    {
        return b_;
    }

    /// \brief The neutral element: the largest value of Result.
    Result getInitialValue()
    {
        return std::numeric_limits<Result>::max();
    }
private:
    BinaryOperator b_;
};
575  template<class T>
576  MaskIDOperator<std::plus<T> >
577  makeGlobalSumFunctor()
578  {
579  return MaskIDOperator<std::plus<T> >();
580  }
584  template<class T>
585  auto makeGlobalMaxFunctor()
586  {
587  struct MaxOp
588  {
589  using result_type = T;
590  const result_type& operator()(const T& t1, const T& t2)
591  {
592  return std::max(t1, t2);
593  }
594  };
595  return MaskToMinOperator(MaxOp());
596  }
597 
namespace detail
{
    /// \brief Computes the maximum of the absolute values of two values.
    template<typename T, typename Enable = void>
    struct MaxAbsFunctor
    {
        using result_type = T;
        result_type operator()(const T& a,
                               const T& b)
        {
            return std::max(std::abs(a), std::abs(b));
        }
    };

    /// \brief Specialization for unsigned integer types.
    ///
    /// For unsigned types std::abs is ambiguous (and pointless), so the
    /// values are compared directly.
    template<typename T>
    struct MaxAbsFunctor<T, typename std::enable_if<std::is_unsigned<T>::value>::type>
    {
        using result_type = T;
        result_type operator()(const T& a,
                               const T& b)
        {
            return std::max(a, b);
        }
    };
}
626 
630  template<class T>
631  MaskIDOperator<detail::MaxAbsFunctor<T> >
632  makeLInfinityNormFunctor()
633  {
634  return MaskIDOperator<detail::MaxAbsFunctor<T> >();
635  }
639  template<class T>
640  auto
641  makeGlobalMinFunctor()
642  {
643  struct MinOp
644  {
645  using result_type = T;
646  const result_type& operator()(const T& t1, const T& t2)
647  {
648  return std::min(t1, t2);
649  }
650  };
651  return MaskToMaxOperator(MinOp());
652  }
653  template<class T>
654  InnerProductFunctor<T>
655  makeInnerProductFunctor()
656  {
657  return InnerProductFunctor<T>();
658  }
659  } // end namespace Reduction
660 } // end namespace Opm
661 
662 #endif
663 
664 namespace Opm
665 {
675 
676 inline void extractParallelGridInformationToISTL(std::any& anyComm, const UnstructuredGrid& grid)
677 {
678  (void)anyComm; (void)grid;
679 }
680 
/// \brief Accumulates the entries of a container, optionally masked.
///
/// With a mask, every entry is multiplied by its mask value (1 keeps the
/// entry, 0 hides it) before being added to the sum.
/// \param container     The container whose entries are summed.
/// \param maskContainer Pointer to the mask values, or nullptr to sum all
///                      entries unmasked.
/// \return The (masked) sum of the entries.
template<class T1>
auto
accumulateMaskedValues(const T1& container, const std::vector<double>* maskContainer)
    -> decltype(container[0]*(*maskContainer)[0])
{
    using ValueType = decltype(container[0]*(*maskContainer)[0]);
    const ValueType zero = 0;

    if( !maskContainer )
    {
        return std::accumulate(container.begin(), container.end(), zero);
    }
    return std::inner_product(container.begin(), container.end(),
                              maskContainer->begin(), zero);
}
703 } // end namespace Opm
704 
705 #endif
This file contains a set of helper functions used by VFPProd / VFPInj.
Definition: BlackoilPhases.hpp:27
void extractParallelGridInformationToISTL(std::any &anyComm, const UnstructuredGrid &grid)
Extracts the information about the data decomposition from the grid for dune-istl.
Definition: ParallelIstlInformation.hpp:676
auto accumulateMaskedValues(const T1 &container, const std::vector< double > *maskContainer) -> decltype(container[0] *(*maskContainer)[0])
Accumulates entries masked with 1.
Definition: ParallelIstlInformation.hpp:689