CpGridData.hpp
//===========================================================================
//
// File: CpGridData.hpp
//
// Created: Sep 17 21:11:41 2013
//
// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
//            Bård Skaflestad <bard.skaflestad@sintef.no>
//            Markus Blatt <markus@dr-blatt.de>
//
// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
//          and were transferred here during refactoring for the parallelization.
//
// $Date$
//
// $Revision$
//
//===========================================================================

/*
  Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
  Copyright 2009, 2010, 2013 Statoil ASA.
  Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services

  This file is part of The Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef OPM_CPGRIDDATA_HEADER
#define OPM_CPGRIDDATA_HEADER

// Warning suppression for Dune includes.
#include <opm/grid/utility/platform_dependent/disable_warnings.h>

#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#ifdef HAVE_DUNE_ISTL
#include <dune/istl/owneroverlapcopy.hh>
#endif

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
#include <dune/common/parallel/communication.hh>
#else
#include <dune/common/parallel/collectivecommunication.hh>
#endif
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/plocalindex.hh>
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
#include <dune/common/parallel/variablesizecommunicator.hh>
#else
#include <opm/grid/utility/VariableSizeCommunicator.hpp>
#endif
#include <dune/grid/common/gridenums.hh>

#include <opm/grid/utility/platform_dependent/reenable_warnings.h>

#if HAVE_ECL_INPUT
#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
#endif

#include <array>
#include <tuple>
#include <algorithm>
#include <numeric> // for std::accumulate used below
#include <set>

#include "OrientedEntityTable.hpp"
#include "DefaultGeometryPolicy.hpp"

#include <opm/grid/utility/OpmParserIncludes.hpp>

#include "Entity2IndexDataHandle.hpp"
#include "DataHandleWrappers.hpp"
#include "GlobalIdMapping.hpp"

namespace Opm
{
class EclipseState;
}
namespace Dune
{
class CpGrid;

namespace cpgrid
{

class IndexSet;
class IdSet;
class LevelGlobalIdSet;
class PartitionTypeIndicator;
template<int,int> class Geometry;
template<int> class Entity;
template<int> class EntityRep;

namespace mover
{
template<class T, int i> struct Mover;
}

/// Struct that holds all the data needed to represent a CpGrid.
class CpGridData
{
    template<class T, int i> friend struct mover::Mover;

    friend class GlobalIdSet;

private:
    CpGridData(const CpGridData& g);

public:
    enum{
#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
        /// The maximum number of data items allowed per cell; limited
        /// because of a bug in DUNE < 2.5.2 when communicating.
        MAX_DATA_PER_CELL = 24,
#else
        /// The maximum number of data items allowed per cell.
        MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
#endif
    };

149 
153  explicit CpGridData(MPIHelper::MPICommunicator comm);
154 
156  CpGridData();
158  ~CpGridData();
160  int size(int codim) const;
161 
163  int size (GeometryType type) const
164  {
165  if (type.isCube()) {
166  return size(3 - type.dim());
167  } else {
168  return 0;
169  }
170  }
    /// Read the Sintef legacy grid format ('topogeom').
    void readSintefLegacyFormat(const std::string& grid_prefix);

    /// Write the Sintef legacy grid format ('topogeom').
    void writeSintefLegacyFormat(const std::string& grid_prefix) const;

    /// Read the Eclipse grid format ('grdecl').
    void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);

#if HAVE_ECL_INPUT
    /// Read the Eclipse grid format ('grdecl') from a deck.
    void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension, bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());

    /// Read the Eclipse grid format ('grdecl') from processed input.
    std::vector<std::size_t> processEclipseFormat(const Opm::EclipseGrid* ecl_grid, Opm::EclipseState* ecl_state,
                                                  bool periodic_extension, bool turn_normals = false, bool clip_z = false, bool pinchActive = true);
#endif

    /// Read the Eclipse grid format ('grdecl').
    void processEclipseFormat(const grdecl& input_data, Opm::EclipseState* ecl_state,
                              std::array<std::set<std::pair<int, int>>, 2>& nnc,
                              bool remove_ij_boundary, bool turn_normals, bool pinchActive);

    /// Extract the Cartesian index triplet (i,j,k) of an active cell.
    void getIJK(int c, std::array<int,3>& ijk) const
    {
        int gc = global_cell_[c];
        ijk[0] = gc % logical_cartesian_size_[0]; gc /= logical_cartesian_size_[0];
        ijk[1] = gc % logical_cartesian_size_[1];
        ijk[2] = gc / logical_cartesian_size_[1];
    }
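    // Example: on a hypothetical 4x3x5 logical Cartesian grid, an active
    // cell whose Cartesian index is gc = i + 4*j + 12*k, e.g. gc = 44 for
    // (i,j,k) = (0,2,3), is decomposed above as 44 % 4 = 0, then
    // 44 / 4 = 11, 11 % 3 = 2 and 11 / 3 = 3, recovering {0, 2, 3}.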

    /// Make unique boundary ids for all intersections.
    void computeUniqueBoundaryIds();

    /// Is the grid currently using unique boundary ids?
    bool uniqueBoundaryIds() const
    {
        return use_unique_boundary_ids_;
    }

    /// Set whether we want to have unique boundary ids.
    void setUniqueBoundaryIds(bool uids)
    {
        use_unique_boundary_ids_ = uids;
        if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
            computeUniqueBoundaryIds();
        }
    }

    /// Return the internalized zcorn copy from the grid processing.
    const std::vector<double>& zcornData() const {
        return zcorn;
    }

    /// Get the index set.
    const IndexSet& indexSet() const
    {
        return *index_set_;
    }

    /// The logical Cartesian size of the grid.
    const std::array<int, 3>& logicalCartesianSize() const
    {
        return logical_cartesian_size_;
    }

    /// Redistribute a global grid.
    void distributeGlobalGrid(CpGrid& grid,
                              const CpGridData& view_data,
                              const std::vector<int>& cell_part);

    /// Communicate objects for all codims on a given level.
    template<class DataHandle>
    void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);
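    // A minimal sketch of a compatible data handle (hypothetical names),
    // communicating one double per cell. The handle follows the usual
    // dune-grid CommDataHandleIF conventions that communicate() relies on:
    // contains(), fixedsize(), size(), gather() and scatter().
    //
    //   struct CellValueHandle
    //   {
    //       using DataType = double;
    //       bool contains(int /*dim*/, int codim) const { return codim == 0; }
    //       bool fixedsize(int /*dim*/, int /*codim*/) const { return true; }
    //       template<class Entity> std::size_t size(const Entity&) const { return 1; }
    //       template<class Buffer, class Entity>
    //       void gather(Buffer& buffer, const Entity& e) const
    //       { buffer.write(values_[indexOf(e)]); } // indexOf: assumed entity-to-index map
    //       template<class Buffer, class Entity>
    //       void scatter(Buffer& buffer, const Entity& e, std::size_t /*n*/)
    //       { buffer.read(values_[indexOf(e)]); }
    //       std::vector<double> values_;
    //   };
    //
    // which could then be used to, e.g., copy owner cell values to the
    // overlap cells of other processes:
    //
    //   gridData.communicate(handle, Dune::InteriorBorder_All_Interface,
    //                        Dune::ForwardCommunication);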

#if HAVE_MPI
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
    /// The type of the communicator for variable-size data.
    using Communicator = VariableSizeCommunicator<>;
#else
    /// The type of the communicator for variable-size data.
    using Communicator = Opm::VariableSizeCommunicator<>;
#endif

    /// The type of the map describing communication interfaces.
    using InterfaceMap = Communicator::InterfaceMap;

    /// The type of the parallel communication information for the cells.
    using CommunicationType = Dune::OwnerOverlapCopyCommunication<int,int>;

    /// The type of the parallel index set.
    using ParallelIndexSet = CommunicationType::ParallelIndexSet;

    /// The type of the remote indices information.
    using RemoteIndices = Dune::RemoteIndices<ParallelIndexSet>;

    /// Get the communication information for the cells.
    CommunicationType& cellCommunication()
    {
        return cell_comm_;
    }

    /// Get the communication information for the cells (const version).
    const CommunicationType& cellCommunication() const
    {
        return cell_comm_;
    }

    ParallelIndexSet& cellIndexSet()
    {
        return cellCommunication().indexSet();
    }

    const ParallelIndexSet& cellIndexSet() const
    {
        return cellCommunication().indexSet();
    }

    RemoteIndices& cellRemoteIndices()
    {
        return cellCommunication().remoteIndices();
    }

    const RemoteIndices& cellRemoteIndices() const
    {
        return cellCommunication().remoteIndices();
    }
#endif

#ifdef HAVE_DUNE_ISTL
    /// The type of the set of the attributes.
    typedef Dune::OwnerOverlapCopyAttributeSet::AttributeSet AttributeSet;
#else
    /// The type of the set of the attributes.
    enum AttributeSet{owner, overlap, copy};
#endif

    /// Get sorted active cell indices of the numerical aquifer.
    const std::vector<int>& sortedNumAquiferCells() const
    {
        return aquifer_cells_;
    }

private:

    /// Set up the parallel index set for the cells.
    void populateGlobalCellIndexSet();

#if HAVE_MPI

    /// Gather data attached to entities from all processes onto the global view.
    template<class DataHandle>
    void gatherData(DataHandle& data, CpGridData* global_view,
                    CpGridData* distributed_view);

    /// Gather data attached to entities of one codim onto the global view.
    template<int codim, class DataHandle>
    void gatherCodimData(DataHandle& data, CpGridData* global_data,
                         CpGridData* distributed_data);

    /// Scatter data attached to entities from the global view to the distributed view.
    template<class DataHandle>
    void scatterData(DataHandle& data, CpGridData* global_data,
                     CpGridData* distributed_data, const InterfaceMap& cell_inf,
                     const InterfaceMap& point_inf);

    /// Scatter data attached to entities of one codim to the distributed view.
    template<int codim, class DataHandle>
    void scatterCodimData(DataHandle& data, CpGridData* global_data,
                          CpGridData* distributed_data);

    /// Communicate data along a parallel interface.
    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const Interface& interface);

    /// Communicate data along an interface described by an interface map.
    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const InterfaceMap& interface);

#endif

    void computeGeometry(CpGrid& grid,
                         const DefaultGeometryPolicy& globalGeometry,
                         const std::vector<int>& globalAquiferCells,
                         const OrientedEntityTable<0, 1>& globalCell2Faces,
                         DefaultGeometryPolicy& geometry,
                         std::vector<int>& aquiferCells,
                         const OrientedEntityTable<0, 1>& cell2Faces,
                         const std::vector< std::array<int,8> >& cell2Points);

    // Representing the topology
    /// Cell to faces.
    cpgrid::OrientedEntityTable<0, 1> cell_to_face_;
    /// Face to cells.
    cpgrid::OrientedEntityTable<1, 0> face_to_cell_;
    /// Face to points.
    Opm::SparseTable<int> face_to_point_;
    /// Cell to points.
    std::vector< std::array<int,8> > cell_to_point_;
    /// The logical Cartesian size of the grid.
    std::array<int, 3> logical_cartesian_size_;
    /// Mapping from active cell indices to global Cartesian indices.
    std::vector<int> global_cell_;
    /// The tag of each face.
    cpgrid::EntityVariable<enum face_tag, 1> face_tag_; // {LEFT, BACK, TOP}
    /// The type used for 3d points.
    typedef FieldVector<double, 3> PointType;
    /// The geometries of the grid entities.
    cpgrid::DefaultGeometryPolicy geometry_;
    /// The unique boundary ids of the intersections.
    cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
    /// The index set of the grid.
    cpgrid::IndexSet* index_set_;
    /// The local id set.
    const cpgrid::IdSet* local_id_set_;
    /// The global id set.
    LevelGlobalIdSet* global_id_set_;
    /// The indicator of the partition type of the entities.
    PartitionTypeIndicator* partition_type_indicator_;

    /// The type of the collective communication.
    typedef MPIHelper::MPICommunicator MPICommunicator;
    typedef Dune::CollectiveCommunication<MPICommunicator> CollectiveCommunication;
    /// Our collective communication object.
    CollectiveCommunication ccobj_;

    // Boundary information (optional).
    bool use_unique_boundary_ids_;

    /// The internalized zcorn copy from the grid processing.
    std::vector<double> zcorn;

    /// Sorted active cell indices of the numerical aquifer.
    std::vector<int> aquifer_cells_;

#if HAVE_MPI

    /// The communication information for the cells.
    CommunicationType cell_comm_;

    /// The communication interfaces for the cells, one entry per InterfaceType.
    std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
    /*
    // code deactivated, because users cannot access face indices and therefore
    // communication on faces makes no sense!
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    face_interfaces_;
    */
    /// The communication interfaces for the points, one entry per InterfaceType.
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    point_interfaces_;

#endif

    // Return the geometry vector corresponding to the given codim.
    template <int codim>
    const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
    {
        return geometry_.geomVector<codim>();
    }

    friend class Dune::CpGrid;
    template<int> friend class Entity;
    template<int> friend class EntityRep;
    template<int> friend class EntityPointer;
    friend class Intersection;
    friend class PartitionTypeIndicator;
};

#if HAVE_MPI

namespace
{
// Get the interface belonging to an InterfaceType out of a tuple that
// holds one interface per InterfaceType.
template<class T>
T& getInterface(InterfaceType iftype,
                std::tuple<T,T,T,T,T>& interfaces)
{
    switch(iftype)
    {
    case 0:
        return std::get<0>(interfaces);
    case 1:
        return std::get<1>(interfaces);
    case 2:
        return std::get<2>(interfaces);
    case 3:
        return std::get<3>(interfaces);
    case 4:
        return std::get<4>(interfaces);
    }
    OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
}

} // end unnamed namespace

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                                  const Interface& interface)
{
    this->template communicateCodim<codim>(data, dir, interface.interfaces());
}

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
                                  const InterfaceMap& interface)
{
    Communicator comm(ccobj_, interface);

    if(dir==ForwardCommunication)
        comm.forward(data_wrapper);
    else
        comm.backward(data_wrapper);
}
#endif

template<class DataHandle>
void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
                             CommunicationDirection dir)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
        communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
        communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
    }
#else
    // Suppress warnings for unused arguments.
    (void) data;
    (void) iftype;
    (void) dir;
#endif
}
} // namespace cpgrid
} // namespace Dune

#if HAVE_MPI
#include <opm/grid/cpgrid/Entity.hpp>
#include <opm/grid/cpgrid/Indexsets.hpp>

namespace Dune {
namespace cpgrid {

namespace mover
{
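// A buffer for moving variable-sized per-entity data between a global and
// a distributed grid view: a data handle first gathers values into the
// buffer via write(), reset() rewinds the read position, and the handle
// then consumes the values again via read().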
template<class T>
class MoveBuffer
{
    friend class Dune::cpgrid::CpGridData;
public:
    void read(T& data)
    {
        data=buffer_[index_++];
    }
    void write(const T& data)
    {
        buffer_[index_++]=data;
    }
    void reset()
    {
        index_=0;
    }
    void resize(std::size_t size)
    {
        buffer_.resize(size);
        index_=0;
    }
private:
    std::vector<T> buffer_;
    typename std::vector<T>::size_type index_;
};
template<class DataHandle,int codim>
struct Mover
{
};
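// Only the specializations of Mover for codim 0 (cells), codim 1 (faces)
// and codim 3 (points) below are used. Given a pair of cell indices, they
// move the data attached to the cell itself, to its faces, or to its
// corner points, respectively.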

template<class DataHandle>
struct BaseMover
{
    BaseMover(DataHandle& data)
        : data_(data)
    {}
    template<class E>
    void moveData(const E& from, const E& to)
    {
        // Gather the data attached to 'from' into the buffer, rewind the
        // buffer, and scatter the data onto 'to'.
        std::size_t size=data_.size(from);
        buffer.resize(size);
        data_.gather(buffer, from);
        buffer.reset();
        data_.scatter(buffer, to, size);
    }
    DataHandle& data_;
    MoveBuffer<typename DataHandle::DataType> buffer;
};


template<class DataHandle>
struct Mover<DataHandle,0> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
        Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
        this->moveData(from_entity, to_entity);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,1> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        // Move the data attached to the faces of a cell, using the
        // cell-to-face mappings of both views.
        typedef typename OrientedEntityTable<0,1>::row_type row_type;
        EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
        EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
        OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
        row_type from_faces=table.operator[](from_cell);
        row_type to_faces=scatterView_->cell_to_face_[to_cell];

        for(int i=0; i<from_faces.size(); ++i)
            this->moveData(from_faces[i], to_faces[i]);
    }
    CpGridData *gatherView_;
    CpGridData *scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,3> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}
    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        // Move the data attached to the eight corner points of a cell.
        const std::array<int,8>& from_cell_points=
            gatherView_->cell_to_point_[from_cell_index];
        const std::array<int,8>& to_cell_points=
            scatterView_->cell_to_point_[to_cell_index];
        for(std::size_t i=0; i<8; ++i)
        {
            this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
                           Entity<3>(*scatterView_, to_cell_points[i], true));
        }
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

} // end mover namespace

template<class DataHandle>
void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
                             CpGridData* distributed_data, const InterfaceMap& cell_inf,
                             const InterfaceMap& point_inf)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
    }
#endif
}

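// Scatter codim data from the global view to the distributed view: each
// entry of the distributed cell index set pairs a global index (valid in
// the global view) with a local index (valid in the distributed view),
// and the Mover copies the attached data accordingly.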
template<int codim, class DataHandle>
void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
                                  CpGridData* distributed_data)
{
    CpGridData *gather_view, *scatter_view;
    gather_view=global_data;
    scatter_view=distributed_data;

    mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);

    for(auto index=distributed_data->cellIndexSet().begin(),
            end = distributed_data->cellIndexSet().end();
        index!=end; ++index)
    {
        std::size_t from=index->global();
        std::size_t to=index->local();
        mover(from,to);
    }
}

namespace
{

// Apply func to every entity of the given codim in [begin, endit) whose
// partition type is interior.
template<int codim, class T, class F>
void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
{
    for(T it=begin; it!=endit; ++it)
    {
        Entity<codim> entity(distributed_data, it-begin, true);
        PartitionType pt = entity.partitionType();
        if(pt==Dune::InteriorEntity)
        {
            func(*it, entity);
        }
    }
}

template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
        : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& i, E& entity)
    {
        ownedGlobalIndices.push_back(i);
        ownedSizes.push_back(data.size(entity));
    }
    DataHandle& data;
    std::vector<int>& ownedGlobalIndices;
    std::vector<int>& ownedSizes;
};

template<class DataHandle>
struct DataGatherer
{
    DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
                 DataHandle& data_)
        : buffer(buffer_), data(data_)
    {}

    template<class T, class E>
    void operator()(T& /* it */, E& entity)
    {
        data.gather(buffer, entity);
    }
    mover::MoveBuffer<typename DataHandle::DataType>& buffer;
    DataHandle& data;
};

} // end unnamed namespace

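// Gather data from the distributed view onto the global view for all
// codims the handle contains (only cells and points are communicated).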
template<class DataHandle>
void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
                            CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        gatherCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        gatherCodimData<3>(data, global_data, distributed_data);
#endif
}

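// Gather codim data onto the global view. The communication pattern is:
// (1) every process determines the global indices and data sizes of the
// entities it owns, (2) these are made known everywhere with
// MPI_Allgatherv, (3) the data itself is packed into a buffer and
// allgathered the same way, and (4) each process scatters the received
// items into the handle at the positions given by the global indices.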
template<int codim, class DataHandle>
void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
                                 CpGridData* distributed_data)
{
#if HAVE_MPI
    // Get the mapping to global index from the global id set.
    const std::vector<int>& mapping =
        distributed_data->global_id_set_->getMapping<codim>();

    // Get the global indices and data size for the entities whose data is
    // to be sent, i.e. the ones that we own.
    std::vector<int> owned_global_indices;
    std::vector<int> owned_sizes;
    owned_global_indices.reserve(mapping.size());
    owned_sizes.reserve(mapping.size());

    GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);

    // Communicate the number of indices that each process sends.
    int no_indices=owned_sizes.size();
    // We will take the address of the first element for MPI_Allgather below.
    // Make sure the containers have such an element.
    if ( owned_global_indices.empty() )
        owned_global_indices.resize(1);
    if ( owned_sizes.empty() )
        owned_sizes.resize(1);
    std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
    distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
    // Compute the size of the vector needed to receive all indices,
    // and allgather the global indices and the sizes.
    // Calculate the displacements.
    std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
    std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
                   std::plus<int>());
    int global_size=displ[displ.size()-1];
    std::vector<int> global_indices(global_size);
    std::vector<int> global_sizes(global_size);
    MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
                   &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
                   &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    std::vector<int>().swap(owned_global_indices); // free data for reuse.
    // Compute the number of data items to send.
    std::vector<int> no_data_send(distributed_data->ccobj_.size());
    for(typename std::vector<int>::iterator begin=no_data_send.begin(),
            i=begin, end=no_data_send.end(); i!=end; ++i)
        *i = std::accumulate(global_sizes.begin()+displ[i-begin],
                             global_sizes.begin()+displ[i-begin+1], std::size_t());
    // Free at least some memory that can be reused.
    std::vector<int>().swap(owned_sizes);
    // Compute the displacements for receiving with allgatherv.
    displ[0]=0;
    std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
                   std::plus<std::size_t>());
    // Compute the number of data items we will receive.
    int no_data_recv = displ[displ.size()-1];

    // Collect the data to send, gather it.
    mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
    if ( no_data_send[distributed_data->ccobj_.rank()] )
    {
        local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
    }
    else
    {
        local_data_buffer.resize(1);
    }
    global_data_buffer.resize(no_data_recv);

    DataGatherer<DataHandle> gatherer(local_data_buffer, data);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
    MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
                   MPITraits<typename DataHandle::DataType>::getType(),
                   &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
                   MPITraits<typename DataHandle::DataType>::getType(),
                   distributed_data->ccobj_);
    // Scatter the received data into the handle. Entities of lower codims
    // precede this codim in the global numbering, hence the offset.
    Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
    int offset=0;
    for(int i=0; i< codim; ++i)
        offset+=global_data->size(i);

    typename std::vector<int>::const_iterator s=global_sizes.begin();
    for(typename std::vector<int>::const_iterator i=global_indices.begin(),
            end=global_indices.end();
        i!=end; ++s, ++i)
    {
        edata.scatter(global_data_buffer, *i-offset, *s);
    }
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#endif // HAVE_MPI

#endif // OPM_CPGRIDDATA_HEADER