CpGridData.hpp
//===========================================================================
//
// File: CpGridData.hpp
//
// Created: Sep 17 21:11:41 2013
//
// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
//            Bård Skaflestad     <bard.skaflestad@sintef.no>
//            Markus Blatt        <markus@dr-blatt.de>
//
// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
//          and were transferred here during refactoring for the parallelization.
//
// $Date$
//
// $Revision$
//
//===========================================================================

/*
  Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
  Copyright 2009, 2010, 2013 Statoil ASA.
  Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services

  This file is part of The Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_CPGRIDDATA_HEADER
#define OPM_CPGRIDDATA_HEADER

// Warning suppression for Dune includes.
#include <opm/grid/utility/platform_dependent/disable_warnings.h>

#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#ifdef HAVE_DUNE_ISTL
#include <dune/istl/owneroverlapcopy.hh>
#endif

#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
#include <dune/common/parallel/communication.hh>
#else
#include <dune/common/parallel/collectivecommunication.hh>
#endif
#include <dune/common/parallel/indexset.hh>
#include <dune/common/parallel/interface.hh>
#include <dune/common/parallel/plocalindex.hh>
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
#include <dune/common/parallel/variablesizecommunicator.hh>
#else
#include <opm/grid/utility/VariableSizeCommunicator.hpp>
#endif
#include <dune/grid/common/gridenums.hh>

#include <opm/grid/utility/platform_dependent/reenable_warnings.h>

#if HAVE_ECL_INPUT
#include <opm/parser/eclipse/EclipseState/Grid/NNC.hpp>
#endif

#include <array>
#include <tuple>
#include <algorithm>
#include <functional>
#include <numeric>
#include <set>

#include "OrientedEntityTable.hpp"
#include "DefaultGeometryPolicy.hpp"

#include <opm/grid/utility/OpmParserIncludes.hpp>

#include "Entity2IndexDataHandle.hpp"
#include "DataHandleWrappers.hpp"
#include "GlobalIdMapping.hpp"

namespace Opm
{
class EclipseState;
}
namespace Dune
{
class CpGrid;

namespace cpgrid
{

class IndexSet;
class IdSet;
class LevelGlobalIdSet;
class PartitionTypeIndicator;
template<int,int> class Geometry;
template<int> class Entity;
template<int> class EntityRep;

namespace mover
{
template<class T, int i> struct Mover;
}

/// Struct that holds all the data needed to represent a CpGrid.
class CpGridData
{
    template<class T, int i> friend struct mover::Mover;

    friend class GlobalIdSet;

private:
    CpGridData(const CpGridData& g);

public:
    enum {
#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
        /// \brief The maximum data items allowed per cell (DUNE < 2.5.2).
        MAX_DATA_PER_CELL = 27,
#else
        MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
#endif
    };
    explicit CpGridData(CpGrid& grid);

    explicit CpGridData(MPIHelper::MPICommunicator comm);

    /// Constructor.
    CpGridData();
    /// Destructor.
    ~CpGridData();
    /// Number of leaf entities per codim in this process.
    int size(int codim) const;

    /// Number of leaf entities per geometry type in this process.
    int size(GeometryType type) const
    {
        if (type.isCube()) {
            return size(3 - type.dim());
        } else {
            return 0;
        }
    }
    /// Read the Sintef legacy grid format ('topogeom').
    void readSintefLegacyFormat(const std::string& grid_prefix);

    /// Write the Sintef legacy grid format ('topogeom').
    void writeSintefLegacyFormat(const std::string& grid_prefix) const;

    /// Read the Eclipse grid format ('grdecl').
    void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);

#if HAVE_ECL_INPUT
    /// Read the Eclipse grid format ('grdecl') from a deck.
    void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension, bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());

    /// Read the Eclipse grid format ('grdecl') from an EclipseGrid.
    std::vector<std::size_t> processEclipseFormat(const Opm::EclipseGrid* ecl_grid, Opm::EclipseState* ecl_state,
                                                  bool periodic_extension, bool turn_normals = false, bool clip_z = false, bool pinchActive = true);
#endif

    /// Read the Eclipse grid format ('grdecl').
    void processEclipseFormat(const grdecl& input_data, Opm::EclipseState* ecl_state,
                              std::array<std::set<std::pair<int, int>>, 2>& nnc,
                              bool remove_ij_boundary, bool turn_normals, bool pinchActive);

    /// Extract Cartesian index triplet (i,j,k) of an active cell.
    void getIJK(int c, std::array<int,3>& ijk) const
    {
        int gc = global_cell_[c];
        ijk[0] = gc % logical_cartesian_size_[0]; gc /= logical_cartesian_size_[0];
        ijk[1] = gc % logical_cartesian_size_[1];
        ijk[2] = gc / logical_cartesian_size_[1];
    }
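
    // Illustrative example (not part of the original header): on a grid with
    // logical Cartesian size 4 x 3 x 2, a cell whose global_cell_ entry is
    // gc = 17 decomposes as i = 17 % 4 = 1, then gc /= 4 gives 4, so
    // j = 4 % 3 = 1 and k = 4 / 3 = 1, i.e. (i,j,k) = (1,1,1); indeed
    // 1 + 4*(1 + 3*1) = 17.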

    /// Make unique boundary ids for all intersections.
    void computeUniqueBoundaryIds();

    /// Is the grid currently using unique boundary ids?
    bool uniqueBoundaryIds() const
    {
        return use_unique_boundary_ids_;
    }

    /// Set whether we want to have unique boundary ids.
    void setUniqueBoundaryIds(bool uids)
    {
        use_unique_boundary_ids_ = uids;
        if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
            computeUniqueBoundaryIds();
        }
    }

    /// Return the internalized zcorn copy from the grid processing.
    const std::vector<double>& zcornData() const
    {
        return zcorn;
    }

    /// Get the index set.
    const IndexSet& indexSet() const
    {
        return *index_set_;
    }

    /// The logical cartesian size of the grid.
    const std::array<int, 3>& logicalCartesianSize() const
    {
        return logical_cartesian_size_;
    }

    /// Redistribute a global grid.
    void distributeGlobalGrid(CpGrid& grid,
                              const CpGridData& view_data,
                              const std::vector<int>& cell_part);

    /// Communicate objects for all codims on a given level.
    template<class DataHandle>
    void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);

#if HAVE_MPI
#if DUNE_VERSION_NEWER(DUNE_GRID, 2, 7)
    /// The type of the communicator.
    using Communicator = VariableSizeCommunicator<>;
#else
    /// The type of the communicator.
    using Communicator = Opm::VariableSizeCommunicator<>;
#endif

    /// The type of the map describing communication interfaces.
    using InterfaceMap = Communicator::InterfaceMap;

    /// The type of the parallel communication.
    using CommunicationType = Dune::OwnerOverlapCopyCommunication<int,int>;

    /// The type of the parallel index set.
    using ParallelIndexSet = CommunicationType::ParallelIndexSet;

    /// The type of the remote indices information.
    using RemoteIndices = Dune::RemoteIndices<ParallelIndexSet>;

    CommunicationType& cellCommunication()
    {
        return cell_comm_;
    }

    const CommunicationType& cellCommunication() const
    {
        return cell_comm_;
    }

    ParallelIndexSet& cellIndexSet()
    {
        return cellCommunication().indexSet();
    }

    const ParallelIndexSet& cellIndexSet() const
    {
        return cellCommunication().indexSet();
    }

    RemoteIndices& cellRemoteIndices()
    {
        return cellCommunication().remoteIndices();
    }

    const RemoteIndices& cellRemoteIndices() const
    {
        return cellCommunication().remoteIndices();
    }
#endif

#ifdef HAVE_DUNE_ISTL
    /// The type of the set of the attributes.
    typedef Dune::OwnerOverlapCopyAttributeSet::AttributeSet AttributeSet;
#else
    /// The type of the set of the attributes.
    enum AttributeSet{owner, overlap, copy};
#endif
369 
370 private:
371 
373  void populateGlobalCellIndexSet();
374 
375 #if HAVE_MPI
376 
382  template<class DataHandle>
383  void gatherData(DataHandle& data, CpGridData* global_view,
384  CpGridData* distributed_view);
385 
386 
393  template<int codim, class DataHandle>
394  void gatherCodimData(DataHandle& data, CpGridData* global_data,
395  CpGridData* distributed_data);
396 
403  template<class DataHandle>
404  void scatterData(DataHandle& data, CpGridData* global_data,
405  CpGridData* distributed_data, const InterfaceMap& cell_inf,
406  const InterfaceMap& point_inf);
407 
415  template<int codim, class DataHandle>
416  void scatterCodimData(DataHandle& data, CpGridData* global_data,
417  CpGridData* distributed_data);
418 
427  template<int codim, class DataHandle>
428  void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
429  const Interface& interface);
430 
439  template<int codim, class DataHandle>
440  void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
441  const InterfaceMap& interface);
442 
443 #endif
444 
445  void computeGeometry(CpGrid& grid,
446  const DefaultGeometryPolicy& globalGeometry,
447  const OrientedEntityTable<0, 1>& globalCell2Faces,
448  DefaultGeometryPolicy& geometry,
449  const OrientedEntityTable<0, 1>& cell2Faces,
450  const std::vector< std::array<int,8> >& cell2Points);
451 
    // Representing the topology
    cpgrid::OrientedEntityTable<0, 1> cell_to_face_;
    cpgrid::OrientedEntityTable<1, 0> face_to_cell_;
    Opm::SparseTable<int> face_to_point_;
    std::vector< std::array<int,8> > cell_to_point_;
    cpgrid::DefaultGeometryPolicy geometry_;
    std::array<int, 3> logical_cartesian_size_;
    std::vector<int> global_cell_;
    cpgrid::EntityVariable<enum face_tag, 1> face_tag_; // {LEFT, BACK, TOP}
    typedef FieldVector<double, 3> PointType;
    cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
    cpgrid::IndexSet* index_set_;
    const cpgrid::IdSet* local_id_set_;
    LevelGlobalIdSet* global_id_set_;
    PartitionTypeIndicator* partition_type_indicator_;

    typedef MPIHelper::MPICommunicator MPICommunicator;
    typedef Dune::CollectiveCommunication<MPICommunicator> CollectiveCommunication;
    CollectiveCommunication ccobj_;

    // Boundary information (optional).
    bool use_unique_boundary_ids_;

    std::vector<double> zcorn;

#if HAVE_MPI

    CommunicationType cell_comm_;

    std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
    /*
    // code deactivated, because users cannot access face indices and therefore
    // communication on faces makes no sense!
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    face_interfaces_;
    */
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    point_interfaces_;

#endif

    // Return the geometry vector corresponding to the given codim.
    template <int codim>
    const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
    {
        return geometry_.geomVector<codim>();
    }

    friend class Dune::CpGrid;
    template<int> friend class Entity;
    template<int> friend class EntityRep;
    template<int> friend class EntityPointer;
    friend class Intersection;
    friend class PartitionTypeIndicator;
};

#if HAVE_MPI

namespace
{
template<class T>
T& getInterface(InterfaceType iftype,
                std::tuple<T,T,T,T,T>& interfaces)
{
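    // The case labels follow the numeric values of Dune::InterfaceType:
    // 0 = InteriorBorder_InteriorBorder_Interface, 1 = InteriorBorder_All_Interface,
    // 2 = Overlap_OverlapFront_Interface, 3 = Overlap_All_Interface,
    // 4 = All_All_Interface.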
    switch(iftype)
    {
    case 0:
        return std::get<0>(interfaces);
    case 1:
        return std::get<1>(interfaces);
    case 2:
        return std::get<2>(interfaces);
    case 3:
        return std::get<3>(interfaces);
    case 4:
        return std::get<4>(interfaces);
    }
    OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
}

} // end unnamed namespace

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                                  const Interface& interface)
{
    this->template communicateCodim<codim>(data, dir, interface.interfaces());
}

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
                                  const InterfaceMap& interface)
{
    Communicator comm(ccobj_, interface);

    if(dir==ForwardCommunication)
        comm.forward(data_wrapper);
    else
        comm.backward(data_wrapper);
}
#endif

template<class DataHandle>
void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
                             CommunicationDirection dir)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
        communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
        communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
    }
#else
    // Suppress warnings for unused arguments.
    (void) data;
    (void) iftype;
    (void) dir;
#endif
}
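
// Illustrative sketch (not part of the original header): a minimal data handle
// that sends one double per cell through communicate(). The names
// CellDataHandle and values are hypothetical; the member functions follow the
// usual Dune CommDataHandleIF conventions.
//
//   struct CellDataHandle
//   {
//       using DataType = double;
//       explicit CellDataHandle(std::vector<double>& values) : values_(values) {}
//       bool contains(int /*dim*/, int codim) const { return codim == 0; }
//       bool fixedsize(int /*dim*/, int /*codim*/) const { return true; }
//       template<class EntityType>
//       std::size_t size(const EntityType&) const { return 1; }
//       template<class Buffer, class EntityType>
//       void gather(Buffer& buffer, const EntityType& e) const { buffer.write(values_[e.index()]); }
//       template<class Buffer, class EntityType>
//       void scatter(Buffer& buffer, const EntityType& e, std::size_t) { buffer.read(values_[e.index()]); }
//       std::vector<double>& values_;
//   };
//
//   // Copy cell values from owners to overlap copies:
//   // CellDataHandle handle(values);
//   // gridData.communicate(handle, Dune::InteriorBorder_All_Interface,
//   //                      Dune::ForwardCommunication);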
} // end namespace cpgrid
} // end namespace Dune

#if HAVE_MPI
#include <opm/grid/cpgrid/Entity.hpp>
#include <opm/grid/cpgrid/Indexsets.hpp>

namespace Dune {
namespace cpgrid {

namespace mover
{
template<class T>
class MoveBuffer
{
    friend class Dune::cpgrid::CpGridData;
public:
    void read(T& data)
    {
        data = buffer_[index_++];
    }
    void write(const T& data)
    {
        buffer_[index_++] = data;
    }
    void reset()
    {
        index_ = 0;
    }
    void resize(std::size_t size)
    {
        buffer_.resize(size);
        index_ = 0;
    }
private:
    std::vector<T> buffer_;
    typename std::vector<T>::size_type index_;
};
template<class DataHandle,int codim>
struct Mover
{
};

template<class DataHandle>
struct BaseMover
{
    BaseMover(DataHandle& data)
        : data_(data)
    {}
    template<class E>
    void moveData(const E& from, const E& to)
    {
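        // Ask the handle for the payload size of the source entity, gather
        // the data into the buffer, rewind the read cursor, then scatter the
        // buffered data onto the destination entity.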
        std::size_t size = data_.size(from);
        buffer.resize(size);
        data_.gather(buffer, from);
        buffer.reset();
        data_.scatter(buffer, to, size);
    }
    DataHandle& data_;
    MoveBuffer<typename DataHandle::DataType> buffer;
};

template<class DataHandle>
struct Mover<DataHandle,0> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        Entity<0> from_entity = Entity<0>(*gatherView_, from_cell_index, true);
        Entity<0> to_entity = Entity<0>(*scatterView_, to_cell_index, true);
        this->moveData(from_entity, to_entity);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,1> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        typedef typename OrientedEntityTable<0,1>::row_type row_type;
        EntityRep<0> from_cell = EntityRep<0>(from_cell_index, true);
        EntityRep<0> to_cell = EntityRep<0>(to_cell_index, true);
        OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
        row_type from_faces = table.operator[](from_cell);
        row_type to_faces = scatterView_->cell_to_face_[to_cell];

        for(int i = 0; i < from_faces.size(); ++i)
            this->moveData(from_faces[i], to_faces[i]);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,3> : public BaseMover<DataHandle>
{
    Mover(DataHandle& data, CpGridData* gatherView,
          CpGridData* scatterView)
        : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}
    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        const std::array<int,8>& from_cell_points =
            gatherView_->cell_to_point_[from_cell_index];
        const std::array<int,8>& to_cell_points =
            scatterView_->cell_to_point_[to_cell_index];
        for(std::size_t i = 0; i < 8; ++i)
        {
            this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
                           Entity<3>(*scatterView_, to_cell_points[i], true));
        }
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

} // end namespace mover

template<class DataHandle>
void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
                             CpGridData* distributed_data, const InterfaceMap& cell_inf,
                             const InterfaceMap& point_inf)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
    }
#endif
}

template<int codim, class DataHandle>
void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
                                  CpGridData* distributed_data)
{
    CpGridData *gather_view, *scatter_view;
    gather_view = global_data;
    scatter_view = distributed_data;

    mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);

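    // Each entry of the parallel cell index set pairs a cell's global index
    // (its position in the global view) with its local index in the
    // distributed view; the mover copies the handle's data accordingly.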
    for(auto index = distributed_data->cellIndexSet().begin(),
            end = distributed_data->cellIndexSet().end();
        index != end; ++index)
    {
        std::size_t from = index->global();
        std::size_t to = index->local();
        mover(from, to);
    }
}

namespace
{

template<int codim, class T, class F>
void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
{
    for(T it = begin; it != endit; ++it)
    {
        Entity<codim> entity(distributed_data, it-begin, true);
        PartitionType pt = entity.partitionType();
        if(pt == Dune::InteriorEntity)
        {
            func(*it, entity);
        }
    }
}

template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
        : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& i, E& entity)
    {
        ownedGlobalIndices.push_back(i);
        ownedSizes.push_back(data.size(entity));
    }
    DataHandle& data;
    std::vector<int>& ownedGlobalIndices;
    std::vector<int>& ownedSizes;
};

template<class DataHandle>
struct DataGatherer
{
    DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
                 DataHandle& data_)
        : buffer(buffer_), data(data_)
    {}

    template<class T, class E>
    void operator()(T& /* it */, E& entity)
    {
        data.gather(buffer, entity);
    }
    mover::MoveBuffer<typename DataHandle::DataType>& buffer;
    DataHandle& data;
};

} // end unnamed namespace

template<class DataHandle>
void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
                            CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        gatherCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        gatherCodimData<3>(data, global_data, distributed_data);
#endif
}

template<int codim, class DataHandle>
void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
                                 CpGridData* distributed_data)
{
#if HAVE_MPI
    // Get the mapping to global index from the global id set
    const std::vector<int>& mapping =
        distributed_data->global_id_set_->getMapping<codim>();

    // Get the global indices and data size for the entities whose data is
    // to be sent, i.e. the ones that we own.
    std::vector<int> owned_global_indices;
    std::vector<int> owned_sizes;
    owned_global_indices.reserve(mapping.size());
    owned_sizes.reserve(mapping.size());

    GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);

    // communicate the number of indices that each processor sends
    int no_indices = owned_sizes.size();
    // We will take the address of the first element for MPI_Allgather below.
    // Make sure the containers have such an element.
    if ( owned_global_indices.empty() )
        owned_global_indices.resize(1);
    if ( owned_sizes.empty() )
        owned_sizes.resize(1);
    std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
    distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
    // Compute the size of the vector needed to receive all indices, and
    // allgather the global indices and the sizes.
    // Calculate displacements.
    std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
    std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
                   std::plus<int>());
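    // displ is now an exclusive prefix sum of no_indices_to_recv, so its last
    // entry is the total number of indices gathered across all ranks.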
    int global_size = displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
    std::vector<int> global_indices(global_size);
    std::vector<int> global_sizes(global_size);
    MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
                   &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
                   &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    std::vector<int>().swap(owned_global_indices); // free data for reuse.
    // Compute the number of data items to send
    std::vector<int> no_data_send(distributed_data->ccobj_.size());
    for(typename std::vector<int>::iterator begin = no_data_send.begin(),
            i = begin, end = no_data_send.end(); i != end; ++i)
        *i = std::accumulate(global_sizes.begin()+displ[i-begin],
                             global_sizes.begin()+displ[i-begin+1], std::size_t());
    // free at least some memory that can be reused.
    std::vector<int>().swap(owned_sizes);
    // compute the displacements for receiving with allgatherv
    displ[0] = 0;
    std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
                   std::plus<std::size_t>());
    // Compute the number of data items we will receive
    int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];

    // Collect the data to send, gather it
    mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
    if ( no_data_send[distributed_data->ccobj_.rank()] )
    {
        local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
    }
    else
    {
        local_data_buffer.resize(1);
    }
    global_data_buffer.resize(no_data_recv);

    DataGatherer<DataHandle> gatherer(local_data_buffer, data);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
    MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
                   MPITraits<typename DataHandle::DataType>::getType(),
                   &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
                   MPITraits<typename DataHandle::DataType>::getType(),
                   distributed_data->ccobj_);
    Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
    int offset = 0;
    for(int i = 0; i < codim; ++i)
        offset += global_data->size(i);
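    // Global ids run consecutively over the codimensions, so subtracting the
    // number of entities of all lower codims turns a global id into an index
    // local to this codim.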

    typename std::vector<int>::const_iterator s = global_sizes.begin();
    for(typename std::vector<int>::const_iterator i = global_indices.begin(),
            end = global_indices.end();
        i != end; ++s, ++i)
    {
        edata.scatter(global_data_buffer, *i-offset, *s);
    }
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#endif // HAVE_MPI

#endif // OPM_CPGRIDDATA_HEADER