CpGridData.hpp
//===========================================================================
//
// File: CpGridData.hpp
//
// Created: Sep 17 21:11:41 2013
//
// Author(s): Atgeirr F Rasmussen <atgeirr@sintef.no>
//            Bård Skaflestad     <bard.skaflestad@sintef.no>
//            Markus Blatt        <markus@dr-blatt.de>
//
// Comment: Major parts of this file originated in dune/grid/CpGrid.hpp
//          and were transferred here during the refactoring for the
//          parallelization.
//
// $Date$
//
// $Revision$
//
//===========================================================================

/*
  Copyright 2009, 2010 SINTEF ICT, Applied Mathematics.
  Copyright 2009, 2010, 2013, 2022 Equinor ASA.
  Copyright 2013 Dr. Blatt - HPC-Simulation-Software & Services

  This file is part of The Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_CPGRIDDATA_HEADER
#define OPM_CPGRIDDATA_HEADER


#include <dune/common/parallel/mpihelper.hh>
#ifdef HAVE_DUNE_ISTL
#include <dune/istl/owneroverlapcopy.hh>
#endif

#include <dune/common/parallel/communication.hh>
#include <dune/common/parallel/variablesizecommunicator.hh>
#include <dune/grid/common/gridenums.hh>

#if HAVE_ECL_INPUT
#include <opm/input/eclipse/EclipseState/Grid/NNC.hpp>
#endif

#include "Entity2IndexDataHandle.hpp"
#include "CpGridDataTraits.hpp"
//#include "DataHandleWrappers.hpp"
//#include "GlobalIdMapping.hpp"
#include "Geometry.hpp"

namespace Opm
{
class EclipseState;
}
namespace Dune
{
class CpGrid;

namespace cpgrid
{

class IndexSet;
class IdSet;
class LevelGlobalIdSet;
class PartitionTypeIndicator;
template<int,int> class Geometry;
template<int> class Entity;
template<int> class EntityRep;
}
}

void refine_and_check(const Dune::cpgrid::Geometry<3, 3>&,
                      const std::array<int, 3>&,
                      bool);
void refinePatch_and_check(const std::array<int,3>&,
                           const std::array<int,3>&,
                           const std::array<int,3>&);

void refinePatch_and_check(Dune::CpGrid&,
                           const std::array<int,3>&,
                           const std::array<int,3>&,
                           const std::array<int,3>&);

void check_global_refine(const Dune::CpGrid&,
                         const Dune::CpGrid&);

namespace Dune
{
namespace cpgrid
{
namespace mover
{
template<class T, int i> struct Mover;
}

/// Struct that holds all the data needed to represent a CpGrid.
class CpGridData
{
    template<class T, int i> friend struct mover::Mover;

    friend class GlobalIdSet;
    friend class HierarchicIterator;
    friend class Dune::cpgrid::IndexSet;

    friend
    void ::refine_and_check(const Dune::cpgrid::Geometry<3, 3>&,
                            const std::array<int, 3>&,
                            bool);
    friend
    void ::refinePatch_and_check(const std::array<int,3>&,
                                 const std::array<int,3>&,
                                 const std::array<int,3>&);

    friend
    void ::refinePatch_and_check(Dune::CpGrid&,
                                 const std::array<int,3>&,
                                 const std::array<int,3>&,
                                 const std::array<int,3>&);

    friend
    void ::check_global_refine(const Dune::CpGrid&,
                               const Dune::CpGrid&);

private:
    CpGridData(const CpGridData& g);

public:
    enum{
#ifndef MAX_DATA_COMMUNICATED_PER_ENTITY
        // The maximum number of data items allowed per cell (DUNE < 2.5.2);
        // a bug in older DUNE releases limits how much may be communicated.
        MAX_DATA_PER_CELL = 2
#else
        MAX_DATA_PER_CELL = MAX_DATA_COMMUNICATED_PER_ENTITY
#endif
    };

    explicit CpGridData(MPIHelper::MPICommunicator comm, std::vector<std::shared_ptr<CpGridData>>& data);

    CpGridData(std::vector<std::shared_ptr<CpGridData>>& data);
    ~CpGridData();

    int size(int codim) const;

    int size(GeometryType type) const
    {
        if (type.isCube()) {
            return size(3 - type.dim());
        } else {
            return 0;
        }
    }
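    // Example: for the 3-d cube type, size(Dune::GeometryTypes::cube(3))
    // returns size(0), the number of cells, while the vertex type (dim 0,
    // also a cube) yields size(3), the number of points. All other
    // geometry types return 0, since CpGrid is a purely hexahedral grid.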
    void readSintefLegacyFormat(const std::string& grid_prefix);

    void writeSintefLegacyFormat(const std::string& grid_prefix) const;

    void readEclipseFormat(const std::string& filename, bool periodic_extension, bool turn_normals = false);

#if HAVE_ECL_INPUT
    void processEclipseFormat(const Opm::Deck& deck, bool periodic_extension, bool turn_normals = false, bool clip_z = false,
                              const std::vector<double>& poreVolume = std::vector<double>());

    std::vector<std::size_t> processEclipseFormat(const Opm::EclipseGrid* ecl_grid, Opm::EclipseState* ecl_state,
                                                  bool periodic_extension, bool turn_normals = false, bool clip_z = false,
                                                  bool pinchActive = true);
#endif

    void processEclipseFormat(const grdecl& input_data,
#if HAVE_ECL_INPUT
                              Opm::EclipseState* ecl_state,
#endif
                              std::array<std::set<std::pair<int, int>>, 2>& nnc,
                              bool remove_ij_boundary, bool turn_normals, bool pinchActive);

    void getIJK(int c, std::array<int,3>& ijk) const
    {
        int gc = global_cell_[c];
        ijk[0] = gc % logical_cartesian_size_[0]; gc /= logical_cartesian_size_[0];
        ijk[1] = gc % logical_cartesian_size_[1];
        ijk[2] = gc / logical_cartesian_size_[1];
    }
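    // Worked example (sizes assumed for illustration): with
    // logical_cartesian_size_ = {4, 3, 5} and global_cell_[c] = 17,
    // the steps give i = 17 % 4 = 1, gc = 17 / 4 = 4, j = 4 % 3 = 1,
    // k = 4 / 3 = 1; indeed i + 4*(j + 3*k) = 1 + 4*4 = 17.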

private:
    const std::array<int,3> getPatchDim(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;

    const std::array<std::vector<int>,3> getPatchGeomIndices(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;

    const Geometry<3,3> cellifyPatch(const std::array<int,3>& startIJK, const std::array<int,3>& endIJK,
                                     const std::vector<int>& patch_cells, DefaultGeometryPolicy& cellifiedPatch_geometry,
                                     std::array<int,8>& cellifiedPatch_to_point,
                                     std::array<int,8>& allcorners_cellifiedPatch) const;

public:
    const std::tuple< const std::shared_ptr<CpGridData>,
                      const std::vector<std::array<int,2>>,                // parent_to_refined_corners (~boundary_old_to_new_corners)
                      const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces (~boundary_old_to_new_faces)
                      const std::tuple<int, std::vector<int>>,             // parent_to_children_cells
                      const std::vector<std::array<int,2>>,                // child_to_parent_faces
                      const std::vector<std::array<int,2>>>                // child_to_parent_cells
    refineSingleCell(const std::array<int,3>& cells_per_dim, const int& parent_idx) const;

    const std::tuple< std::shared_ptr<CpGridData>,
                      const std::vector<std::array<int,2>>,                // boundary_old_to_new_corners
                      const std::vector<std::tuple<int,std::vector<int>>>, // boundary_old_to_new_faces
                      const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_faces
                      const std::vector<std::tuple<int,std::vector<int>>>, // parent_to_children_cells
                      const std::vector<std::array<int,2>>,                // child_to_parent_faces
                      const std::vector<std::array<int,2>>>                // child_to_parent_cells
    refinePatch(const std::array<int,3>& cells_per_dim, const std::array<int,3>& startIJK, const std::array<int,3>& endIJK) const;

    // Make unique boundary ids for all intersections.
    void computeUniqueBoundaryIds();

    // Is the grid currently using unique boundary ids?
    bool uniqueBoundaryIds() const
    {
        return use_unique_boundary_ids_;
    }

    // Set whether we want to have unique boundary ids.
    void setUniqueBoundaryIds(bool uids)
    {
        use_unique_boundary_ids_ = uids;
        if (use_unique_boundary_ids_ && unique_boundary_ids_.empty()) {
            computeUniqueBoundaryIds();
        }
    }

    const std::vector<double>& zcornData() const {
        return zcorn;
    }

    const IndexSet& indexSet() const
    {
        return *index_set_;
    }

    const std::array<int, 3>& logicalCartesianSize() const
    {
        return logical_cartesian_size_;
    }

    void distributeGlobalGrid(CpGrid& grid,
                              const CpGridData& view_data,
                              const std::vector<int>& cell_part);

    template<class DataHandle>
    void communicate(DataHandle& data, InterfaceType iftype, CommunicationDirection dir);

    /// The type of the MPI communicator.
    using MPICommunicator = CpGridDataTraits::MPICommunicator;
    /// The type of the collective communication.
    using Communication = CpGridDataTraits::Communication;
    using CollectiveCommunication = CpGridDataTraits::CollectiveCommunication;
#if HAVE_MPI
    using AttributeSet = CpGridDataTraits::AttributeSet;

    using Communicator = CpGridDataTraits::Communicator;

    using InterfaceMap = CpGridDataTraits::InterfaceMap;

    using CommunicationType = CpGridDataTraits::CommunicationType;

    using ParallelIndexSet = CpGridDataTraits::ParallelIndexSet;

    using RemoteIndices = CpGridDataTraits::RemoteIndices;

    CommunicationType& cellCommunication()
    {
        return cell_comm_;
    }

    const CommunicationType& cellCommunication() const
    {
        return cell_comm_;
    }

    ParallelIndexSet& cellIndexSet()
    {
        return cellCommunication().indexSet();
    }

    const ParallelIndexSet& cellIndexSet() const
    {
        return cellCommunication().indexSet();
    }

    RemoteIndices& cellRemoteIndices()
    {
        return cellCommunication().remoteIndices();
    }

    const RemoteIndices& cellRemoteIndices() const
    {
        return cellCommunication().remoteIndices();
    }
#endif

    const std::vector<int>& sortedNumAquiferCells() const
    {
        return aquifer_cells_;
    }

private:

    void populateGlobalCellIndexSet();

#if HAVE_MPI

    template<class DataHandle>
    void gatherData(DataHandle& data, CpGridData* global_view,
                    CpGridData* distributed_view);

    template<int codim, class DataHandle>
    void gatherCodimData(DataHandle& data, CpGridData* global_data,
                         CpGridData* distributed_data);

    template<class DataHandle>
    void scatterData(DataHandle& data, CpGridData* global_data,
                     CpGridData* distributed_data, const InterfaceMap& cell_inf,
                     const InterfaceMap& point_inf);

    template<int codim, class DataHandle>
    void scatterCodimData(DataHandle& data, CpGridData* global_data,
                          CpGridData* distributed_data);

    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const Interface& interface);

    template<int codim, class DataHandle>
    void communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                          const InterfaceMap& interface);

#endif

    void computeGeometry(CpGrid& grid,
                         const DefaultGeometryPolicy& globalGeometry,
                         const std::vector<int>& globalAquiferCells,
                         const OrientedEntityTable<0, 1>& globalCell2Faces,
                         DefaultGeometryPolicy& geometry,
                         std::vector<int>& aquiferCells,
                         const OrientedEntityTable<0, 1>& cell2Faces,
                         const std::vector< std::array<int,8> >& cell2Points);

    // Representing the topology
    /// Lookup of the faces attached to each cell.
    cpgrid::OrientedEntityTable<0, 1> cell_to_face_;
    /// Lookup of the cells attached to each face.
    cpgrid::OrientedEntityTable<1, 0> face_to_cell_;
    /// Lookup of the points of each face.
    Opm::SparseTable<int> face_to_point_;
    /// The eight corner points of each cell.
    std::vector< std::array<int,8> > cell_to_point_;
    /// The logical cartesian size of the grid.
    std::array<int, 3> logical_cartesian_size_;
    /// Mapping from active cell index to global (Cartesian) cell index.
    std::vector<int> global_cell_;

    typedef FieldVector<double, 3> PointType;
    /// The geometries of the grid entities.
    cpgrid::DefaultGeometryPolicy geometry_;
    /// Unique boundary ids for the faces, if used.
    cpgrid::EntityVariable<int, 1> unique_boundary_ids_;
    /// The index set of the grid.
    std::unique_ptr<cpgrid::IndexSet> index_set_;
    /// The local id set.
    std::shared_ptr<const cpgrid::IdSet> local_id_set_;
    /// The global id set.
    std::shared_ptr<LevelGlobalIdSet> global_id_set_;
    /// The partition type indicator of the entities.
    std::shared_ptr<PartitionTypeIndicator> partition_type_indicator_;
    /// Refinement level of this grid.
    int level_;
    /// Pointer to the vector holding all level grids.
    std::vector<std::shared_ptr<CpGridData>>* level_data_ptr_;
    // SUITABLE FOR ALL LEVELS EXCEPT FOR LEAFVIEW
    std::map<int,int> level_to_leaf_cells_; // {level cell index, leafview cell index}
    std::vector<std::tuple<int,std::vector<int>>> parent_to_children_cells_; // {level LGR, {child0, child1, ...}}
    // SUITABLE ONLY FOR LEAFVIEW
    std::vector<std::array<int,2>> leaf_to_level_cells_; // {level, cell index in that level}
    // SUITABLE FOR ALL LEVELS INCLUDING LEAFVIEW
    std::vector<std::array<int,2>> child_to_parent_cells_; // {parent cell level, parent cell index}

    Communication ccobj_;

    // Boundary information (optional).
    bool use_unique_boundary_ids_;

    std::vector<double> zcorn;

    std::vector<int> aquifer_cells_;

#if HAVE_MPI

    CommunicationType cell_comm_;

    std::tuple<Interface,Interface,Interface,Interface,Interface> cell_interfaces_;
    /*
    // code deactivated, because users cannot access face indices and therefore
    // communication on faces makes no sense!
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    face_interfaces_;
    */
    std::tuple<InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap,InterfaceMap>
    point_interfaces_;

#endif

    // Return the geometry vector corresponding to the given codim.
    template <int codim>
    const EntityVariable<Geometry<3 - codim, 3>, codim>& geomVector() const
    {
        return geometry_.geomVector<codim>();
    }

    friend class Dune::CpGrid;
    template<int> friend class Entity;
    template<int> friend class EntityRep;
    friend class Intersection;
    friend class PartitionTypeIndicator;
};



#if HAVE_MPI

namespace
{
template<class T>
T& getInterface(InterfaceType iftype,
                std::tuple<T,T,T,T,T>& interfaces)
{
    switch(iftype)
    {
    case 0:
        return std::get<0>(interfaces);
    case 1:
        return std::get<1>(interfaces);
    case 2:
        return std::get<2>(interfaces);
    case 3:
        return std::get<3>(interfaces);
    case 4:
        return std::get<4>(interfaces);
    }
    OPM_THROW(std::runtime_error, "Invalid Interface type was used during communication");
}
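// The iftype values correspond to Dune::InterfaceType from
// dune/grid/common/gridenums.hh:
//   0: InteriorBorder_InteriorBorder_Interface
//   1: InteriorBorder_All_Interface
//   2: Overlap_OverlapFront_Interface
//   3: Overlap_All_Interface
//   4: All_All_Interface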

} // end unnamed namespace

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data, CommunicationDirection dir,
                                  const Interface& interface)
{
    this->template communicateCodim<codim>(data, dir, interface.interfaces());
}

template<int codim, class DataHandle>
void CpGridData::communicateCodim(Entity2IndexDataHandle<DataHandle, codim>& data_wrapper, CommunicationDirection dir,
                                  const InterfaceMap& interface)
{
    Communicator comm(ccobj_, interface);

    if(dir==ForwardCommunication)
        comm.forward(data_wrapper);
    else
        comm.backward(data_wrapper);
}
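// Note: Communicator is a variable-size communicator (see CpGridDataTraits
// and the dune/common/parallel/variablesizecommunicator.hh include above),
// which queries the wrapped data handle for a per-entity size, so a variable
// amount of data per entity can be sent in either direction.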
#endif

template<class DataHandle>
void CpGridData::communicate(DataHandle& data, InterfaceType iftype,
                             CommunicationDirection dir)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*this, data);
        communicateCodim<0>(data_wrapper, dir, getInterface(iftype, cell_interfaces_));
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*this, data);
        communicateCodim<3>(data_wrapper, dir, getInterface(iftype, point_interfaces_));
    }
#else
    // Suppress warnings for unused arguments.
    (void) data;
    (void) iftype;
    (void) dir;
#endif
}
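// A minimal sketch of a data handle accepted by communicate() above, following
// the dune-grid CommDataHandleIF conventions used here (contains(), fixedSize(),
// size(), gather(), scatter()). The handle type and its members are hypothetical,
// not part of this header; e.index() is assumed to return the cell index.
//
//   struct CellDataHandle
//   {
//       using DataType = double;
//       std::vector<double>& values;  // one value per cell
//
//       bool contains(int /*dim*/, int codim) const { return codim == 0; }
//       bool fixedSize(int, int) const { return true; }  // fixedsize() in older DUNE
//       template<class Entity>
//       std::size_t size(const Entity&) const { return 1; }
//       template<class Buffer, class Entity>
//       void gather(Buffer& buf, const Entity& e) const { buf.write(values[e.index()]); }
//       template<class Buffer, class Entity>
//       void scatter(Buffer& buf, const Entity& e, std::size_t) { buf.read(values[e.index()]); }
//   };
//
// Usage: CellDataHandle handle{cell_values};
//        grid_data.communicate(handle, Dune::InteriorBorder_All_Interface,
//                              Dune::ForwardCommunication);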
} // end namespace cpgrid
} // end namespace Dune

#if HAVE_MPI
#include <opm/grid/cpgrid/Entity.hpp>
#include <opm/grid/cpgrid/Indexsets.hpp>

namespace Dune {
namespace cpgrid {

namespace mover
{
template<class T>
class MoveBuffer
{
    friend class Dune::cpgrid::CpGridData;
public:
    void read(T& data)
    {
        data=buffer_[index_++];
    }
    void write(const T& data)
    {
        buffer_[index_++]=data;
    }
    void reset()
    {
        index_=0;
    }
    void resize(std::size_t size)
    {
        buffer_.resize(size);
        index_=0;
    }
private:
    std::vector<T> buffer_;
    typename std::vector<T>::size_type index_;
};
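// Intended use (see BaseMover::moveData below): resize() the buffer to the
// item count, fill it through DataHandle::gather via write(), reset() the
// cursor, then drain it through DataHandle::scatter via read().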
template<class DataHandle,int codim>
struct Mover
{
};

template<class DataHandle>
struct BaseMover
{
    BaseMover(DataHandle& data)
    : data_(data)
    {}
    template<class E>
    void moveData(const E& from, const E& to)
    {
        std::size_t size=data_.size(from);
        buffer.resize(size);
        data_.gather(buffer, from);
        buffer.reset();
        data_.scatter(buffer, to, size);
    }
    DataHandle& data_;
    MoveBuffer<typename DataHandle::DataType> buffer;
};


template<class DataHandle>
struct Mover<DataHandle,0> : public BaseMover<DataHandle>
{
    Mover<DataHandle,0>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        Entity<0> from_entity=Entity<0>(*gatherView_, from_cell_index, true);
        Entity<0> to_entity=Entity<0>(*scatterView_, to_cell_index, true);
        this->moveData(from_entity, to_entity);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,1> : public BaseMover<DataHandle>
{
    Mover<DataHandle,1>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}

    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        typedef typename OrientedEntityTable<0,1>::row_type row_type;
        EntityRep<0> from_cell=EntityRep<0>(from_cell_index, true);
        EntityRep<0> to_cell=EntityRep<0>(to_cell_index, true);
        OrientedEntityTable<0,1>& table = gatherView_->cell_to_face_;
        row_type from_faces=table.operator[](from_cell);
        row_type to_faces=scatterView_->cell_to_face_[to_cell];

        for(int i=0; i<from_faces.size(); ++i)
            this->moveData(from_faces[i], to_faces[i]);
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

template<class DataHandle>
struct Mover<DataHandle,3> : public BaseMover<DataHandle>
{
    Mover<DataHandle,3>(DataHandle& data, CpGridData* gatherView,
                        CpGridData* scatterView)
    : BaseMover<DataHandle>(data), gatherView_(gatherView), scatterView_(scatterView)
    {}
    void operator()(std::size_t from_cell_index, std::size_t to_cell_index)
    {
        const std::array<int,8>& from_cell_points=
            gatherView_->cell_to_point_[from_cell_index];
        const std::array<int,8>& to_cell_points=
            scatterView_->cell_to_point_[to_cell_index];
        for(std::size_t i=0; i<8; ++i)
        {
            this->moveData(Entity<3>(*gatherView_, from_cell_points[i], true),
                           Entity<3>(*scatterView_, to_cell_points[i], true));
        }
    }
    CpGridData* gatherView_;
    CpGridData* scatterView_;
};

} // end mover namespace

template<class DataHandle>
void CpGridData::scatterData(DataHandle& data, CpGridData* global_data,
                             CpGridData* distributed_data, const InterfaceMap& cell_inf,
                             const InterfaceMap& point_inf)
{
#if HAVE_MPI
    if(data.contains(3,0))
    {
        Entity2IndexDataHandle<DataHandle, 0> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<0>(data_wrapper, ForwardCommunication, cell_inf);
    }
    if(data.contains(3,3))
    {
        Entity2IndexDataHandle<DataHandle, 3> data_wrapper(*global_data, *distributed_data, data);
        communicateCodim<3>(data_wrapper, ForwardCommunication, point_inf);
    }
#endif
}

template<int codim, class DataHandle>
void CpGridData::scatterCodimData(DataHandle& data, CpGridData* global_data,
                                  CpGridData* distributed_data)
{
    CpGridData* gather_view = global_data;
    CpGridData* scatter_view = distributed_data;

    mover::Mover<DataHandle,codim> mover(data, gather_view, scatter_view);

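    // Each entry of the cell index set pairs a global index (position in the
    // global view) with a local index on this process; move the data of every
    // such cell from the global view into the distributed view.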
    for(auto index=distributed_data->cellIndexSet().begin(),
            end = distributed_data->cellIndexSet().end();
        index!=end; ++index)
    {
        std::size_t from=index->global();
        std::size_t to=index->local();
        mover(from,to);
    }
}

namespace
{

template<int codim, class T, class F>
void visitInterior(CpGridData& distributed_data, T begin, T endit, F& func)
{
    for(T it=begin; it!=endit; ++it)
    {
        Entity<codim> entity(distributed_data, it-begin, true);
        PartitionType pt = entity.partitionType();
        if(pt==Dune::InteriorEntity)
        {
            func(*it, entity);
        }
    }
}

template<class DataHandle>
struct GlobalIndexSizeGatherer
{
    GlobalIndexSizeGatherer(DataHandle& data_,
                            std::vector<int>& ownedGlobalIndices_,
                            std::vector<int>& ownedSizes_)
    : data(data_), ownedGlobalIndices(ownedGlobalIndices_), ownedSizes(ownedSizes_)
    {}

    template<class T, class E>
    void operator()(T& i, E& entity)
    {
        ownedGlobalIndices.push_back(i);
        ownedSizes.push_back(data.size(entity));
    }
    DataHandle& data;
    std::vector<int>& ownedGlobalIndices;
    std::vector<int>& ownedSizes;
};

template<class DataHandle>
struct DataGatherer
{
    DataGatherer(mover::MoveBuffer<typename DataHandle::DataType>& buffer_,
                 DataHandle& data_)
    : buffer(buffer_), data(data_)
    {}

    template<class T, class E>
    void operator()(T& /* it */, E& entity)
    {
        data.gather(buffer, entity);
    }
    mover::MoveBuffer<typename DataHandle::DataType>& buffer;
    DataHandle& data;
};

} // end unnamed namespace

template<class DataHandle>
void CpGridData::gatherData(DataHandle& data, CpGridData* global_data,
                            CpGridData* distributed_data)
{
#if HAVE_MPI
    if(data.contains(3,0))
        gatherCodimData<0>(data, global_data, distributed_data);
    if(data.contains(3,3))
        gatherCodimData<3>(data, global_data, distributed_data);
#endif
}

template<int codim, class DataHandle>
void CpGridData::gatherCodimData(DataHandle& data, CpGridData* global_data,
                                 CpGridData* distributed_data)
{
#if HAVE_MPI
    // Get the mapping to global index from the global id set.
    const std::vector<int>& mapping =
        distributed_data->global_id_set_->getMapping<codim>();

    // Get the global indices and data sizes for the entities whose data is
    // to be sent, i.e. the ones that we own.
    std::vector<int> owned_global_indices;
    std::vector<int> owned_sizes;
    owned_global_indices.reserve(mapping.size());
    owned_sizes.reserve(mapping.size());

    GlobalIndexSizeGatherer<DataHandle> gisg(data, owned_global_indices, owned_sizes);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gisg);

    // Communicate the number of indices that each process sends.
    int no_indices=owned_sizes.size();
    // We will take the address of the first element for MPI_Allgather below.
    // Make sure the containers have such an element.
    if ( owned_global_indices.empty() )
        owned_global_indices.resize(1);
    if ( owned_sizes.empty() )
        owned_sizes.resize(1);
    std::vector<int> no_indices_to_recv(distributed_data->ccobj_.size());
    distributed_data->ccobj_.allgather(&no_indices, 1, &(no_indices_to_recv[0]));
    // Compute the size of a vector large enough to receive all indices,
    // then allgather the global indices and the sizes.
    // Calculate the displacements.
    std::vector<int> displ(distributed_data->ccobj_.size()+1, 0);
    std::transform(displ.begin(), displ.end()-1, no_indices_to_recv.begin(), displ.begin()+1,
                   std::plus<int>());
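    // displ is now an exclusive prefix sum of the receive counts: e.g. counts
    // {2, 3, 1} on three ranks give displacements {0, 2, 5, 6}, the last
    // entry being the total number of indices received.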
    int global_size=displ[displ.size()-1];//+no_indices_to_recv[displ.size()-1];
    std::vector<int> global_indices(global_size);
    std::vector<int> global_sizes(global_size);
    MPI_Allgatherv(&(owned_global_indices[0]), no_indices, MPITraits<int>::getType(),
                   &(global_indices[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    MPI_Allgatherv(&(owned_sizes[0]), no_indices, MPITraits<int>::getType(),
                   &(global_sizes[0]), &(no_indices_to_recv[0]), &(displ[0]),
                   MPITraits<int>::getType(),
                   distributed_data->ccobj_);
    std::vector<int>().swap(owned_global_indices); // free data for reuse.
    // Compute the number of data items to send.
    std::vector<int> no_data_send(distributed_data->ccobj_.size());
    for(typename std::vector<int>::iterator begin=no_data_send.begin(),
            i=begin, end=no_data_send.end(); i!=end; ++i)
        *i = std::accumulate(global_sizes.begin()+displ[i-begin],
                             global_sizes.begin()+displ[i-begin+1], std::size_t());
    // Free at least some memory that can be reused.
    std::vector<int>().swap(owned_sizes);
    // Compute the displacements for receiving with allgatherv.
    displ[0]=0;
    std::transform(displ.begin(), displ.end()-1, no_data_send.begin(), displ.begin()+1,
                   std::plus<std::size_t>());
    // Compute the number of data items we will receive.
    int no_data_recv = displ[displ.size()-1];//+global_sizes[displ.size()-1];

    // Collect the data to send, gather it.
    mover::MoveBuffer<typename DataHandle::DataType> local_data_buffer, global_data_buffer;
    if ( no_data_send[distributed_data->ccobj_.rank()] )
    {
        local_data_buffer.resize(no_data_send[distributed_data->ccobj_.rank()]);
    }
    else
    {
        local_data_buffer.resize(1);
    }
    global_data_buffer.resize(no_data_recv);

    DataGatherer<DataHandle> gatherer(local_data_buffer, data);
    visitInterior<codim>(*distributed_data, mapping.begin(), mapping.end(), gatherer);
    MPI_Allgatherv(&(local_data_buffer.buffer_[0]), no_data_send[distributed_data->ccobj_.rank()],
                   MPITraits<typename DataHandle::DataType>::getType(),
                   &(global_data_buffer.buffer_[0]), &(no_data_send[0]), &(displ[0]),
                   MPITraits<typename DataHandle::DataType>::getType(),
                   distributed_data->ccobj_);
    Entity2IndexDataHandle<DataHandle, codim> edata(*global_data, data);
    int offset=0;
    for(int i=0; i< codim; ++i)
        offset+=global_data->size(i);

    typename std::vector<int>::const_iterator s=global_sizes.begin();
    for(typename std::vector<int>::const_iterator i=global_indices.begin(),
            end=global_indices.end();
        i!=end; ++s, ++i)
    {
        edata.scatter(global_data_buffer, *i-offset, *s);
    }
#endif
}

} // end namespace cpgrid
} // end namespace Dune

#endif // HAVE_MPI

#endif // OPM_CPGRIDDATA_HEADER