dune-alugrid 3.0.0
structuredgridfactory.hh
#ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
#define DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH

#include <array>
#include <bitset>
#include <cassert>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include <dune/common/version.hh>

#include <dune/common/classname.hh>
#include <dune/common/exceptions.hh>
#include <dune/common/fvector.hh>
#include <dune/common/shared_ptr.hh>

#include <dune/grid/common/gridfactory.hh>
17
20
22
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
// include DGF parser implementation for YaspGrid
#include <dune/grid/io/file/dgfparser/dgfyasp.hh>
#else
// include DGF parser implementation for SGrid
#include <dune/grid/io/file/dgfparser/dgfs.hh>
#endif

namespace Dune
{

  // External Forward Declarations
  // -----------------------------

  template< class Grid >
  class StructuredGridFactory;


  // StructuredGridFactory for ALUGrid
  // ---------------------------------

  template< int dim, int dimworld, ALUGridElementType eltype, ALUGridRefinementType refineType, class Comm >
  class StructuredGridFactory< ALUGrid< dim, dimworld, eltype, refineType, Comm > >
  {
  public:
    typedef ALUGrid< dim, dimworld, eltype, refineType, Comm > Grid;
    // mygrid_ptr in GridPtr is derived from std::shared_ptr and provides a release method
    typedef typename Dune::GridPtr< Grid > :: mygrid_ptr SharedPtrType;

  protected:
    typedef StructuredGridFactory< Grid > This;

  private:
    // SimplePartitioner
    // -----------------
    template< class GV, PartitionIteratorType pitype, class IS = typename GV::IndexSet >
    class SimplePartitioner
    {
      typedef SimplePartitioner< GV, pitype, IS > This;

    public:
      typedef GV GridView;
      typedef typename GridView::Grid Grid;

      typedef IS IndexSet;

    protected:
      typedef typename IndexSet::IndexType IndexType;

      static const int dimension = Grid::dimension;

      typedef typename Grid::template Codim< 0 >::Entity Element;

      typedef typename Element::Geometry::GlobalCoordinate VertexType;

      // type of communicator
      typedef Dune :: CollectiveCommunication< typename MPIHelper :: MPICommunicator >
        CollectiveCommunication;

#ifdef USE_ALUGRID_SFC_ORDERING
      typedef SpaceFillingCurveOrdering< VertexType > SpaceFillingCurveOrderingType;
#endif

    public:
      SimplePartitioner ( const GridView &gridView, const CollectiveCommunication& comm,
                          const VertexType& lowerLeft, const VertexType& upperRight )
      : comm_( comm ),
        gridView_( gridView ),
        indexSet_( gridView_.indexSet() ),
        pSize_( comm_.size() ),
        elementCuts_( pSize_, -1 ),
#ifdef USE_ALUGRID_SFC_ORDERING
        sfc_( lowerLeft, upperRight, comm_ ),
#endif
        maxIndex_( double(indexSet_.size(0)-1) )
      {
        // compute decomposition of sfc
        calculateElementCuts();
      }

    public:
      template< class Entity >
      double index( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get hilbert index in [0,1]
        return sfc_.index( center );
#else
        return double(indexSet_.index( entity ));
#endif
      }

      template< class Entity >
      int rank( const Entity &entity ) const
      {
        alugrid_assert ( Entity::codimension == 0 );
#ifdef USE_ALUGRID_SFC_ORDERING
        // get center of entity's geometry
        VertexType center = entity.geometry().center();
        // get hilbert index in [0,1]
        const double hidx = sfc_.index( center );
        // transform to element index
        const long int index = (hidx * maxIndex_);
#else
        const long int index = indexSet_.index( entity );
#endif
        return rank( index );
      }

    protected:
      int rank( long int index ) const
      {
        if( index < elementCuts_[ 0 ] ) return 0;
        for( int p=1; p<pSize_; ++p )
        {
          if( index >= elementCuts_[ p-1 ] && index < elementCuts_[ p ] )
            return p;
        }
        return pSize_-1;
      }
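
      // Example of the cut logic above (illustrative values): with pSize_ = 3
      // and elementCuts_ = { 3, 6, 8 }, indices 0..2 map to rank 0, indices
      // 3..5 to rank 1 and indices 6..7 to rank 2; any index beyond the last
      // cut falls back to the last rank.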

      void calculateElementCuts()
      {
        const size_t nElements = indexSet_.size( 0 );

        // get number of MPI processes
        const int nRanks = pSize_;

        // get minimal number of entities per process
        const size_t minPerProc = (double(nElements) / double( nRanks ));
        size_t maxPerProc = minPerProc ;
        if( nElements % nRanks != 0 )
          ++ maxPerProc ;

        // calculate the number of ranks that receive the larger
        // element count (maxPerProc)
        double percentage = (double(nElements) / double( nRanks ));
        percentage -= minPerProc ;
        percentage *= nRanks ;
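
        // Worked example (illustrative numbers): nElements = 10, nRanks = 4
        // gives minPerProc = 2, maxPerProc = 3 and percentage = (2.5 - 2)*4 = 2,
        // i.e. the first two ranks receive maxPerProc elements; the loop below
        // then produces elementCuts_ = { 3, 6, 8, 10 }.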

        int rank = 0;
        size_t elementCount = maxPerProc ;
        size_t elementNumber = 0;
        size_t localElementNumber = 0;
        const int lastRank = nRanks - 1;

        const size_t size = indexSet_.size( 0 );
        for( size_t i=0; i<size; ++i )
        {
          if( localElementNumber >= elementCount )
          {
            elementCuts_[ rank ] = i ;

            // increase rank
            if( rank < lastRank ) ++ rank;

            // reset local number
            localElementNumber = 0;

            // switch to the smaller element count once all ranks with the
            // larger count have been filled
            if( elementCount == maxPerProc && rank >= percentage )
              elementCount = minPerProc ;
          }

          // increase counters
          ++elementNumber;
          ++localElementNumber;
        }

        // set cut for last process
        elementCuts_[ lastRank ] = size ;

        //for( int p=0; p<pSize_; ++p )
        //  std::cout << "P[ " << p << " ] = " << elementCuts_[ p ] << std::endl;
      }

      const CollectiveCommunication& comm_;

      const GridView& gridView_;
      const IndexSet &indexSet_;

      const int pSize_;
      std::vector< long int > elementCuts_ ;

#ifdef USE_ALUGRID_SFC_ORDERING
      // element to hilbert (or Z) index mapping
      SpaceFillingCurveOrderingType sfc_;
#endif
      const double maxIndex_ ;
    };

  public:
    typedef typename Grid::ctype ctype;
    typedef typename MPIHelper :: MPICommunicator MPICommunicatorType ;

    // type of communicator
    typedef Dune :: CollectiveCommunication< MPICommunicatorType >
      CollectiveCommunication;

    static SharedPtrType
    createCubeGrid( const std::string& filename,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      std::ifstream file( filename.c_str() );
      if( ! file )
      {
        DUNE_THROW(InvalidStateException,"file not found " << filename );
      }
      return createCubeGrid( file, filename, mpiComm );
    }

    static SharedPtrType
    createCubeGrid( std::istream& input,
                    const std::string& name,
                    MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( MPIHelper :: getCommunicator() );
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
      static_assert( dim == dimworld, "YaspGrid is used for creation of the structured grid which only supports dim == dimworld");
#endif

      Dune::dgf::IntervalBlock intervalBlock( input );
      if( !intervalBlock.isactive() )
      {
        std::cerr << "No interval block found, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      if( intervalBlock.numIntervals() != 1 )
      {
        std::cerr << "ALUGrid creation from YaspGrid can only handle 1 interval block, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      if( intervalBlock.dimw() != dim )
      {
        std::cerr << "ALUGrid creation from YaspGrid only works for dim == dimworld, using default DGF method to create ALUGrid!" << std::endl;
        return SharedPtrType( GridPtr< Grid > (input, mpiComm ).release());
      }

      const dgf::IntervalBlock::Interval &interval = intervalBlock.get( 0 );

      // This only works for the new ALUGrid version: if the DGF file does not
      // contain a proper IntervalBlock, the grid cannot be created in parallel
      // and the standard DGF technique (see the fallbacks above) is used instead.
      std::array<int, dim> dims;
      FieldVector<ctype, dimworld> lowerLeft;
      FieldVector<ctype, dimworld> upperRight;
      for( int i=0; i<dim; ++i )
      {
        dims[ i ] = interval.n[ i ] ;
        lowerLeft[ i ] = interval.p[ 0 ][ i ];
        upperRight[ i ] = interval.p[ 1 ][ i ];
      }

      // broadcast array values
      comm.broadcast( &dims[ 0 ], dim, 0 );
      comm.broadcast( &lowerLeft [ 0 ], dim, 0 );
      comm.broadcast( &upperRight[ 0 ], dim, 0 );

      std::string nameYasp( name );
      nameYasp += " via YaspGrid";
      typedef StructuredGridFactory< Grid > SGF;
      return SGF :: createCubeGridImpl( lowerLeft, upperRight, dims, comm, nameYasp );
    }

    template < class int_t >
    static SharedPtrType
    createSimplexGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                        const FieldVector<ctype,dimworld>& upperRight,
                        const array< int_t, dim>& elements,
                        MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      // create DGF interval block and use DGF parser to create simplex grid
      std::stringstream dgfstream;
      dgfstream << "DGF" << std::endl;
      dgfstream << "Interval" << std::endl;
      dgfstream << lowerLeft << std::endl;
      dgfstream << upperRight << std::endl;
      for( int i=0; i<dim; ++ i)
        dgfstream << elements[ i ] << " ";
      dgfstream << std::endl;
      dgfstream << "#" << std::endl;
      dgfstream << "Simplex" << std::endl;
      dgfstream << "#" << std::endl;
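      // For illustration (assuming dim == 2, lowerLeft = (0,0),
      // upperRight = (1,1) and elements = {2,2}), dgfstream now contains:
      //   DGF
      //   Interval
      //   0 0
      //   1 1
      //   2 2
      //   #
      //   Simplex
      //   #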

      Dune::GridPtr< Grid > grid( dgfstream, mpiComm );
      return SharedPtrType( grid.release() );
    }

    template < class int_t >
    static SharedPtrType
    createCubeGrid ( const FieldVector<ctype,dimworld>& lowerLeft,
                     const FieldVector<ctype,dimworld>& upperRight,
                     const array< int_t, dim>& elements,
                     MPICommunicatorType mpiComm = MPIHelper :: getCommunicator() )
    {
      CollectiveCommunication comm( mpiComm );
      std::string name( "Cartesian ALUGrid via SGrid" );
      return createCubeGridImpl( lowerLeft, upperRight, elements, comm, name );
    }

  protected:
    template <int codim, class Entity>
    int subEntities ( const Entity& entity ) const
    {
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
      return entity.subEntities( codim );
#else
      return entity.template count< codim > ();
#endif
    }

    template < class int_t >
    static SharedPtrType
    createCubeGridImpl ( const FieldVector<ctype,dimworld>& lowerLeft,
                         const FieldVector<ctype,dimworld>& upperRight,
                         const array< int_t, dim>& elements,
                         const CollectiveCommunication& comm,
                         const std::string& name )
    {
      const int myrank = comm.rank();

#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
      typedef YaspGrid< dimworld, EquidistantOffsetCoordinates<double,dimworld> > CartesianGridType ;
      std::array< int, dim > dims;
      for( int i=0; i<dim; ++i ) dims[ i ] = elements[ i ];

      CollectiveCommunication commSelf( MPIHelper :: getLocalCommunicator() );
      // create YaspGrid to partition and insert elements that belong to process directly
      CartesianGridType sgrid( lowerLeft, upperRight, dims, std::bitset<dim>(0ULL), 1, commSelf );
#else
      typedef SGrid< dim, dimworld, double > CartesianGridType ;
      FieldVector< int, dim > dims;
      for( int i=0; i<dim; ++i ) dims[ i ] = elements[ i ];

      // create SGrid to partition and insert elements that belong to process directly
      CartesianGridType sgrid( dims, lowerLeft, upperRight );
#endif

      typedef typename CartesianGridType :: LeafGridView GridView ;
      typedef typename GridView :: IndexSet IndexSet ;
      typedef typename IndexSet :: IndexType IndexType ;
      typedef typename GridView :: template Codim< 0 > :: Iterator ElementIterator ;
      typedef typename ElementIterator::Entity Entity ;
      typedef typename GridView :: IntersectionIterator IntersectionIterator ;
      typedef typename IntersectionIterator :: Intersection Intersection ;

      GridView gridView = sgrid.leafGridView();
      const IndexSet &indexSet = gridView.indexSet();

      // get decomposition of the macro grid
      SimplePartitioner< GridView, InteriorBorder_Partition > partitioner( gridView, comm, lowerLeft, upperRight );

      // create ALUGrid GridFactory
      GridFactory< Grid > factory;

      // map global vertex ids to local ones
      std::map< IndexType, unsigned int > vtxMap;
      std::map< double, const Entity > sortedElementList;

      const int numVertices = (1 << dim);
      std::vector< unsigned int > vertices( numVertices );

      const ElementIterator end = gridView.template end< 0 >();
      for( ElementIterator it = gridView.template begin< 0 >(); it != end; ++it )
      {
        const Entity &entity = *it;

        // if the element does not belong to our partition, continue
        if( partitioner.rank( entity ) != myrank )
          continue;

        const double elIndex = partitioner.index( entity );
        assert( sortedElementList.find( elIndex ) == sortedElementList.end() );
        sortedElementList.insert( std::make_pair( elIndex, entity ) );
      }

      int nextElementIndex = 0;
      const auto endSorted = sortedElementList.end();
      for( auto it = sortedElementList.begin(); it != endSorted; ++it )
      {
        const Entity &entity = (*it).second;

        // insert vertices and element
        const typename Entity::Geometry geo = entity.geometry();
        alugrid_assert( numVertices == geo.corners() );
        for( int i = 0; i < numVertices; ++i )
        {
          const IndexType vtxId = indexSet.subIndex( entity, i, dim );
          //auto result = vtxMap.insert( std::make_pair( vtxId, vtxMap.size() ) );
          std::pair< typename std::map< IndexType, unsigned int >::iterator, bool > result
            = vtxMap.insert( std::make_pair( vtxId, vtxMap.size() ) );
          if( result.second )
            factory.insertVertex( geo.corner( i ), vtxId );
          vertices[ i ] = result.first->second;
        }

        factory.insertElement( entity.type(), vertices );
        const int elementIndex = nextElementIndex++;

        //const auto iend = gridView.iend( entity );
        //for( auto iit = gridView.ibegin( entity ); iit != iend; ++iit )
        const IntersectionIterator iend = gridView.iend( entity );
        for( IntersectionIterator iit = gridView.ibegin( entity ); iit != iend; ++iit )
        {
          const Intersection &isec = *iit;
          const int faceNumber = isec.indexInInside();
          // insert boundary face in case of domain boundary
          if( isec.boundary() )
            factory.insertBoundary( elementIndex, faceNumber );
          // insert process boundary if the neighboring element has a different rank
#if DUNE_VERSION_NEWER(DUNE_GRID,2,4)
          if( isec.neighbor() && (partitioner.rank( isec.outside() ) != myrank) )
#else
          if( isec.neighbor() && (partitioner.rank( *isec.outside() ) != myrank) )
#endif
            factory.insertProcessBorder( elementIndex, faceNumber );
        }
      }

      // create shared grid pointer
      return SharedPtrType( factory.createGrid( true, true, name ) );
    }
  };

} // namespace Dune

#endif // #ifndef DUNE_ALUGRID_STRUCTUREDGRIDFACTORY_HH
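
A minimal usage sketch of this factory (illustrative only: the chosen grid
type, dimensions and variable names are assumptions, not part of this header):

  #include <dune/alugrid/grid.hh>
  #include <dune/alugrid/common/structuredgridfactory.hh>

  int main( int argc, char** argv )
  {
    // MPI must be initialized before the factory is used in parallel
    Dune::MPIHelper::instance( argc, argv );

    typedef Dune::ALUGrid< 2, 2, Dune::cube, Dune::nonconforming > Grid;

    Dune::FieldVector< Grid::ctype, 2 > lowerLeft( 0 ), upperRight( 1 );
    std::array< int, 2 > elements = {{ 8, 8 }};

    // each rank inserts only the elements of its own partition;
    // createGrid is executed collectively inside createCubeGrid
    auto gridPtr = Dune::StructuredGridFactory< Grid >::createCubeGrid( lowerLeft, upperRight, elements );

    gridPtr->loadBalance();
    return 0;
  }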