dune-grid-glue 2.5-git
gridglue.cc
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
/* IMPLEMENTATION OF CLASS G R I D G L U E */

#include "intersection.hh"
#include <vector>
#include <iterator>
#include "../gridglue.hh"

#include "../common/multivector.hh"

// no-op placeholder: MPI return codes and statuses are currently not checked
#define CheckMPIStatus(A,B) {}

#if HAVE_MPI
namespace {
  template<typename T>
  struct MPITypeInfo {};

  template<>
  struct MPITypeInfo< int >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return MPI_INT;
    }
    static const int tag = 1234560;
  };

  template<typename K, int N>
  struct MPITypeInfo< Dune::FieldVector<K,N> >
  {
    static const unsigned int size = N;
    static inline MPI_Datatype getType()
    {
      return Dune::MPITraits<K>::getType();
    }
    static const int tag = 1234561;
  };

  template<>
  struct MPITypeInfo< unsigned int >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return MPI_UNSIGNED;
    }
    static const int tag = 1234562;
  };

  template<>
  struct MPITypeInfo< Dune::GeometryType >
  {
    static const unsigned int size = 1;
    static inline MPI_Datatype getType()
    {
      return Dune::MPITraits< Dune::GeometryType >::getType();
    }
    static const int tag = 1234563;
  };

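  // Further element types can be made communicable with the same trait
  // pattern; e.g. a hypothetical specialization for double (not part of
  // this file) would read:
  //
  //   template<>
  //   struct MPITypeInfo< double >
  //   {
  //     static const unsigned int size = 1;
  //     static inline MPI_Datatype getType() { return MPI_DOUBLE; }
  //     static const int tag = 1234564;
  //   };
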
  /** Send a std::vector<T> to the right neighbor in the ring and receive
   *  one (of size leftsize) from the left neighbor; afterwards data holds
   *  the received vector. Degenerates to a plain send (or receive) if one
   *  of the two sides has nothing to transfer.
   */
  template<typename T>
  void MPI_SendVectorInRing(
    std::vector<T> & data,
    std::vector<T> & tmp,
    int leftsize,
    int rightrank,
    int leftrank,
    MPI_Comm comm
    )
  {
    // MPI status bookkeeping
    int result = 0;
    MPI_Status status;
    typedef MPITypeInfo<T> Info;
    // allocate the receive buffer
    unsigned int tmpsize = tmp.size();
    tmp.resize(leftsize);
    // send data
    int rank; MPI_Comm_rank(comm, &rank);
    // std::cout << rank << " send " << data.size() << " to " << rightrank << std::endl;
    // std::cout << rank << " recv " << tmp.size() << " from " << leftrank << std::endl;
    if (leftsize > 0 && data.size() > 0)
    {
      // send & receive
      result =
        MPI_Sendrecv(
          &(data[0]), Info::size*data.size(), Info::getType(), rightrank, Info::tag,
          &(tmp[0]), Info::size*tmp.size(), Info::getType(), leftrank, Info::tag,
          comm, &status);
    }
    if (leftsize == 0 && data.size() > 0)
    {
      // send only
      result =
        MPI_Send(
          &(data[0]), Info::size*data.size(), Info::getType(), rightrank, Info::tag,
          comm);
    }
    if (leftsize > 0 && data.size() == 0)
    {
      // receive only
      result =
        MPI_Recv(
          &(tmp[0]), Info::size*tmp.size(), Info::getType(), leftrank, Info::tag,
          comm, &status);
    }
    // check the result
    CheckMPIStatus(result, status);
    // swap buffers
    data.swap(tmp);
    // resize tmp buffer
    tmp.resize(tmpsize);

    MPI_Barrier(comm);
  }
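
  // Usage sketch (illustrative only; payload, tmp and remoteSize are
  // hypothetical names): after commsize-1 rounds every rank has seen the
  // payload of every other rank exactly once.
  //
  //   for (int i = 1; i < commsize; ++i) {
  //     int right = (rank + 1) % commsize;
  //     int left  = (rank - 1 + commsize) % commsize;
  //     // remoteSize must have been communicated beforehand
  //     MPI_SendVectorInRing(payload, tmp, remoteSize, right, left, comm);
  //     // payload now holds the data of rank (rank - i + commsize) % commsize
  //   }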

  /** Helper bundling the six patch data sizes that get communicated. */
  struct PatchSizes
  {
    PatchSizes() :
      patch0coords(0), patch0entities(0), patch0types(0),
      patch1coords(0), patch1entities(0), patch1types(0) {}

    /** Construct from the six sizes. */
    PatchSizes(unsigned int c0, unsigned int e0, unsigned int t0,
               unsigned int c1, unsigned int e1, unsigned int t1) :
      patch0coords(c0), patch0entities(e0), patch0types(t0),
      patch1coords(c1), patch1entities(e1), patch1types(t1) {}

    /** Construct from the six data containers. */
    template<typename C, typename E, typename T>
    PatchSizes(const C & c0, const E & e0, const T & t0,
               const C & c1, const E & e1, const T & t1) :
      patch0coords(c0.size()), patch0entities(e0.size()), patch0types(t0.size()),
      patch1coords(c1.size()), patch1entities(e1.size()), patch1types(t1.size()) {}

    unsigned int patch0coords, patch0entities, patch0types,
                 patch1coords, patch1entities, patch1types;

    unsigned int maxCoords() const { return std::max(patch0coords, patch1coords); }
    unsigned int maxEntities() const { return std::max(patch0entities, patch1entities); }
    unsigned int maxTypes() const { return std::max(patch0types, patch1types); }
  };
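
  // build() below treats a PatchSizes object as six contiguous unsigned
  // ints when passing it to MPI_Allreduce and MPI_Sendrecv_replace. A
  // compile-time guard for that layout assumption could look like this
  // (a sketch, not part of the original file):
  //
  //   static_assert(sizeof(PatchSizes) == 6 * sizeof(unsigned int),
  //                 "PatchSizes must consist of exactly six unsigned ints");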
}
#endif // HAVE_MPI

namespace Dune {
namespace GridGlue {

template<typename P0, typename P1>
GridGlue<P0, P1>::GridGlue(const Grid0Patch& gp0, const Grid1Patch& gp1, Merger* merger) :
  GridGlue(Dune::stackobject_to_shared_ptr(gp0), Dune::stackobject_to_shared_ptr(gp1), Dune::stackobject_to_shared_ptr(*merger))
{
  /* Nothing. */
}

template<typename P0, typename P1>
GridGlue<P0, P1>::GridGlue(const std::shared_ptr<const Grid0Patch> gp0, const std::shared_ptr<const Grid1Patch> gp1, const std::shared_ptr<Merger> merger)
  : patch0_(gp0), patch1_(gp1), merger_(merger)
{
#if HAVE_MPI
  // if we only have sequential meshes, don't use parallel gluing
  if (gp0->gridView().comm().size() == 1
      && gp1->gridView().comm().size() == 1)
    mpicomm_ = MPI_COMM_SELF;
  else
    mpicomm_ = MPI_COMM_WORLD;
#endif // HAVE_MPI
  std::cout << "GridGlue: Constructor succeeded!" << std::endl;
}

template<typename P0, typename P1>
void GridGlue<P0, P1>::build()
{
  int myrank = 0;
#if HAVE_MPI
  int commsize = 1;
  MPI_Comm_rank(mpicomm_, &myrank);
  MPI_Comm_size(mpicomm_, &commsize);
#endif // HAVE_MPI

  // clear the contents of the current intersections array
  {
    std::vector<IntersectionData> dummy(1); // we need size 1, as we always store data for the end-intersection
    intersections_.swap(dummy);
  }

  std::vector<Dune::FieldVector<ctype, dimworld> > patch0coords;
  std::vector<unsigned int> patch0entities;
  std::vector<Dune::GeometryType> patch0types;
  std::vector<Dune::FieldVector<ctype,dimworld> > patch1coords;
  std::vector<unsigned int> patch1entities;
  std::vector<Dune::GeometryType> patch1types;

  /*
   * extract the global surface patches
   */

  // retrieve the coordinate and topology information from the extractors
  // and apply transformations if necessary
  extractGrid(*patch0_, patch0coords, patch0entities, patch0types);
  extractGrid(*patch1_, patch1coords, patch1entities, patch1types);

  std::cout << ">>>> rank " << myrank << " coords: "
            << patch0coords.size() << " and " << patch1coords.size() << std::endl;
  std::cout << ">>>> rank " << myrank << " entities: "
            << patch0entities.size() << " and " << patch1entities.size() << std::endl;
  std::cout << ">>>> rank " << myrank << " types: "
            << patch0types.size() << " and " << patch1types.size() << std::endl;

#ifdef WRITE_TO_VTK
  const char prefix[] = "GridGlue::Builder::build() : ";
  char patch0surf[256];
  sprintf(patch0surf, "/tmp/vtk-patch0-test-%i", myrank);
  char patch1surf[256];
  sprintf(patch1surf, "/tmp/vtk-patch1-test-%i", myrank);

  // std::cout << prefix << "Writing patch0 surface to '" << patch0surf << ".vtk'...\n";
  // VtkSurfaceWriter vtksw(patch0surf);
  // vtksw.writeSurface(patch0coords, patch0entities, grid0dim, dimworld);
  // std::cout << prefix << "Done writing patch0 surface!\n";

  // std::cout << prefix << "Writing patch1 surface to '" << patch1surf << ".vtk'...\n";
  // vtksw.setFilename(patch1surf);
  // vtksw.writeSurface(patch1coords, patch1entities, grid1dim, dimworld);
  // std::cout << prefix << "Done writing patch1 surface!\n";
#endif // WRITE_TO_VTK

#if HAVE_MPI
  if (commsize > 1)
  {
    // set up the parallel index sets
    patch0_is_.beginResize();
    patch1_is_.beginResize();
  }
#endif // HAVE_MPI

  // merge the local patches and add them to the intersection list
  if (patch0entities.size() > 0 && patch1entities.size() > 0)
    mergePatches(patch0coords, patch0entities, patch0types, myrank,
                 patch1coords, patch1entities, patch1types, myrank);

#ifdef CALL_MERGER_TWICE
  if (patch0entities.size() > 0 && patch1entities.size() > 0)
    mergePatches(patch0coords, patch0entities, patch0types, myrank,
                 patch1coords, patch1entities, patch1types, myrank);
#endif

#if HAVE_MPI

  // status variables of the communication
  int mpi_result;
  MPI_Status mpi_status;

#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
  std::cout << myrank << " Comm size " << commsize << std::endl;
#endif

  if (commsize > 1)
  {
    // get the patch sizes
    PatchSizes patchSizes (patch0coords, patch0entities, patch0types,
                           patch1coords, patch1entities, patch1types);

#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " Start Communication" << std::endl;
#endif

    // communicate the max patch sizes
    PatchSizes maxPatchSizes;
    mpi_result = MPI_Allreduce(&patchSizes, &maxPatchSizes,
                               6, MPI_UNSIGNED, MPI_MAX, MPI_COMM_WORLD);
    CheckMPIStatus(mpi_result, 0);
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    std::cout << myrank << " maxPatchSizes done" << std::endl;
#endif

    // allocate remote buffers (max size to avoid reallocation)
    std::vector<Dune::FieldVector<ctype, dimworld> > remotePatch0coords ( maxPatchSizes.patch0coords );
    std::vector<unsigned int> remotePatch0entities ( maxPatchSizes.patch0entities );
    std::vector<Dune::GeometryType> remotePatch0types ( maxPatchSizes.patch0types );
    std::vector<Dune::FieldVector<ctype,dimworld> > remotePatch1coords ( maxPatchSizes.patch1coords );
    std::vector<unsigned int> remotePatch1entities ( maxPatchSizes.patch1entities );
    std::vector<Dune::GeometryType> remotePatch1types ( maxPatchSizes.patch1types );

    // copy the local patches into the remote patch buffers
    remotePatch0coords.clear();
    std::copy(patch0coords.begin(), patch0coords.end(), std::back_inserter(remotePatch0coords));
    remotePatch0entities.clear();
    std::copy(patch0entities.begin(), patch0entities.end(), std::back_inserter(remotePatch0entities));
    remotePatch0types.clear();
    std::copy(patch0types.begin(), patch0types.end(), std::back_inserter(remotePatch0types));

    remotePatch1coords.clear();
    std::copy(patch1coords.begin(), patch1coords.end(), std::back_inserter(remotePatch1coords));
    remotePatch1entities.clear();
    std::copy(patch1entities.begin(), patch1entities.end(), std::back_inserter(remotePatch1entities));
    remotePatch1types.clear();
    std::copy(patch1types.begin(), patch1types.end(), std::back_inserter(remotePatch1types));

    // allocate tmp buffers (max size to avoid reallocation)
    std::vector<Dune::FieldVector<ctype, dimworld> > tmpPatchCoords ( maxPatchSizes.maxCoords() );
    std::vector<unsigned int> tmpPatchEntities ( maxPatchSizes.maxEntities() );
    std::vector<Dune::GeometryType> tmpPatchTypes ( maxPatchSizes.maxTypes() );

    // communicate the patches in the ring
    for (int i=1; i<commsize; i++)
    {
      int remoterank = (myrank - i + commsize) % commsize;
      int rightrank = (myrank + 1 + commsize) % commsize;
      int leftrank = (myrank - 1 + commsize) % commsize;

      // communicate the current patch sizes
      // (patchSizes was initialized before the loop)
      {
        // send to the right neighbor, receive from the left neighbor
        mpi_result =
          MPI_Sendrecv_replace(
            &patchSizes, 6, MPI_UNSIGNED,
            rightrank, MPITypeInfo<unsigned int>::tag,
            leftrank, MPITypeInfo<unsigned int>::tag,
            mpicomm_, &mpi_status);
        CheckMPIStatus(mpi_result, mpi_status);
      }

#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patchSizes done" << std::endl;
#endif

      /* send remote patch to right neighbor and receive from left neighbor */

      // patch0coords
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch0coords" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch0coords, tmpPatchCoords, patchSizes.patch0coords,
        rightrank, leftrank, mpicomm_);

      // patch0entities
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch0entities" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch0entities, tmpPatchEntities, patchSizes.patch0entities,
        rightrank, leftrank, mpicomm_);

      // patch0types
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch0types" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch0types, tmpPatchTypes, patchSizes.patch0types,
        rightrank, leftrank, mpicomm_);

      // patch1coords
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch1coords" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch1coords, tmpPatchCoords, patchSizes.patch1coords,
        rightrank, leftrank, mpicomm_);

      // patch1entities
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch1entities" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch1entities, tmpPatchEntities, patchSizes.patch1entities,
        rightrank, leftrank, mpicomm_);

      // patch1types
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
      std::cout << myrank << " patch1types" << std::endl;
#endif
      MPI_SendVectorInRing(
        remotePatch1types, tmpPatchTypes, patchSizes.patch1types,
        rightrank, leftrank, mpicomm_);

      /* merging */
      // merge local & remote patches;
      // patch0_is_ and patch1_is_ are updated automatically
      if (remotePatch1entities.size() > 0 && patch0entities.size() > 0)
        mergePatches(patch0coords, patch0entities, patch0types, myrank,
                     remotePatch1coords, remotePatch1entities, remotePatch1types, remoterank);
      if (remotePatch0entities.size() > 0 && patch1entities.size() > 0)
        mergePatches(remotePatch0coords, remotePatch0entities, remotePatch0types, remoterank,
                     patch1coords, patch1entities, patch1types, myrank);

      std::cout << "Sync processes" << std::endl;
      MPI_Barrier(mpicomm_);
      std::cout << "...done" << std::endl;
    }
  }

  if (commsize > 1)
  {
    // finalize the ParallelIndexSet & RemoteIndices
    patch0_is_.endResize();
    patch1_is_.endResize();

    // set up the remote index information
    remoteIndices_.setIncludeSelf(true);
#warning add list of neighbors ...
    remoteIndices_.setIndexSets(patch0_is_, patch1_is_, mpicomm_);
    remoteIndices_.rebuild<true /* all indices are public */>();

    // DEBUG: print all remote indices
#ifdef DEBUG_GRIDGLUE_PARALLELMERGE
    for (auto it = remoteIndices_.begin(); it != remoteIndices_.end(); ++it)
    {
      std::cout << myrank << "\tri-list\t" << it->first << std::endl;
      for (auto xit = it->second.first->begin(); xit != it->second.first->end(); ++xit)
        std::cout << myrank << "\tri-list 1 \t" << it->first << "\t" << *xit << std::endl;
      for (auto xit = it->second.second->begin(); xit != it->second.second->end(); ++xit)
        std::cout << myrank << "\tri-list 2 \t" << it->first << "\t" << *xit << std::endl;
    }
#endif
  }
#endif

}

template<typename T>
void printVector(const std::vector<T> & v, std::string name)
{
  std::cout << name << std::endl;
  for (size_t i=0; i<v.size(); i++)
  {
    std::cout << v[i] << " ";
  }
  std::cout << std::endl;
}

template<typename P0, typename P1>
void GridGlue<P0, P1>::mergePatches(
  const std::vector<Dune::FieldVector<ctype,dimworld> >& patch0coords,
  const std::vector<unsigned int>& patch0entities,
  const std::vector<Dune::GeometryType>& patch0types,
  const int patch0rank,
  const std::vector<Dune::FieldVector<ctype,dimworld> >& patch1coords,
  const std::vector<unsigned int>& patch1entities,
  const std::vector<Dune::GeometryType>& patch1types,
  const int patch1rank)
{

  // how to handle overlap etc.?

  int myrank = 0;
#if HAVE_MPI
  int commsize = 1;
  MPI_Comm_rank(mpicomm_, &myrank);
  MPI_Comm_size(mpicomm_, &commsize);
#endif // HAVE_MPI

  // which patches are local?
  const bool patch0local = (myrank == patch0rank);
  const bool patch1local = (myrank == patch1rank);

  // remember the number of previous remote intersections
  const unsigned int offset = intersections_.size()-1;

  std::cout << myrank
            << " GridGlue::mergePatches : rank " << patch0rank << " / " << patch1rank << std::endl;

  // start the actual build process
  merger_->build(patch0coords, patch0entities, patch0types,
                 patch1coords, patch1entities, patch1types);

  // append to the intersections list
  intersections_.resize(merger_->nSimplices() + offset + 1);
  for (unsigned int i = 0; i < merger_->nSimplices(); ++i)
  {
    IntersectionData data(*this, i, offset, patch0local, patch1local);
    intersections_[offset+i] = data;
  }

  index__sz = intersections_.size() - 1;

  std::cout << myrank
            << " GridGlue::mergePatches : "
            << "The number of remote intersections is " << intersections_.size()-1 << std::endl;

  // printVector(patch0coords,"patch0coords");
  // printVector(patch0entities,"patch0entities");
  // printVector(patch0types,"patch0types");
  // printVector(patch1coords,"patch1coords");
  // printVector(patch1entities,"patch1entities");
  // printVector(patch1types,"patch1types");

#if HAVE_MPI
  if (commsize > 1)
  {
    // update the remote index sets
    assert(Dune::RESIZE == patch0_is_.state());
    assert(Dune::RESIZE == patch1_is_.state());

    for (unsigned int i = 0; i < merger_->nSimplices(); i++)
    {
#warning only handle the newest intersections / merger info
      const IntersectionData & it = intersections_[i];
      GlobalId gid(patch0rank, patch1rank, i);
      if (it.grid0local_)
      {
        Dune::PartitionType ptype = patch0_->element(it.grid0indices_[0]).partitionType();
        patch0_is_.add (gid, LocalIndex(offset+i, ptype) );
      }
      if (it.grid1local_)
      {
        Dune::PartitionType ptype = patch1_->element(it.grid1indices_[0]).partitionType();
        patch1_is_.add (gid, LocalIndex(offset+i, ptype) );
      }
    }
  }
#endif // HAVE_MPI

  // clean up the merger
  merger_->clear();
}

template<typename P0, typename P1>
template<typename Extractor>
void GridGlue<P0, P1>::extractGrid(
  const Extractor & extractor,
  std::vector<Dune::FieldVector<ctype, dimworld> > & coords,
  std::vector<unsigned int> & entities,
  std::vector<Dune::GeometryType>& geometryTypes) const
{
  std::vector<typename Extractor::Coords> tempcoords;
  std::vector<typename Extractor::VertexVector> tempentities;

  extractor.getCoords(tempcoords);
  coords.clear();
  coords.reserve(tempcoords.size());

  for (unsigned int i = 0; i < tempcoords.size(); ++i)
  {
    assert(int(dimworld) == int(Extractor::dimworld));
    coords.push_back(Dune::FieldVector<ctype, dimworld>());
    for (size_t j = 0; j < dimworld; ++j)
      coords.back()[j] = tempcoords[i][j];
  }

  // flatten the per-face corner lists into one index array
  extractor.getFaces(tempentities);
  entities.clear();

  for (unsigned int i = 0; i < tempentities.size(); ++i) {
    for (unsigned int j = 0; j < tempentities[i].size(); ++j)
      entities.push_back(tempentities[i][j]);
  }

  // get the list of geometry types from the extractor
  extractor.getGeometryTypes(geometryTypes);
}
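
// Illustration of the layout produced by extractGrid (hypothetical values,
// assuming two triangles sharing an edge): the corner indices of all faces
// are concatenated into one flat array, with one geometry type per face:
//
//   coords        = { v0, v1, v2, v3 }        // dimworld-dimensional points
//   entities      = { 0, 1, 2,  2, 1, 3 }     // three corner indices per triangle
//   geometryTypes = { triangle, triangle }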

} // end namespace GridGlue
} // end namespace Dune
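
/*
 * Usage sketch (not part of the original file; all names are illustrative):
 * a GridGlue is constructed from two coupling patches and a merger, and the
 * remote intersections are computed by build().
 *
 *   auto patch0 = std::make_shared<const Patch0>(...);  // some Extractor type
 *   auto patch1 = std::make_shared<const Patch1>(...);
 *   auto merger = std::make_shared<SomeMerger>();       // a Merger implementation
 *
 *   Dune::GridGlue::GridGlue<Patch0, Patch1> glue(patch0, patch1, merger);
 *   glue.build();  // extracts both patches and merges them, ring-communicating
 *                  // the patch data when the communicator has several ranks
 */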