3#ifndef DUNE_ISTL_REPARTITION_HH
4#define DUNE_ISTL_REPARTITION_HH
20#include <dune/common/timer.hh>
21#include <dune/common/unused.hh>
22#include <dune/common/enumset.hh>
23#include <dune/common/stdstreams.hh>
24#include <dune/common/parallel/mpitraits.hh>
25#include <dune/common/parallel/communicator.hh>
26#include <dune/common/parallel/indexset.hh>
27#include <dune/common/parallel/indicessyncer.hh>
28#include <dune/common/parallel/remoteindices.hh>
57 template<
class G,
class T1,
class T2>
61 typedef typename IndexSet::LocalIndex::Attribute Attribute;
63 IndexSet& indexSet =
oocomm.indexSet();
67 typedef typename G::ConstVertexIterator VertexIterator;
70 std::size_t sum=0,
needed = graph.noVertices()-indexSet.size();
74 for(
int i=0; i<
oocomm.communicator().size(); ++i)
83 typedef typename IndexSet::const_iterator Iterator;
86 for(Iterator
it = indexSet.begin();
it != end; ++
it)
95 for(
int i=0; i<
oocomm.communicator().rank(); ++i)
99 std::map<int,SLList<std::pair<T1,Attribute> > >
globalIndices;
101 indexSet.beginResize();
103 for(VertexIterator vertex = graph.begin(),
vend=graph.end(); vertex !=
vend; ++vertex) {
104 const typename IndexSet::IndexPair*
pair=
lookup.pair(*vertex);
112 indexSet.endResize();
116 oocomm.freeGlobalLookup();
117 oocomm.buildGlobalLookup();
119 std::cout<<
"Holes are filled!"<<std::endl;
120 std::cout<<
oocomm.communicator().rank()<<
": "<<
oocomm.indexSet()<<std::endl;
127 class ParmetisDuneIndexMap
132 #if PARMETIS_MAJOR_VERSION > 3
133 typedef idx_t idxtype;
138 typedef std::size_t idxtype;
141 template<
class Graph,
class OOComm>
142 ParmetisDuneIndexMap(
const Graph& graph,
const OOComm& com);
143 int toParmetis(
int i)
const
145 return duneToParmetis[i];
147 int toLocalParmetis(
int i)
const
149 return duneToParmetis[i]-base_;
151 int operator[](
int i)
const
153 return duneToParmetis[i];
155 int toDune(
int i)
const
157 return parmetisToDune[i];
159 std::vector<int>::size_type numOfOwnVtx()
const
161 return parmetisToDune.size();
170 std::vector<int> duneToParmetis;
171 std::vector<int> parmetisToDune;
173 std::vector<idxtype> vtxDist_;
176 template<
class G,
class OOComm>
177 ParmetisDuneIndexMap::ParmetisDuneIndexMap(
const G& graph,
const OOComm& oocomm)
178 : duneToParmetis(graph.noVertices(), -1), vtxDist_(oocomm.communicator().size()+1)
180 int npes=oocomm.communicator().size(), mype=oocomm.communicator().rank();
182 typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
183 typedef typename OOComm::OwnerSet OwnerSet;
186 Iterator end = oocomm.indexSet().end();
187 for(Iterator index = oocomm.indexSet().begin(); index != end; ++index) {
188 if (OwnerSet::contains(index->local().attribute())) {
192 parmetisToDune.resize(numOfOwnVtx);
193 std::vector<int> globalNumOfVtx(npes);
195 MPI_Allgather(&numOfOwnVtx, 1, MPI_INT, &(globalNumOfVtx[0]), 1, MPI_INT, oocomm.communicator());
199 for(
int i=0; i<npes; i++) {
201 base += globalNumOfVtx[i];
203 vtxDist_[i+1] = vtxDist_[i] + globalNumOfVtx[i];
209 typedef typename G::ConstVertexIterator VertexIterator;
211 std::cout << oocomm.communicator().rank()<<
" vtxDist: ";
212 for(
int i=0; i<= npes; ++i)
213 std::cout << vtxDist_[i]<<
" ";
214 std::cout<<std::endl;
221 VertexIterator vend = graph.end();
222 for(VertexIterator vertex = graph.begin(); vertex != vend; ++vertex) {
223 const typename OOComm::ParallelIndexSet::IndexPair* index=oocomm.globalLookup().pair(*vertex);
225 if (OwnerSet::contains(index->local().attribute())) {
227 parmetisToDune[base-base_]=index->local();
228 duneToParmetis[index->local()] = base++;
238 std::cout <<oocomm.communicator().rank()<<
": before ";
239 for(std::size_t i=0; i<duneToParmetis.size(); ++i)
240 std::cout<<duneToParmetis[i]<<
" ";
241 std::cout<<std::endl;
243 oocomm.copyOwnerToAll(duneToParmetis,duneToParmetis);
245 std::cout <<oocomm.communicator().rank()<<
": after ";
246 for(std::size_t i=0; i<duneToParmetis.size(); ++i)
247 std::cout<<duneToParmetis[i]<<
" ";
248 std::cout<<std::endl;
260 template<
class Flags,
class IS>
263 std::map<int,int>
sizes;
265 typedef typename IS::const_iterator
IIter;
266 for(
IIter i=idxset.begin(), end=idxset.end(); i!=end; ++i)
267 if(Flags::contains(i->local().attribute()))
271 typedef std::map<int,int>::const_iterator
MIter;
273 interfaces()[i->first].first.reserve(i->second);
276 typedef typename IS::const_iterator
IIter;
277 for(
IIter i=idxset.begin(), end=idxset.end(); i!=end; ++i)
278 if(Flags::contains(i->local().attribute()))
290 template<
typename TG>
293 typedef typename std::vector<std::pair<TG,int> >::const_iterator
VIter;
308 typedef ParmetisDuneIndexMap :: idxtype idxtype ;
320 void createSendBuf(std::vector<GI>& ownerVec, std::set<GI>& overlapVec, std::set<int>& neighbors,
char *sendBuf,
int buffersize, MPI_Comm comm) {
322 std::size_t s=ownerVec.size();
326 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
327 MPI_Pack(&(ownerVec[0]), s, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
328 s = overlapVec.size();
329 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
330 typedef typename std::set<GI>::iterator Iter;
331 for(Iter i=overlapVec.begin(), end= overlapVec.end(); i != end; ++i)
332 MPI_Pack(
const_cast<GI*
>(&(*i)), 1, MPITraits<GI>::getType(), sendBuf, buffersize, &pos, comm);
335 MPI_Pack(&s, 1, MPITraits<std::size_t>::getType(), sendBuf, buffersize, &pos, comm);
336 typedef typename std::set<int>::iterator IIter;
338 for(IIter i=neighbors.begin(), end= neighbors.end(); i != end; ++i)
339 MPI_Pack(
const_cast<int*
>(&(*i)), 1, MPI_INT, sendBuf, buffersize, &pos, comm);
350 void saveRecvBuf(
char *recvBuf,
int bufferSize, std::vector<std::pair<GI,int> >& ownerVec,
351 std::set<GI>& overlapVec, std::set<int>& neighbors, RedistributeInterface& inf,
int from, MPI_Comm comm) {
355 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
356 inf.reserveSpaceForReceiveInterface(from, size);
357 ownerVec.reserve(ownerVec.size()+size);
358 for(; size!=0; --size) {
360 MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
361 ownerVec.push_back(std::make_pair(gi,from));
364 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
365 typename std::set<GI>::iterator ipos = overlapVec.begin();
366 Dune::dverb <<
"unpacking "<<size<<
" overlap"<<std::endl;
367 for(; size!=0; --size) {
369 MPI_Unpack(recvBuf, bufferSize, &pos, &gi, 1, MPITraits<GI>::getType(), comm);
370 ipos=overlapVec.insert(ipos, gi);
373 MPI_Unpack(recvBuf, bufferSize, &pos, &size, 1, MPITraits<std::size_t>::getType(), comm);
374 Dune::dverb <<
"unpacking "<<size<<
" neighbors"<<std::endl;
375 typename std::set<int>::iterator npos = neighbors.begin();
376 for(; size!=0; --size) {
378 MPI_Unpack(recvBuf, bufferSize, &pos, &n, 1, MPI_INT, comm);
379 npos=neighbors.insert(npos, n);
397 void getDomain(
const MPI_Comm& comm, T *part,
int numOfOwnVtx,
int nparts,
int *myDomain, std::vector<int> &domainMapping) {
399 MPI_Comm_size(comm, &npes);
400 MPI_Comm_rank(comm, &mype);
407 std::vector<int> domain(nparts, 0);
408 std::vector<int> assigned(npes, 0);
410 domainMapping.assign(domainMapping.size(), -1);
413 for (i=0; i<numOfOwnVtx; i++) {
417 std::vector<int> domainMatrix(npes * nparts, -1);
420 int *buf =
new int[nparts];
421 for (i=0; i<nparts; i++) {
423 domainMatrix[mype*nparts+i] = domain[i];
426 int src = (mype-1+npes)%npes;
427 int dest = (mype+1)%npes;
429 for (i=0; i<npes-1; i++) {
430 MPI_Sendrecv_replace(buf, nparts, MPI_INT, dest, 0, src, 0, comm, &status);
432 pe = ((mype-1-i)+npes)%npes;
433 for(j=0; j<nparts; j++) {
435 domainMatrix[pe*nparts+j] = buf[j];
443 int maxOccurance = 0;
445 std::set<std::size_t> unassigned;
447 for(i=0; i<nparts; i++) {
448 for(j=0; j<npes; j++) {
450 if (assigned[j]==0) {
451 if (maxOccurance < domainMatrix[j*nparts+i]) {
452 maxOccurance = domainMatrix[j*nparts+i];
460 domainMapping[i] = pe;
470 unassigned.insert(i);
475 typename std::vector<int>::iterator next_free = assigned.begin();
477 for(
typename std::set<std::size_t>::iterator domain = unassigned.begin(),
478 end = unassigned.end(); domain != end; ++domain)
480 next_free = std::find_if(next_free, assigned.end(), std::bind2nd(std::less<int>(), 1));
481 assert(next_free != assigned.end());
482 domainMapping[*domain] = next_free-assigned.begin();
490 bool operator()(
const T& t1,
const T& t2)
const
508 void mergeVec(std::vector<std::pair<GI, int> >& ownerVec, std::set<GI>& overlapSet) {
510 typedef typename std::vector<std::pair<GI,int> >::const_iterator VIter;
513 if(ownerVec.size()>0)
515 VIter old=ownerVec.begin();
516 for(VIter i=old+1, end=ownerVec.end(); i != end; old=i++)
518 if(i->first==old->first)
520 std::cerr<<
"Value at indes"<<old-ownerVec.begin()<<
" is the same as at index "
521 <<i-ownerVec.begin()<<
" ["<<old->first<<
","<<old->second<<
"]==["
522 <<i->first<<
","<<i->second<<
"]"<<std::endl;
530 typedef typename std::set<GI>::iterator SIter;
531 VIter v=ownerVec.begin(), vend=ownerVec.end();
532 for(SIter s=overlapSet.begin(), send=overlapSet.end(); s!=send;)
534 while(v!=vend && v->first<*s) ++v;
535 if(v!=vend && v->first==*s) {
540 overlapSet.erase(tmp);
560 template<
class OwnerSet,
class Graph,
class IS,
class GI>
561 void getNeighbor(
const Graph& g, std::vector<int>& part,
562 typename Graph::VertexDescriptor vtx,
const IS& indexSet,
563 int toPe, std::set<GI>& neighbor, std::set<int>& neighborProcs) {
564 typedef typename Graph::ConstEdgeIterator Iter;
565 for(Iter edge=g.beginEdges(vtx), end=g.endEdges(vtx); edge!=end; ++edge)
567 const typename IS::IndexPair* pindex = indexSet.pair(edge.target());
569 if(part[pindex->local()]!=toPe || !OwnerSet::contains(pindex->local().attribute()))
572 neighbor.insert(pindex->global());
573 neighborProcs.insert(part[pindex->local()]);
578 template<
class T,
class I>
579 void my_push_back(std::vector<T>& ownerVec,
const I& index,
int proc)
581 DUNE_UNUSED_PARAMETER(proc);
582 ownerVec.push_back(index);
585 template<
class T,
class I>
586 void my_push_back(std::vector<std::pair<T,int> >& ownerVec,
const I& index,
int proc)
588 ownerVec.push_back(std::make_pair(index,proc));
591 void reserve(std::vector<T>&, RedistributeInterface&,
int)
594 void reserve(std::vector<std::pair<T,int> >& ownerVec, RedistributeInterface& redist,
int proc)
596 redist.reserveSpaceForReceiveInterface(proc, ownerVec.size());
617 template<
class OwnerSet,
class G,
class IS,
class T,
class GI>
618 void getOwnerOverlapVec(
const G& graph, std::vector<int>& part, IS& indexSet,
619 int myPe,
int toPe, std::vector<T>& ownerVec, std::set<GI>& overlapSet,
620 RedistributeInterface& redist, std::set<int>& neighborProcs) {
621 DUNE_UNUSED_PARAMETER(myPe);
623 typedef typename IS::const_iterator Iterator;
624 for(Iterator index = indexSet.begin(); index != indexSet.end(); ++index) {
626 if(OwnerSet::contains(index->local().attribute()))
628 if(part[index->local()]==toPe)
630 getNeighbor<OwnerSet>(graph, part, index->local(), indexSet,
631 toPe, overlapSet, neighborProcs);
632 my_push_back(ownerVec, index->global(), toPe);
636 reserve(ownerVec, redist, toPe);
647 template<
class F,
class IS>
648 inline bool isOwner(IS& indexSet,
int index) {
650 const typename IS::IndexPair* pindex=indexSet.pair(index);
653 return F::contains(pindex->local().attribute());
657 class BaseEdgeFunctor
660 BaseEdgeFunctor(idxtype* adj,
const ParmetisDuneIndexMap& data)
661 : i_(), adj_(adj), data_(data)
665 void operator()(
const T& edge)
669 adj_[i_] = data_.toParmetis(edge.target());
680 const ParmetisDuneIndexMap& data_;
685 :
public BaseEdgeFunctor
687 EdgeFunctor(idxtype* adj,
const ParmetisDuneIndexMap& data, std::size_t)
688 : BaseEdgeFunctor(adj, data)
691 idxtype* getWeights()
698 template<
class G,
class V,
class E,
class VM,
class EM>
699 class EdgeFunctor<
Dune::Amg::PropertiesGraph<G,V,E,VM,EM> >
700 :
public BaseEdgeFunctor
703 EdgeFunctor(idxtype* adj,
const ParmetisDuneIndexMap& data, std::size_t s)
704 : BaseEdgeFunctor(adj, data)
706 weight_=
new idxtype[s];
710 void operator()(
const T& edge)
712 weight_[index()]=edge.properties().depends() ? 3 : 1;
713 BaseEdgeFunctor::operator()(edge);
715 idxtype* getWeights()
744 template<
class F,
class G,
class IS,
class EW>
745 void getAdjArrays(G& graph, IS& indexSet, idxtype *xadj,
751 typedef typename G::ConstVertexIterator VertexIterator;
753 typedef typename IS::const_iterator Iterator;
755 VertexIterator vend = graph.end();
758 for(VertexIterator vertex = graph.begin(); vertex != vend; ++vertex) {
759 if (isOwner<F>(indexSet,*vertex)) {
761 typedef typename G::ConstEdgeIterator EdgeIterator;
762 EdgeIterator eend = vertex.end();
763 xadj[j] = ew.index();
765 for(EdgeIterator edge = vertex.begin(); edge != eend; ++edge) {
770 xadj[j] = ew.index();
774 template<
class G,
class T1,
class T2>
778 RedistributeInterface& redistInf,
781#ifndef METIS_VER_MAJOR
785 void METIS_PartGraphKway(
int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt,
786 idxtype *adjwgt,
int *wgtflag,
int *numflag,
int *nparts,
787 int *options,
int *edgecut, idxtype *part);
789 void METIS_PartGraphRecursive(
int *nvtxs, idxtype *xadj, idxtype *adjncy, idxtype *vwgt,
790 idxtype *adjwgt,
int *wgtflag,
int *numflag,
int *nparts,
791 int *options,
int *edgecut, idxtype *part);
796 template<
class S,
class T>
799 for(T *
cur=array, *end=array+
l;
cur!=end; ++
cur)
810 std::cerr <<
"Check graph: xadj["<<
vtx<<
"]="<<
xadj[
vtx]<<
" (>"
811 <<noEdges<<
") out of range!"<<std::endl;
815 std::cerr <<
"Check graph: xadj["<<
vtx+1<<
"]="<<
xadj[
vtx+1]<<
" (>"
816 <<noEdges<<
") out of range!"<<std::endl;
822 std::cerr<<
" Edge "<<
adjncy[i]<<
" out of range ["<<0<<
","<<
noVtx<<
")"
832 for(idxtype j=
xadj[target]; j<
xadj[target+1]; ++j)
836 std::cerr<<
"Edge ("<<target<<
","<<
vtx<<
") "<<i<<
" time"<<std::endl;
845 template<
class M,
class T1,
class T2>
852 if(verbose &&
oocomm.communicator().rank()==0)
853 std::cout<<
"Repartitioning from "<<
oocomm.communicator().size()
854 <<
" to "<<
nparts<<
" parts"<<std::endl;
858 int*
part =
new int[1];
861 idxtype*
part =
new idxtype[1];
873 typedef typename RemoteIndices::const_iterator
888 idxtype *
xadj=
new idxtype[2];
889 idxtype *
vtxdist=
new idxtype[
oocomm.communicator().size()+1];
897 for(
int i=0; i<
oocomm.communicator().size(); ++i)
937 vwgt =
new idxtype[1];
946 if(n->first !=
rank) {
964 for(
int i=0; i<
nparts; ++i)
966 int options[5] ={ 0,1,15,0,0};
983 oocomm.communicator().barrier();
984 if(verbose &&
oocomm.communicator().rank()==0)
985 std::cout<<
"Creating comm graph took "<<
time.elapsed()<<std::endl;
988#ifdef PARALLEL_PARTITION
999 if(verbose &&
oocomm.communicator().rank()==0)
1000 std::cout<<
"ParMETIS took "<<
time.elapsed()<<std::endl;
1011 if(verbose &&
oocomm.communicator().rank()==0)
1012 std::cout<<
"Gathering noedges took "<<
time1.elapsed()<<std::endl;
1037 noxs =
new int[
oocomm.communicator().size()];
1039 novs =
new int[
oocomm.communicator().size()];
1041 for(
int i=0; i <
oocomm.communicator().size(); ++i) {
1075 <<
" gnoedges: "<<
gnoedges<<std::endl;
1077 gpart =
new idxtype[noVertices];
1079 gvwgt =
new idxtype[noVertices];
1085 if(verbose &&
oocomm.communicator().rank()==0)
1086 std::cout<<
"Preparing global graph took "<<
time1.elapsed()<<std::endl;
1104 if(verbose &&
oocomm.communicator().rank()==0)
1105 std::cout<<
"Gathering global graph data took "<<
time1.elapsed()<<std::endl;
1115 idxtype increment =
vtxdist[1];
1116 idxtype *start=
gxadj+1;
1117 for(
int i=1; i<
oocomm.communicator().size(); ++i) {
1123 increment = *(start-1);
1124 std::transform(start+offset, start+
l+offset, start, std::bind2nd(std::plus<idxtype>(), increment));
1138 if(verbose &&
oocomm.communicator().rank()==0)
1139 std::cout<<
"Postprocesing global graph data took "<<
time1.elapsed()<<std::endl;
1146 if(verbose &&
oocomm.communicator().rank()==0)
1147 std::cout<<
"Creating grah one 1 process took "<<
time.elapsed()<<std::endl;
1149 options[0]=0; options[1]=1; options[2]=1; options[3]=3; options[4]=3;
1150#if METIS_VER_MAJOR >= 5
1163 if(verbose &&
oocomm.communicator().rank()==0)
1164 std::cout<<
"METIS took "<<
time.elapsed()<<std::endl;
1210 if(verbose &&
oocomm.communicator().rank()==0)
1211 std::cout<<
"Scattering repartitioning took "<<
time.elapsed()<<std::endl;
1218 if(verbose &&
oocomm.communicator().rank()==0)
1219 std::cout<<
"Filling index set took "<<
time.elapsed()<<std::endl;
1225 /
oocomm.communicator().size();
1226 if(
oocomm.communicator().rank()==0)
1227 std::cout<<
"Average no neighbours was "<<
noNeighbours<<std::endl;
1231 if(verbose &&
oocomm.communicator().rank()==0)
1232 std::cout<<
"Building index sets took "<<
time.elapsed()<<std::endl;
1254 template<
class G,
class T1,
class T2>
1263 oocomm.buildGlobalLookup(graph.noVertices());
1266 if(verbose &&
oocomm.communicator().rank()==0)
1267 std::cout<<
"Filling holes took "<<
time.elapsed()<<std::endl;
1302 typedef typename OOComm::OwnerSet OwnerSet;
1311 std::size_t *
part =
new std::size_t[
indexMap.numOfOwnVtx()];
1313 for(std::size_t i=0; i <
indexMap.numOfOwnVtx(); ++i)
1318 std::cerr<<
"ParMETIS not activated. Will repartition to 1 domain instead of requested "
1319 <<
nparts<<
" domains."<<std::endl;
1327 idxtype *
adjncy =
new idxtype[graph.noEdges()];
1338 for(
int i=0; i<
nparts; ++i)
1356 std::cout<<std::endl;
1357 std::cout<<
"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1358 <<options[1]<<
" "<<options[2]<<
"}, Ncon: "
1370 oocomm.communicator().barrier();
1371 if(
oocomm.communicator().rank()==0)
1372 std::cout<<
"Preparing for parmetis took "<<
time.elapsed()<<std::endl;
1392 std::cout<<std::endl;
1393 std::cout<<
"ParMETIS_V3_PartKway reported a cut of "<<
edgecut<<std::endl;
1394 std::cout<<std::endl;
1396 std::cout<<
mype<<
": PARMETIS-Result: ";
1398 std::cout<<
part[i]<<
" ";
1400 std::cout<<std::endl;
1401 std::cout<<
"Testing ParMETIS_V3_PartKway with options[1-2] = {"
1402 <<options[1]<<
" "<<options[2]<<
"}, Ncon: "
1414 oocomm.communicator().barrier();
1415 if(
oocomm.communicator().rank()==0)
1416 std::cout<<
"Parmetis took "<<
time.elapsed()<<std::endl;
1423 for(std::size_t i=0; i<
indexMap.numOfOwnVtx(); ++i)
1441 std::cout<<
mype<<
": DomainMapping: ";
1442 for(
int j=0; j<
nparts; j++) {
1445 std::cout<<std::endl;
1454 typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
1456 for(Iterator index =
oocomm.indexSet().begin(); index !=
oocomm.indexSet().end(); ++index)
1457 if(OwnerSet::contains(index->local().attribute())) {
1465 if (
oocomm.getSolverCategory() ==
1466 static_cast<int>(SolverCategory::nonoverlapping))
1471 oocomm.communicator().barrier();
1472 if(
oocomm.communicator().rank()==0)
1473 std::cout<<
"Creating indexsets took "<<
time.elapsed()<<std::endl;
1480 template<
class G,
class T1,
class T2>
1488 typedef typename OOComm::OwnerSet OwnerSet;
1528 typedef typename std::vector<int>::const_iterator
VIter;
1538 typedef std::set<int>::const_iterator iterator;
1553 for(
int i=0; i<
npes; ++i)
1560 for(
int i=0; i<
npes; ++i)
1583 std::cout<<
mype<<
": recvFrom: ";
1584 typedef typename std::set<int>::const_iterator
siter;
1590 std::cout<<std::endl<<std::endl;
1591 std::cout<<
mype<<
": sendTo: ";
1593 std::cout<<
sendTo[i]<<
" ";
1595 std::cout<<std::endl<<std::endl;
1599 if(
oocomm.communicator().rank()==0)
1600 std::cout<<
" Communicating the receive information took "<<
1601 time.elapsed()<<std::endl;
1615 typedef typename OOComm::ParallelIndexSet::GlobalIndex
GI;
1667 oocomm.communicator().barrier();
1668 if(
oocomm.communicator().rank()==0)
1669 std::cout<<
" Creating sends took "<<
1670 time.elapsed()<<std::endl;
1707 std::cerr<<
mype<<
": Error in sending :"<<std::endl;
1714 std::cerr<<
" source="<<
statuses[i].MPI_SOURCE<<
" message: ";
1718 std::cerr<<std::endl;
1722 oocomm.communicator().barrier();
1723 if(
oocomm.communicator().rank()==0)
1724 std::cout<<
" Receiving and saving took "<<
1725 time.elapsed()<<std::endl;
1766 typedef typename std::set<int>::const_iterator
IIter;
1769 std::cout<<
oocomm.communicator().rank()<<
" ";
1773 std::cout<<*i<<
"->"<<
newranks[*i]<<
" ";
1776 std::cout<<std::endl;
1787 oocomm.communicator().barrier();
1788 if(
oocomm.communicator().rank()==0)
1789 std::cout<<
" Calculating new neighbours ("<<
tneighbors.size()<<
") took "<<
1790 time.elapsed()<<std::endl;
1802 typedef typename OOComm::ParallelIndexSet::LocalIndex LocalIndex;
1803 typedef typename std::vector<std::pair<GI,int> >::const_iterator
VPIter;
1806 outputIndexSet.add(
g->first,LocalIndex(i, OwnerOverlapCopyAttributeSet::owner,
true));
1811 oocomm.communicator().barrier();
1812 if(
oocomm.communicator().rank()==0)
1813 std::cout<<
" Adding owner indices took "<<
1814 time.elapsed()<<std::endl;
1830 oocomm.communicator().barrier();
1831 if(
oocomm.communicator().rank()==0)
1832 std::cout<<
" Merging indices took "<<
1833 time.elapsed()<<std::endl;
1839 typedef typename std::set<GI>::const_iterator
SIter;
1841 outputIndexSet.add(*
g,LocalIndex(i, OwnerOverlapCopyAttributeSet::copy,
true));
1846#ifdef DUNE_ISTL_WITH_CHECKING
1848 typedef typename OOComm::ParallelIndexSet::const_iterator Iterator;
1850 for(Iterator index =
outputIndexSet.begin(); index != end; ++index) {
1851 if (OwnerSet::contains(index->local().attribute())) {
1866 if(
old->global()>index->global())
1872 oocomm.communicator().barrier();
1873 if(
oocomm.communicator().rank()==0)
1874 std::cout<<
" Adding overlap indices took "<<
1875 time.elapsed()<<std::endl;
1890 oocomm.communicator().barrier();
1891 if(
oocomm.communicator().rank()==0)
1892 std::cout<<
" Storing indexsets took "<<
1893 time.elapsed()<<std::endl;
1900 std::cout<<std::endl
1901 <<
mype<<
": WTime for step 1): "<<
t1
1913 template<
class G,
class P,
class T1,
class T2,
class R>
1914 bool graphRepartition(
const G& graph, P& oocomm,
int nparts,
1919 if(nparts!=oocomm.size())
1920 DUNE_THROW(NotImplemented,
"only available for MPI programs");
1924 template<
class G,
class P,
class T1,
class T2,
class R>
1930 if(nparts!=oocomm.size())
1931 DUNE_THROW(NotImplemented,
"only available for MPI programs");
Classes providing communication interfaces for overlapping Schwarz methods.
int globalOwnerVertices
Definition repartition.hh:167
Provides classes for building the matrix graph.
Matrix & mat
Definition matrixmatrix.hh:343
Definition basearray.hh:19
bool graphRepartition(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, idxtype nparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > *&outcomm, RedistributeInterface &redistInf, bool verbose=false)
Executes a graph repartition for a given graph and index set.
Definition repartition.hh:1255
bool isValidGraph(std::size_t noVtx, std::size_t gnoVtx, idxtype noEdges, idxtype *xadj, idxtype *adjncy, bool checkSymmetry)
Definition repartition.hh:803
void fillIndexSetHoles(const G &graph, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm)
Fills the holes in an index set.
Definition repartition.hh:58
bool commGraphRepartition(const M &mat, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, idxtype nparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > *&outcomm, RedistributeInterface &redistInf, bool verbose=false)
Definition repartition.hh:846
bool buildCommunication(const G &graph, std::vector< int > &realparts, Dune::OwnerOverlapCopyCommunication< T1, T2 > &oocomm, Dune::OwnerOverlapCopyCommunication< T1, T2 > *&outcomm, RedistributeInterface &redistInf, bool verbose=false)
Definition repartition.hh:1481
void print_carray(S &os, T *array, std::size_t l)
Definition repartition.hh:797
Statistics about compression achieved in implicit mode.
Definition bcrsmatrix.hh:81
Error class derived from the base error class in dune-common.
Definition istlexception.hh:16
@ owner
Definition owneroverlapcopy.hh:60
The (undirected) graph of a matrix.
Definition graph.hh:49
Definition repartition.hh:255
void reserveSpaceForReceiveInterface(int proc, int size)
Definition repartition.hh:282
void buildReceiveInterface(std::vector< std::pair< TG, int > > &indices)
Definition repartition.hh:291
~RedistributeInterface()
Definition repartition.hh:300
void setCommunicator(MPI_Comm comm)
Definition repartition.hh:256
void buildSendInterface(const std::vector< int > &toPart, const IS &idxset)
Definition repartition.hh:261
void addReceiveIndex(int proc, std::size_t idx)
Definition repartition.hh:286