Go to the documentation of this file.
3 #ifndef DUNE_GRID_YASPGRID_TORUS_HH
4 #define DUNE_GRID_YASPGRID_TORUS_HH
17 #include <dune/common/binaryfunctions.hh>
42 template<
class CollectiveCommunication,
int d>
82 for (
int i=0; i<d; i++)
89 if (inc != _comm.size())
90 DUNE_THROW(Dune::Exception,
"Communicator size and result of the given load balancer do not match!");
127 CollectiveCommunication
comm ()
const
141 for (
int i=d-1; i>=0; i--)
142 if (c[i]<0 || c[i]>=_dims[i])
return false;
151 for (
int i=d-1; i>=0; i--)
162 for (
int i=0; i<d; i++)
coord[i] =
coord[i]%_dims[i];
164 for (
int i=0; i<d; i++)
rank +=
coord[i]*_increment[i];
172 coord[dir] = (
coord[dir]+_dims[dir]+cnt)%_dims[dir];
183 for (
int i=0; i<d; i++)
185 if (
coord[i]%2==1) c += power;
190 for (
int i=0; i<d; i++)
192 if (_dims[i]>1 &&
coord[i]==_dims[i]-1) c += power;
209 for (
int i=0; i<d; ++i)
219 for (
int i=0; i<d; i++)
224 if (
coord[i]==0 && periodic[i]==
false)
return false;
229 if (
coord[i]==_dims[i]-1 && periodic[i]==
false)
return false;
249 for (
int i=0; i<d; i++)
252 int m = size_in[i]/_dims[i];
253 int r = size_in[i]%_dims[i];
257 if (
coord[i]<_dims[i]-r)
259 origin_out[i] = origin_in[i] +
coord[i]*m;
265 origin_out[i] = origin_in[i] + (_dims[i]-r)*m + (
coord[i]-(_dims[i]-r))*(m+1);
270 return maxsize/(sz/_comm.size());
310 for (
int j=0; j<d; ++j)
336 typename std::deque<CommPartner>::const_iterator i;
342 return ProcListIterator(_sendlist.begin());
348 return ProcListIterator(_sendlist.end());
354 return ProcListIterator(_recvlist.begin());
360 return ProcListIterator(_recvlist.end());
368 task.buffer = buffer;
370 if (
rank!=_comm.rank())
371 _sendrequests.push_back(task);
373 _localsendrequests.push_back(task);
381 task.buffer = buffer;
383 if (
rank!=_comm.rank())
384 _recvrequests.push_back(task);
386 _localrecvrequests.push_back(task);
393 if (_localsendrequests.size()!=_localrecvrequests.size())
395 std::cout <<
"[" <<
rank() <<
"]: ERROR: local sends/receives do not match in exchange!" << std::endl;
398 for (
unsigned int i=0; i<_localsendrequests.size(); i++)
400 if (_localsendrequests[i].size!=_localrecvrequests[i].size)
402 std::cout <<
"[" <<
rank() <<
"]: ERROR: size in local sends/receive does not match in exchange!" << std::endl;
405 memcpy(_localrecvrequests[i].buffer,_localsendrequests[i].buffer,_localsendrequests[i].size);
407 _localsendrequests.clear();
408 _localrecvrequests.clear();
416 for (
unsigned int i=0; i<_sendrequests.size(); i++)
421 MPI_Isend(_sendrequests[i].buffer, _sendrequests[i].size, MPI_BYTE,
422 _sendrequests[i].
rank, _tag, _comm, &(_sendrequests[i].request));
423 _sendrequests[i].flag =
false;
428 for (
unsigned int i=0; i<_recvrequests.size(); i++)
433 MPI_Irecv(_recvrequests[i].buffer, _recvrequests[i].size, MPI_BYTE,
434 _recvrequests[i].
rank, _tag, _comm, &(_recvrequests[i].request));
435 _recvrequests[i].flag =
false;
442 for (
unsigned int i=0; i<_sendrequests.size(); i++)
443 if (!_sendrequests[i].flag)
446 MPI_Test( &(_sendrequests[i].request), &(_sendrequests[i].flag), &status);
447 if (_sendrequests[i].flag)
458 for (
unsigned int i=0; i<_recvrequests.size(); i++)
459 if (!_recvrequests[i].flag)
462 MPI_Test( &(_recvrequests[i].request), &(_recvrequests[i].flag), &status);
463 if (_recvrequests[i].flag)
473 _sendrequests.clear();
474 _recvrequests.clear();
482 _comm.template allreduce<Dune::Max<double>,
double>(&x, &res, 1);
489 s <<
"[" <<
rank() <<
"]: Torus " <<
procs() <<
" processor(s) arranged as " <<
dims() << std::endl;
492 s <<
"[" <<
rank() <<
"]: send to "
493 <<
"rank=" << i.rank()
494 <<
" index=" << i.index()
495 <<
" delta=" << i.delta() <<
" dist=" << i.distance() << std::endl;
499 s <<
"[" <<
rank() <<
"]: recv from "
500 <<
"rank=" << i.rank()
501 <<
" index=" << i.index()
502 <<
" delta=" << i.delta() <<
" dist=" << i.distance() << std::endl;
514 std::fill(delta.begin(), delta.end(), -1);
523 for (
int i=0; i<d; i++)
524 nb[i] = ( me[i]+_dims[i]+delta[i] ) % _dims[i];
530 for (
int i=0; i<d; i++)
536 _recvlist.push_back(cp);
537 cp.index = last-index;
538 _sendlist.push_front(cp);
545 for (
int i=0; i<d; i++)
560 CollectiveCommunication _comm;
565 std::deque<CommPartner> _sendlist;
566 std::deque<CommPartner> _recvlist;
568 mutable std::vector<CommTask> _sendrequests;
569 mutable std::vector<CommTask> _recvrequests;
570 mutable std::vector<CommTask> _localsendrequests;
571 mutable std::vector<CommTask> _localrecvrequests;
576 template <
class CollectiveCommunication,
int d>
int color(int rank) const
assign color to given rank
Definition: torus.hh:200
int color(const iTupel &coord) const
assign color to given coordinate
Definition: torus.hh:177
ProcListIterator(typename std::deque< CommPartner >::const_iterator iter)
make an iterator
Definition: torus.hh:282
iTupel coord() const
return own coordinates
Definition: torus.hh:103
Include standard header files.
Definition: agrid.hh:59
bool inside(iTupel c) const
return true if coordinate is inside torus
Definition: torus.hh:139
int neighbors() const
return the number of neighbors, which is 3^d-1 for a d-dimensional torus
Definition: torus.hh:206
const iTupel & dims() const
return dimensions of torus
Definition: torus.hh:115
int tag() const
return tag used by torus
Definition: torus.hh:133
CollectiveCommunication comm() const
return communicator
Definition: torus.hh:127
ProcListIterator sendend() const
end of send list
Definition: torus.hh:346
int rank() const
return own rank
Definition: torus.hh:97
void send(int rank, void *buffer, int size) const
store a send request; buffers are sent in order; handles also local requests with memcpy
Definition: torus.hh:364
ProcListIterator & operator++()
Increment iterator to next cell.
Definition: torus.hh:329
Torus(CollectiveCommunication comm, int tag, iTupel size, const YLoadBalance< d > *lb)
make partitioner from communicator and coarse mesh size
Definition: torus.hh:74
a base class for the YaspGrid partitioning strategy. The name might be irritating; it will probably change in a future release.
Definition: partitioning.hh:23
ProcListIterator recvend() const
end of receive list
Definition: torus.hh:358
void abs(const DofVectorPointer< int > &dofVector)
Definition: dofvector.hh:326
std::ostream & operator<<(std::ostream &out, const PartitionType &type)
write a PartitionType to a stream
Definition: gridenums.hh:70
iTupel rank_to_coord(int rank) const
map rank to coordinate in torus using lexicographic ordering
Definition: torus.hh:147
int procs() const
return number of processes
Definition: torus.hh:109
iTupel delta() const
return distance vector
Definition: torus.hh:294
int coord_to_rank(iTupel coord) const
map coordinate in torus to rank using lexicographic ordering
Definition: torus.hh:160
int index() const
return index in proclist
Definition: torus.hh:300
This file provides tools to partition YaspGrids. If you want to write your own partitioner,...
int rank() const
return rank of neighboring process
Definition: torus.hh:288
int rank_relative(int rank, int dir, int cnt) const
return rank of process where its coordinate in direction dir has offset cnt (handles periodic case)
Definition: torus.hh:169
ProcListIterator sendbegin() const
first process in send list
Definition: torus.hh:340
bool operator!=(const ProcListIterator &iter)
Return true when two iterators do not point to same member.
Definition: torus.hh:323
bool operator==(const ProcListIterator &iter)
Return true when two iterators point to same member.
Definition: torus.hh:316
bool is_neighbor(iTupel delta, std::bitset< d > periodic) const
return true if neighbor with given delta is a neighbor under the given periodicity
Definition: torus.hh:215
int distance() const
return 1-norm of distance vector
Definition: torus.hh:306
double global_max(double x) const
global max
Definition: torus.hh:479
Torus()
constructor making uninitialized object
Definition: torus.hh:70
std::array< int, d > iTupel
type used to pass tupels in and out
Definition: torus.hh:46
ProcListIterator recvbegin() const
first process in receive list
Definition: torus.hh:352
void print(std::ostream &s) const
print contents of torus object
Definition: torus.hh:487
void exchange() const
exchange messages stored in request buffers; clear request buffers afterwards
Definition: torus.hh:390
void recv(int rank, void *buffer, int size) const
store a receive request; buffers are received in order; handles also local requests with memcpy
Definition: torus.hh:377
virtual void loadbalance(const iTupel &, int, iTupel &) const =0
int dims(int i) const
return dimensions of torus in direction i
Definition: torus.hh:121
double partition(int rank, iTupel origin_in, iTupel size_in, iTupel &origin_out, iTupel &size_out) const
partition the given grid onto the torus and return the piece of the process with given rank; returns ...
Definition: torus.hh:242