parallel/pico/DomainCoupler.cpp

// -*- C++ -*-
//
//  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
//                                   Fehmi Cirak
//                        California Institute of Technology
//                           (C) 2004 All Rights Reserved
//
//  <LicenseText>
//
//  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// EDITED BY MATT GALLOWAY FOR USE WITH DAPTA

#include "DAPTA.hpp"

#include <algorithm>   // std::min, std::max
#include <cassert>
#include <cstdlib>
#include <limits>
#include <string>      // std::string in relevantSubDomainNumbers()

namespace DAPTA { // Define namespace DAPTA

namespace pico { // Define namespace pico

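// Register the local subdomain for a given solver type from an explicit
// axis-aligned bounding box (lower and upper corner coordinates).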
template <typename V>
void DomainCoupler<V>::registerSubDomain(const SolvType& type, const CoordArrayType lo,
                                         const CoordArrayType up, const double& maxElemSize) {
   VertexType loV(lo);
   VertexType upV(up);

   SubDomain<V> *sd = new SubDomain<V>(loV, upV, maxElemSize);

   int rank;
   MPI_Comm_rank(_pComm, &rank);

   _subDomains[type][rank] = sd;
}

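// Register the local subdomain from a flat array of node coordinates
// (V::dimension values per node); the bounding box is computed here and
// handed to the bounding-box overload above.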
template <typename V>
void DomainCoupler<V>::registerSubDomain(const SolvType& type, const CoordType * const coor,
                                         const int& nodes, const double& maxElemSize) {
   // start from an inverted (empty) box; note that numeric_limits::min() is the
   // smallest *positive* value for floating-point types, so -max() is used as the
   // initial upper bound to handle negative coordinates correctly
   const CoordType max = std::numeric_limits<CoordType>::max();
   const CoordType min = -max;

   CoordArrayType lo;
   CoordArrayType up;

   for(int i=0; i<V::dimension; ++i) {
      lo[i] = max;
      up[i] = min;
   }

   for(int i=0; i<nodes; ++i) {
      for(int j=0; j<V::dimension; ++j) {
         lo[j] = std::min(lo[j], coor[V::dimension*i+j]);
         up[j] = std::max(up[j], coor[V::dimension*i+j]);
      }
   }

   registerSubDomain(type, lo, up, maxElemSize);
}

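// Exchange the bounding boxes of all locally registered subdomains with every
// other processor.  Each rank packs, per solver type, a type flag, the lower
// and upper corner coordinates and the maximum element size into a fixed-size
// buffer; after an MPI_Allgather the remote subdomains are reconstructed.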
template <typename V>
void DomainCoupler<V>::gatherSubDomains() {
   // per solver type: 1 type flag + 2*dimension corner coordinates + 1 element size
   const int numVarHalf = 8;
   const int numVar = 2*numVarHalf;
   double myRBuf[numVar];

   // initialize the send buffer so that no uninitialized values are gathered
   // when a subdomain type is not registered on this rank
   for (int i=0; i<numVar; ++i) myRBuf[i] = 0.0;

   int rank;
   MPI_Comm_rank(_pComm, &rank);

   VertexType loV, upV;
   for (int i=0; i<2; ++i) {

      _SubDomainIt its, itse = _subDomains[i].end();
      its = _subDomains[i].find(rank);

      int start = i*numVarHalf;

      if (its==itse)
         myRBuf[start++] = -1;
      else
         myRBuf[start++] = i;

      if (its==itse) continue;

      SubDomain<V> *sd = its->second;

      loV = sd->lowerPoint();
      upV = sd->upperPoint();

      for(int j=0; j<V::dimension; j++) {
         myRBuf[start++] = loV.coords[j];
         myRBuf[start++] = upV.coords[j];
      }
      myRBuf[start++] = sd->maxElemSize();
   }

   int numDomains;
   MPI_Comm_size(_pComm, &numDomains);

   double *rBuf = new double[numDomains*numVar];

   MPI_Allgather(myRBuf, numVar, MPI_DOUBLE, rBuf, numVar, MPI_DOUBLE, _pComm);

   // create the subdomains on other processors
   for (int j=0; j<numDomains; ++j) {

      if (j==rank) continue;

      for (int i=0; i<2; ++i) {
         int start = j*numVar+i*numVarHalf;

         int domainType = static_cast<int>(rBuf[start++]);
         if (domainType == -1) continue;

         for(int k=0; k<V::dimension; k++) {
            loV.coords[k] = rBuf[start++];
            upV.coords[k] = rBuf[start++];
         }

         double elemSize = rBuf[start++];

         SubDomain<V> *sd = new SubDomain<V>(loV, upV, elemSize);

         _subDomains[domainType][j] = sd;
      }
   }

   delete[] rBuf;
}

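// Determine which remote subdomains of the opposite solver type overlap the
// local subdomain; the result is stored in _relevSubDomainsInter.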
template <typename V>
void DomainCoupler<V>::prepareCommunicationsInter() {
   // gather the bounding box data of all subdomains
   gatherSubDomains();

   int rank, otherType[2]={1, 0};
   MPI_Comm_rank(_pComm, &rank);

   // check if there are two types of subDomains
   _SubDomainIt itt, itb[2], ite[2];
   for (int type=0; type<2; ++type) {
      itb[type] = _subDomains[type].begin();
      ite[type] = _subDomains[type].end();
      if (itb[type]==ite[type]) return;
   }

   // find all relevant subdomains of the opposite type
   for (int type=0; type<2; ++type) {

      itt = _subDomains[type].find(rank);
      if (itt==ite[type]) continue;
      SubDomain<V>* myDomain = itt->second;

      _SubDomainIt its = itb[otherType[type]], itse = ite[otherType[type]];

      // TODO: enlarge my bounding box by the requested element overlap size

      for ( ; its!=itse; ++its) {
         SubDomain<V> *sd = its->second;
         if (sd->checkOverlap(myDomain)) {
            _relevSubDomainsInter[otherType[type]][its->first] = sd;
         }
      }
   }
}

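// Determine which remote subdomains of the same solver type overlap the
// local subdomain; the result is stored in _relevSubDomainsIntra.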
template <typename V>
void DomainCoupler<V>::prepareCommunicationsIntra(const SolvType& type) {
   // gather the bounding box data of all subdomains
   gatherSubDomains();

   int rank;
   MPI_Comm_rank(_pComm, &rank);

   // check if there is a solver of the given type on this rank
   _SubDomainIt it, ite=_subDomains[type].end();

   it = _subDomains[type].find(rank);
   if (it==ite) return;
   SubDomain<V> *myDomain = it->second;

   // TODO: enlarge my bounding box by the requested element overlap size;
   // this might later be necessary for shells

   it = _subDomains[type].begin();
   for ( ; it!=ite; ++it) {
      SubDomain<V> *sd = it->second;
      if (sd==myDomain) continue;

      if (sd->checkOverlap(myDomain)) {
         _relevSubDomainsIntra[type][it->first] = sd;
      }
   }
}

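// Exchange the send-buffer sizes with all overlapping subdomains of the same
// type, record the size each neighbor will send, and allocate the local
// receive buffer to hold the combined incoming data.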
template <typename V>
void DomainCoupler<V>::exchangeBufferSizesIntra(const SolvType& type) {
   int rank;
   MPI_Comm_rank(_pComm, &rank);

   _SubDomainIt its, itse = _subDomains[type].end();
   its = _subDomains[type].find(rank);

   if (its==itse) return;

   SubDomain<V>* myDomain = its->second;

   int mySBuf = myDomain->sendMsgSize();

   _SubDomainIt it, itb = _relevSubDomainsIntra[type].begin(), ite = _relevSubDomainsIntra[type].end();

   MPI_Status status;
   int tag=0, rBuf=0, myRBuf;
   for (it=itb; it!=ite; ++it) {
      // swap send/receive sizes with each overlapping rank; the remote
      // subdomain proxy stores the size it will send to us
      MPI_Sendrecv(&mySBuf, 1, MPI_INT, it->first, tag, &myRBuf, 1, MPI_INT, it->first, tag, _pComm, &status);
      (it->second)->setSendMsgSize(myRBuf);
      rBuf += myRBuf;
   }

   _subDomains[type][rank]->allocateReceiveBuffer(rBuf);

   assert(rBuf>0);
}

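// Exchange the registered send buffers between all overlapping subdomains of
// the same type: non-blocking receives are posted first, a barrier guarantees
// that every receive is in place, and the data is then shipped with ready
// sends before waiting for completion.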
template <typename V>
void DomainCoupler<V>::exchangeSubdomainDataIntra(const SolvType& type) {
   exchangeBufferSizesIntra(type);

   int rank;
   MPI_Comm_rank(_pComm, &rank);
   SubDomain<V>* myDomain = _subDomains[type][rank];
   char *recvBuf = myDomain->recvBuffer();

   MPI_Request *request =
      (MPI_Request *) std::malloc(_relevSubDomainsIntra[type].size()*sizeof(MPI_Request));
   MPI_Status *status =
      (MPI_Status *) std::malloc(_relevSubDomainsIntra[type].size()*sizeof(MPI_Status));

   _SubDomainIt it, itb = _relevSubDomainsIntra[type].begin();
   _SubDomainIt ite = _relevSubDomainsIntra[type].end();

   // post one non-blocking receive per overlapping subdomain; the incoming
   // messages are packed back to back into the receive buffer
   int i, tag=0, start = 0;
   for (i=0, it=itb; it!=ite; ++it, ++i) {
      int sbufsize = (it->second)->sendMsgSize();
      int number = it->first;
      assert(recvBuf != NULL);

      MPI_Irecv((void *) &(recvBuf[start]), sbufsize, MPI_BYTE,
                number, tag, _pComm, &request[i]);

      start += sbufsize;
      assert (start<=myDomain->recvBufferSize());
   }

   // make sure that all the receives are posted before sending, as required by MPI_Rsend;
   // think about defining a new communicator and synchronizing only that
   MPI_Barrier(_pComm);

   for (i=0, it=itb; it!=ite; ++it, ++i) {
      MPI_Rsend((void *) myDomain->sendBuffer(), myDomain->sendMsgSize(),
                MPI_BYTE, it->first, tag, _pComm);
   }

   if (_relevSubDomainsIntra[type].size()) {
      MPI_Waitall(_relevSubDomainsIntra[type].size(), request, status);
   }

   std::free(request);
   std::free(status);
}

// helper - for debugging purposes
template <typename V>
template <typename Iterator>
void DomainCoupler<V>::relevantSubDomainNumbers(Iterator domainNumbers, int t, char *tch) {
   std::string commT(tch);
   _SubDomainIt it, ite;

   if (commT=="Inter") {
      it = _relevSubDomainsInter[t].begin();
      ite = _relevSubDomainsInter[t].end();
   } else if (commT=="Intra") {
      it = _relevSubDomainsIntra[t].begin();
      ite = _relevSubDomainsIntra[t].end();
   } else
      return;

   for ( ; it!=ite; ++it) *(domainNumbers++) = it->first;

   return;
}

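// Register the memory region that should be sent to the overlapping
// subdomains; the size is given in units of T and converted to bytes here.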
template <typename V>
template <typename T>
void DomainCoupler<V>::registerSendBuffer(const SolvType& type, T* const start, int size) {
   int rank;
   MPI_Comm_rank(_pComm, &rank);

   _SubDomainIt it, ite=_subDomains[type].end();

   it = _subDomains[type].find(rank);
   if (it==ite) return;

   SubDomain<V>* myDomain = it->second;

   // convert the size from number of elements to number of bytes
   size_t unit = sizeof(T);
   size *= unit;

   char *startCh = reinterpret_cast<char *>(start);

   myDomain->registerSendBuffer(startCh, size);

   return;
}

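// Return the receive buffer of the local subdomain reinterpreted as a pointer
// to RT; the second argument only serves to deduce the template parameter.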
template <typename V>
template <typename RT>
RT* DomainCoupler<V>::recvBuffer(const SolvType& type, const RT& notUsed) {
   // notUsed is only "used" by the compiler for template argument deduction
   char *tmp=NULL;
   int rank;
   MPI_Comm_rank(_pComm, &rank);

   _SubDomainIt it, ite=_subDomains[type].end();
   it = _subDomains[type].find(rank);
   if (it==ite) return NULL;
   SubDomain<V>* myDomain = it->second;

   tmp = myDomain->recvBuffer();

   RT *tmpRT = reinterpret_cast<RT *>(tmp);

   return tmpRT;
}

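// Return the size of the local receive buffer in units of T
// (the buffer size is stored internally in bytes).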
template <typename V>
template <typename T>
int DomainCoupler<V>::recvBufferSize(const SolvType& type) {
   int tmp=0, rank;
   MPI_Comm_rank(_pComm, &rank);

   _SubDomainIt it, ite=_subDomains[type].end();
   it = _subDomains[type].find(rank);
   if (it==ite) return tmp;
   SubDomain<V>* myDomain = it->second;

   tmp = myDomain->recvBufferSize();

   // convert from bytes to number of elements of type T
   size_t unit = sizeof(T);
   tmp /= unit;

   return tmp;
}

} // namespace pico

} // namespace DAPTA
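// Illustrative usage sketch (not part of the original source).  It shows the
// intended calling order of the member functions defined above for a purely
// intra-type exchange; the vertex type V3, the solver-type value FLUID, the
// communicator-taking constructor and the data arrays are assumptions made
// for illustration only.
//
//    DomainCoupler<V3> coupler(comm);                      // hypothetical constructor
//    coupler.registerSubDomain(FLUID, coords, nNodes, h);  // bounding box from node coordinates
//    coupler.prepareCommunicationsIntra(FLUID);            // find overlapping subdomains of the same type
//    coupler.registerSendBuffer(FLUID, sendData, nSend);   // data to ship to the overlapping ranks
//    coupler.exchangeSubdomainDataIntra(FLUID);            // post receives, barrier, ready-send, wait
//    double *recv = coupler.recvBuffer(FLUID, double());   // typed view of the received bytes
//    int nRecv    = coupler.recvBufferSize<double>(FLUID); // received size in units of double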
