Actual source code: mpimesg.c

/*$Id: mpimesg.c,v 1.14 2001/08/07 03:02:06 balay Exp $*/

#include "petsc.h"


#undef __FUNCT__
#define __FUNCT__ "PetscGatherNumberOfMessages"
/*@C
  PetscGatherNumberOfMessages - Computes the number of messages a node expects to receive

  Collective on MPI_Comm

  Input Parameters:
+ comm     - Communicator
. iflags   - an array of integers of length equal to the size of comm. A '1' in iflags[i]
             represents a message from the current node to the ith node. Optionally PETSC_NULL
- ilengths - a nonzero ilengths[i] represents a message to node i of length ilengths[i].
             Optionally PETSC_NULL.

  Output Parameters:
. nrecvs    - number of messages the current node will receive

  Level: developer

  Concepts: mpi utility

 27:   Notes:
 28:   With this info, the correct message lengths can be determined using
 29:   PetscGatherMessageLengths()

 31:   Either iflags or ilengths should be provided.  If iflags is not
 32:   provided (PETSC_NULL) it can be computed from ilengths. If iflags is
 33:   provided, ilengths is not required.

 35: .seealso: PetscGatherMessageLengths()
 36: @*/
int PetscGatherNumberOfMessages(MPI_Comm comm,int *iflags,int *ilengths,int *nrecvs)
{
  int *recv_buf,size,rank,i,ierr,*iflags_local;

  MPI_Comm_size(comm,&size);
  MPI_Comm_rank(comm,&rank);


  /* If iflags not provided, compute iflags from ilengths */
  if (!iflags) {
    if (!ilengths) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Either iflags or ilengths should be provided");
    PetscMalloc(size*sizeof(int),&iflags_local);
    for (i=0; i<size; i++) {
      if (ilengths[i]) iflags_local[i] = 1;
      else iflags_local[i] = 0;
    }
  } else {
    iflags_local = iflags;
  }

  PetscMalloc(size*sizeof(int),&recv_buf);

  /* Now post an allreduce to determine the number of messages the current node
     will receive: summing the 0/1 flag vectors element-wise leaves in recv_buf[j]
     the number of nodes that send to node j */
  ierr    = MPI_Allreduce(iflags_local,recv_buf,size,MPI_INT,MPI_SUM,comm);
  *nrecvs = recv_buf[rank];

  if (!iflags) {
    PetscFree(iflags_local);
  }
  PetscFree(recv_buf);

  return(0);
}
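/*
   Editorial usage sketch, not part of the original mpimesg.c: how a caller
   might obtain nrecvs when it only knows the lengths it intends to send.
   The function name and the `slens' argument are hypothetical.
*/
#if 0
int ExampleCountIncoming(MPI_Comm comm,int *slens /* length = size of comm */)
{
  int ierr,nrecvs;

  /* Pass PETSC_NULL for iflags; PetscGatherNumberOfMessages() derives the
     0/1 flags from the nonzero entries of slens */
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,slens,&nrecvs);CHKERRQ(ierr);
  /* nrecvs now counts the nodes that will send a message to this node */
  return 0;
}
#endif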


#undef __FUNCT__
#define __FUNCT__ "PetscGatherMessageLengths"
/*@C
  PetscGatherMessageLengths - Computes info about the messages an MPI-node will receive,
  including (from-id,length) pairs for each message.

  Collective on MPI_Comm

  Input Parameters:
+ comm      - Communicator
. nsends    - number of messages that are to be sent
. nrecvs    - number of messages being received
- ilengths  - an array of integers of length equal to the size of comm;
              a nonzero ilengths[i] represents a message to node i of length ilengths[i]


  Output Parameters:
+ onodes    - list of node-ids from which messages are expected
- olengths  - corresponding message lengths

  Level: developer

  Concepts: mpi utility

  Notes:
  With this info, the correct MPI_Irecv() can be posted with the correct
  from-id, using a buffer of the right size.

  The caller is responsible for freeing the memory returned in onodes and olengths.

  To determine nrecvs, one can use PetscGatherNumberOfMessages()

.seealso: PetscGatherNumberOfMessages()
@*/
int PetscGatherMessageLengths(MPI_Comm comm,int nsends,int nrecvs,int *ilengths,int **onodes,int **olengths)
{
  int         size,i,j,tag,ierr;
  MPI_Request *s_waits,*r_waits;
  MPI_Status  *w_status;


  MPI_Comm_size(comm,&size);
  PetscCommGetNewTag(comm,&tag);

  PetscMalloc((nrecvs+nsends+1)*sizeof(MPI_Request),&r_waits);
  s_waits = r_waits + nrecvs;

  /* Now post the Irecvs to get the message length-info */
  PetscMalloc((nrecvs+1)*sizeof(int),olengths);
  for (i=0; i<nrecvs; i++) {
    MPI_Irecv((*olengths)+i,1,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits+i);
  }

  /* Now post the Isends with the message length-info */
  for (i=0,j=0; i<size; ++i) {
    if (ilengths[i]) {
      MPI_Isend(ilengths+i,1,MPI_INT,i,tag,comm,s_waits+j);
      j++;
    }
  }

  /* Now post waits on all the sends and receives */
  PetscMalloc((nrecvs+nsends+1)*sizeof(MPI_Status),&w_status);
  MPI_Waitall(nrecvs+nsends,r_waits,w_status);

  /* Now pack up the received data: the MPI_SOURCE of each status entry
     identifies the node that sent the corresponding length message */
  PetscMalloc((nrecvs+1)*sizeof(int),onodes);
  for (i=0; i<nrecvs; ++i) {
    (*onodes)[i] = w_status[i].MPI_SOURCE;
  }

  PetscFree(r_waits);
  PetscFree(w_status);

  return(0);
}
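/*
   Editorial usage sketch, not part of the original mpimesg.c: the intended
   call sequence of the two gather routines.  `slens' (per-destination send
   lengths) and the example function name are hypothetical.
*/
#if 0
int ExampleGatherPattern(MPI_Comm comm,int *slens)
{
  int i,ierr,size,nsends,nrecvs,*onodes,*olengths;

  MPI_Comm_size(comm,&size);
  /* nsends is the number of nonzero entries in slens */
  for (i=0,nsends=0; i<size; i++) if (slens[i]) nsends++;
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,slens,&nrecvs);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nsends,nrecvs,slens,&onodes,&olengths);CHKERRQ(ierr);
  /* onodes[i],olengths[i] now describe the ith expected message; per the
     note above, the caller frees both arrays */
  ierr = PetscFree(onodes);CHKERRQ(ierr);
  ierr = PetscFree(olengths);CHKERRQ(ierr);
  return 0;
}
#endif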

/*
  Allocate a buffer large enough to hold the messages whose sizes are given
  in olengths, and post Irecvs on these buffers using the node info from onodes
 */
#undef __FUNCT__
#define __FUNCT__ "PetscPostIrecvInt"
int PetscPostIrecvInt(MPI_Comm comm,int tag,int nrecvs,int *onodes,int *olengths,int ***rbuf,MPI_Request **r_waits)
{
  int         len=0,**rbuf_t,i,ierr;
  MPI_Request *r_waits_t;


  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */
  len *= sizeof(int);
  len += (nrecvs+1)*sizeof(int*); /* Array of pointers for each message */

  /* allocate memory for recv buffers */
  ierr      = PetscMalloc(len,&rbuf_t);
  rbuf_t[0] = (int*)(rbuf_t + nrecvs);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&r_waits_t);
  for (i=0; i<nrecvs; ++i) {
    MPI_Irecv(rbuf_t[i],olengths[i],MPI_INT,onodes[i],tag,comm,r_waits_t+i);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  return(0);
}
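/*
   Editorial usage sketch, not part of the original mpimesg.c: posting the
   data receives with the info gathered above, then waiting for them to
   complete.  The matching sends are elided; the example name is hypothetical.
*/
#if 0
int ExamplePostAndWait(MPI_Comm comm,int tag,int nrecvs,int *onodes,int *olengths)
{
  int         ierr,**rbuf;
  MPI_Request *r_waits;
  MPI_Status  *r_status;

  ierr = PetscPostIrecvInt(comm,tag,nrecvs,onodes,olengths,&rbuf,&r_waits);CHKERRQ(ierr);
  /* ... the matching MPI_Isend()s are posted on the sending nodes here ... */
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Status),&r_status);CHKERRQ(ierr);
  MPI_Waitall(nrecvs,r_waits,r_status);
  /* rbuf[i] now holds the olengths[i] integers sent by node onodes[i] */
  ierr = PetscFree(r_status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr); /* one allocation holds pointers and data */
  return 0;
}
#endif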

#undef __FUNCT__
#define __FUNCT__ "PetscPostIrecvScalar"
int PetscPostIrecvScalar(MPI_Comm comm,int tag,int nrecvs,int *onodes,int *olengths,PetscScalar ***rbuf,MPI_Request **r_waits)
{
  int         len=0,i,ierr;
  PetscScalar **rbuf_t;
  MPI_Request *r_waits_t;


  /* compute memory required for recv buffers */
  for (i=0; i<nrecvs; i++) len += olengths[i];  /* each message length */
  len *= sizeof(PetscScalar);
  len += (nrecvs+1)*sizeof(PetscScalar*); /* Array of pointers for each message */


  /* allocate memory for recv buffers */
  ierr      = PetscMalloc(len,&rbuf_t);
  rbuf_t[0] = (PetscScalar*)(rbuf_t + nrecvs);
  for (i=1; i<nrecvs; ++i) rbuf_t[i] = rbuf_t[i-1] + olengths[i-1];

  /* Post the receives */
  PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&r_waits_t);
  for (i=0; i<nrecvs; ++i) {
    MPI_Irecv(rbuf_t[i],olengths[i],MPIU_SCALAR,onodes[i],tag,comm,r_waits_t+i);
  }

  *rbuf    = rbuf_t;
  *r_waits = r_waits_t;
  return(0);
}
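/*
   Editorial design note, not part of the original mpimesg.c: both
   PetscPostIrecv variants make a single PetscMalloc() whose front holds the
   nrecvs buffer pointers (padded by one slot for the nrecvs==0 case) and
   whose tail holds the packed message payloads; rbuf_t[0] points just past
   the pointer block and each rbuf_t[i] is offset from its predecessor by
   olengths[i-1].  One PetscFree() on *rbuf therefore releases the pointers
   and all the message buffers at once.
*/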