Actual source code: dasub.c

/*$Id: dasub.c,v 1.33 2001/03/23 23:25:00 balay Exp $*/

/*
  Code for manipulating distributed regular arrays in parallel.
*/

#include "src/dm/da/daimpl.h"

#undef __FUNCT__
#define __FUNCT__ "DAGetProcessorSubset"
/*@C
   DAGetProcessorSubset - Returns a communicator consisting only of the
   processors in a DA that own a particular global x, y, or z grid point
   (corresponding to a logical plane in a 3D grid or a line in a 2D grid).

   Collective on DA

   Input Parameters:
+  da - the distributed array
.  dir - Cartesian direction, either DA_X, DA_Y, or DA_Z
-  gp - global grid point number in this direction

   Output Parameter:
.  comm - new communicator

   Level: advanced

   Notes:
   This routine is particularly useful for computing boundary conditions
   or other application-specific calculations that require manipulating
   sets of data throughout a logical plane of grid points.

.keywords: distributed array, get, processor subset
@*/
int DAGetProcessorSubset(DA da,DADirection dir,int gp,MPI_Comm *comm)
{
  MPI_Group group,subgroup;
  int       ierr,i,ict,flag,size,*ranks,*owners,xs,xm,ys,ym,zs,zm;

  PetscFunctionBegin;
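  /* Determine whether this process owns the plane (or line) containing global
     grid point gp in the requested direction; flag is 1 only on owning processes. */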
  flag = 0;
  ierr = DAGetCorners(da,&xs,&xm,&ys,&ym,&zs,&zm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(da->comm,&size);CHKERRQ(ierr);
  if (dir == DA_Z) {
    if (da->dim < 3) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Z invalid for DA dim < 3");
    if (gp < 0 || gp > da->P) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= zs && gp < zs+zm) flag = 1;
  } else if (dir == DA_Y) {
    if (da->dim == 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"DA_Y invalid for DA dim = 1");
    if (gp < 0 || gp > da->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= ys && gp < ys+ym) flag = 1;
  } else if (dir == DA_X) {
    if (gp < 0 || gp > da->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"invalid grid point");
    if (gp >= xs && gp < xs+xm) flag = 1;
  } else SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid direction");

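  /* Gather every process's ownership flag, record the ranks that own the grid
     point, and create the subset communicator from that group of ranks. */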
  ierr = PetscMalloc(2*size*sizeof(int),&owners);CHKERRQ(ierr);
  ranks = owners + size;
  ierr = MPI_Allgather(&flag,1,MPI_INT,owners,1,MPI_INT,da->comm);CHKERRQ(ierr);
  ict  = 0;
  PetscLogInfo(da,"DAGetProcessorSubset: dim=%d, direction=%d, procs: ",da->dim,(int)dir);
  for (i=0; i<size; i++) {
    if (owners[i]) {
      ranks[ict] = i; ict++;
      PetscLogInfo(da,"%d ",i);
    }
  }
  PetscLogInfo(da,"\n");
  ierr = MPI_Comm_group(da->comm,&group);CHKERRQ(ierr);
  ierr = MPI_Group_incl(group,ict,ranks,&subgroup);CHKERRQ(ierr);
  ierr = MPI_Comm_create(da->comm,subgroup,comm);CHKERRQ(ierr);
  ierr = PetscFree(owners);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
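
A minimal usage sketch, not part of dasub.c: assuming a DA named da has already been created (for example with DACreate3d()), a caller might use DAGetProcessorSubset() to obtain the communicator of the processes that own the z = 0 plane, e.g. to apply a boundary condition there. The function name ApplyBottomBoundaryCondition and the choice of plane index are illustrative only.

#include "petscda.h"

#undef __FUNCT__
#define __FUNCT__ "ApplyBottomBoundaryCondition"
int ApplyBottomBoundaryCondition(DA da)
{
  MPI_Comm zcomm;
  int      ierr;

  PetscFunctionBegin;
  /* Collective on the DA: every process must make this call, even those that
     do not own the z = 0 plane; those processes receive MPI_COMM_NULL. */
  ierr = DAGetProcessorSubset(da,DA_Z,0,&zcomm);CHKERRQ(ierr);
  if (zcomm != MPI_COMM_NULL) {
    /* ... apply the boundary condition using only the processes in zcomm ... */
    ierr = MPI_Comm_free(&zcomm);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}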