Actual source code: isltog.c

  1: /*$Id: isltog.c,v 1.65 2001/05/21 14:16:29 bsmith Exp $*/

 3:  #include "petscsys.h"
 4:  #include "src/vec/is/isimpl.h"

  6: EXTERN int VecInitializePackage(char *);

  8: #undef __FUNCT__  
 10: /*@C
 11:     ISLocalToGlobalMappingGetSize - Gets the local size of a local to global mapping.

 13:     Not Collective

 15:     Input Parameter:
 16: .   mapping - local to global mapping

 18:     Output Parameter:
 19: .   n - the number of entries in the local mapping

 21:     Level: advanced

 23:     Concepts: mapping^local to global

 25: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 26: @*/
 27: int ISLocalToGlobalMappingGetSize(ISLocalToGlobalMapping mapping,int *n)
 28: {
 31:   *n = mapping->n;
 32:   return(0);
 33: }

 35: #undef __FUNCT__  
 37: /*@C
 38:     ISLocalToGlobalMappingView - View a local to global mapping

 40:     Not Collective

 42:     Input Parameters:
 43: +   mapping - local to global mapping
 44: -   viewer - viewer used to display the mapping; if PETSC_NULL, PETSC_VIEWER_STDOUT_() on the mapping's communicator is used

 46:     Level: advanced

 48:     Concepts: mapping^local to global

 50: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 51: @*/
 52: int ISLocalToGlobalMappingView(ISLocalToGlobalMapping mapping,PetscViewer viewer)
 53: {
 54:   int        i,ierr,rank;
 55:   PetscTruth isascii;

 59:   if (!viewer) viewer = PETSC_VIEWER_STDOUT_(mapping->comm);

 62:   MPI_Comm_rank(mapping->comm,&rank);
 63:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
 64:   if (isascii) {
 65:     for (i=0; i<mapping->n; i++) {
 66:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] %d %d\n",rank,i,mapping->indices[i]);
 67:     }
 68:     PetscViewerFlush(viewer);
 69:   } else {
 70:     SETERRQ1(1,"Viewer type %s not supported for ISLocalToGlobalMapping",((PetscObject)viewer)->type_name);
 71:   }

 73:   return(0);
 74: }

 76: #undef __FUNCT__  
 78: /*@C
 79:     ISLocalToGlobalMappingCreateIS - Creates a mapping between a local (0 to n)
 80:     ordering and a global parallel ordering.

 82:     Not collective

 84:     Input Parameter:
 85: .   is - index set containing the global number for each local index

 87:     Output Parameter:
 88: .   mapping - new mapping data structure

 90:     Level: advanced

 92:     Concepts: mapping^local to global

 94: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate()
 95: @*/
 96: int ISLocalToGlobalMappingCreateIS(IS is,ISLocalToGlobalMapping *mapping)
 97: {
 98:   int      n,*indices,ierr;
 99:   MPI_Comm comm;


104:   PetscObjectGetComm((PetscObject)is,&comm);
105:   ISGetLocalSize(is,&n);
106:   ISGetIndices(is,&indices);
107:   ISLocalToGlobalMappingCreate(comm,n,indices,mapping);
108:   ISRestoreIndices(is,&indices);

110:   return(0);
111: }
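
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   building a mapping from an existing IS. The index values and the name
   ExampleCreateMappingFromIS are assumptions chosen only for illustration.
*/
static int ExampleCreateMappingFromIS(void)
{
  int                    ierr,globals[3] = {10,12,17};
  IS                     is;
  ISLocalToGlobalMapping ltog;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,3,globals,&is);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingCreateIS(is,&ltog);CHKERRQ(ierr); /* copies the indices out of is */
  ierr = ISDestroy(is);CHKERRQ(ierr);                            /* the mapping keeps its own copy */
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}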


114: #undef __FUNCT__  
116: /*@C
117:     ISLocalToGlobalMappingCreate - Creates a mapping between a local (0 to n)
118:     ordering and a global parallel ordering.

120:     Not Collective, but communicator may have more than one process

122:     Input Parameters:
123: +   comm - MPI communicator
124: .   n - the number of local elements
125: -   indices - the global index for each local element

127:     Output Parameter:
128: .   mapping - new mapping data structure

130:     Level: advanced

132:     Concepts: mapping^local to global

134: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreateNC()
135: @*/
136: int ISLocalToGlobalMappingCreate(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
137: {
138:   int *in,ierr;

143:   PetscMalloc((n+1)*sizeof(int),&in);
144:   PetscMemcpy(in,indices,n*sizeof(int));
145:   ISLocalToGlobalMappingCreateNC(cm,n,in,mapping);
146:   return(0);
147: }
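
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   creating, inspecting, and freeing a mapping with the routines above. The name
   ExampleCreateMapping and the hard-wired global indices are assumptions chosen
   only for illustration.
*/
static int ExampleCreateMapping(MPI_Comm comm)
{
  int                    ierr,nl,globals[4] = {0,1,4,5}; /* global number of each local index */
  ISLocalToGlobalMapping ltog;

  ierr = ISLocalToGlobalMappingCreate(comm,4,globals,&ltog);CHKERRQ(ierr); /* copies globals[] */
  ierr = ISLocalToGlobalMappingGetSize(ltog,&nl);CHKERRQ(ierr);            /* nl == 4 */
  ierr = ISLocalToGlobalMappingView(ltog,PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}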

149: #undef __FUNCT__  
151: /*@C
152:     ISLocalToGlobalMappingCreateNC - Creates a mapping between a local (0 to n)
153:     ordering and a global parallel ordering.

155:     Not Collective, but communicator may have more than one process

157:     Input Parameters:
158: +   comm - MPI communicator
159: .   n - the number of local elements
160: -   indices - the global index for each local element

162:     Output Parameter:
163: .   mapping - new mapping data structure

165:     Level: developer

167:     Notes: Does not copy the indices; it only keeps the pointer to them. ISLocalToGlobalMappingDestroy()
168:     will free the space, so the indices must be obtained with PetscMalloc() and must not be freed elsewhere.

170:     Concepts: mapping^local to global

172: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate()
173: @*/
174: int ISLocalToGlobalMappingCreateNC(MPI_Comm cm,int n,const int indices[],ISLocalToGlobalMapping *mapping)
175: {
179:   *mapping = PETSC_NULL;
180: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
181:   {int ierr = VecInitializePackage(PETSC_NULL);CHKERRQ(ierr);}
182: #endif

184:   PetscHeaderCreate(*mapping,_p_ISLocalToGlobalMapping,int,IS_LTOGM_COOKIE,0,"ISLocalToGlobalMapping",
185:                     cm,ISLocalToGlobalMappingDestroy,ISLocalToGlobalMappingView);
186:   PetscLogObjectCreate(*mapping);
187:   PetscLogObjectMemory(*mapping,sizeof(struct _p_ISLocalToGlobalMapping)+n*sizeof(int));

189:   (*mapping)->n       = n;
190:   (*mapping)->indices = (int*)indices;

192:   /*
193:       Do not create the global to local mapping. This is only created if 
194:      ISGlobalToLocalMapping() is called 
195:   */
196:   (*mapping)->globals = 0;
197:   return(0);
198: }
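
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   the developer-level ISLocalToGlobalMappingCreateNC() takes ownership of the index
   array, which therefore must come from PetscMalloc() and must not be freed by the
   caller. The name ExampleCreateMappingNC and the index values are assumptions.
*/
static int ExampleCreateMappingNC(MPI_Comm comm)
{
  int                    ierr,i,n = 5,*idx;
  ISLocalToGlobalMapping ltog;

  ierr = PetscMalloc(n*sizeof(int),&idx);CHKERRQ(ierr);
  for (i=0; i<n; i++) idx[i] = 2*i;                      /* some global numbering */
  ierr = ISLocalToGlobalMappingCreateNC(comm,n,idx,&ltog);CHKERRQ(ierr);
  /* do NOT PetscFree(idx); ISLocalToGlobalMappingDestroy() frees it */
  ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  return 0;
}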

200: #undef __FUNCT__  
202: /*@C
203:     ISLocalToGlobalMappingBlock - Creates a blocked index version of an 
204:        ISLocalToGlobalMapping that is appropriate for MatSetLocalToGlobalMappingBlock()
205:        and VecSetLocalToGlobalMappingBlock().

207:     Not Collective, but communicator may have more than one process

209:     Input Parameters:
210: +    inmap - original point-wise mapping
211: -    bs - block size

213:     Output Parameter:
214: .   outmap - block based mapping

216:     Level: advanced

218:     Concepts: mapping^local to global

220: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreate(), ISLocalToGlobalMappingCreateIS()
221: @*/
222: int ISLocalToGlobalMappingBlock(ISLocalToGlobalMapping inmap,int bs,ISLocalToGlobalMapping *outmap)
223: {
224:   int ierr,*ii,i,n;


228:   if (bs > 1) {
229:     n    = inmap->n/bs;
230:     PetscMalloc(n*sizeof(int),&ii);
231:     for (i=0; i<n; i++) {
232:       ii[i] = inmap->indices[bs*i]/bs;
233:     }
234:     ISLocalToGlobalMappingCreate(inmap->comm,n,ii,outmap);
235:     PetscFree(ii);
236:   } else {
237:     *outmap = inmap;
238:     ierr    = PetscObjectReference((PetscObject)inmap);
239:   }
240:   return(0);
241: }
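
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   deriving a block mapping from a point-wise mapping. The block size of 2 and the
   index values are assumptions; the point-wise indices are laid out in contiguous
   blocks so that dividing by the block size is meaningful.
*/
static int ExampleBlockMapping(MPI_Comm comm)
{
  int                    ierr,indices[4] = {4,5,8,9};  /* two blocks of size 2 */
  ISLocalToGlobalMapping ptwise,blocked;

  ierr = ISLocalToGlobalMappingCreate(comm,4,indices,&ptwise);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingBlock(ptwise,2,&blocked);CHKERRQ(ierr); /* block indices {2,4} */
  ierr = ISLocalToGlobalMappingDestroy(blocked);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingDestroy(ptwise);CHKERRQ(ierr);
  return 0;
}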
242: 
243: #undef __FUNCT__  
245: /*@
246:    ISLocalToGlobalMappingDestroy - Destroys a mapping between a local (0 to n)
247:    ordering and a global parallel ordering.

249:    Not Collective

251:    Input Parameters:
252: .  mapping - mapping data structure

254:    Level: advanced

256: .seealso: ISLocalToGlobalMappingCreate()
257: @*/
258: int ISLocalToGlobalMappingDestroy(ISLocalToGlobalMapping mapping)
259: {
263:   if (--mapping->refct > 0) return(0);
264:   if (mapping->refct < 0) {
265:     SETERRQ(1,"Mapping already destroyed");
266:   }

268:   PetscFree(mapping->indices);
269:   if (mapping->globals) {PetscFree(mapping->globals);}
270:   PetscLogObjectDestroy(mapping);
271:   PetscHeaderDestroy(mapping);
272:   return(0);
273: }
274: 
275: #undef __FUNCT__  
277: /*@
278:     ISLocalToGlobalMappingApplyIS - Creates from an IS in the local numbering
279:     a new index set using the global numbering defined in an ISLocalToGlobalMapping
280:     context.

282:     Not collective

284:     Input Parameters:
285: +   mapping - mapping between local and global numbering
286: -   is - index set in local numbering

288:     Output Parameter:
289: .   newis - index set in global numbering

291:     Level: advanced

293:     Concepts: mapping^local to global

295: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
296:           ISLocalToGlobalMappingDestroy(), ISGlobalToLocalMappingApply()
297: @*/
298: int ISLocalToGlobalMappingApplyIS(ISLocalToGlobalMapping mapping,IS is,IS *newis)
299: {
300:   int ierr,n,i,*idxin,*idxmap,*idxout,Nmax = mapping->n;


307:   ierr   = ISGetLocalSize(is,&n);
308:   ierr   = ISGetIndices(is,&idxin);
309:   idxmap = mapping->indices;
310: 
311:   PetscMalloc((n+1)*sizeof(int),&idxout);
312:   for (i=0; i<n; i++) {
313:     if (idxin[i] >= Nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Local index %d too large %d (max) at %d",idxin[i],Nmax,i);
314:     idxout[i] = idxmap[idxin[i]];
315:   }
316:   ISRestoreIndices(is,&idxin);
317:   ISCreateGeneral(PETSC_COMM_SELF,n,idxout,newis);
318:   PetscFree(idxout);
319:   return(0);
320: }
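
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   renumbering an IS given in the local numbering into the global numbering. It
   assumes ltog has at least three local entries; the name ExampleApplyIS and the
   index values are chosen only for illustration.
*/
static int ExampleApplyIS(ISLocalToGlobalMapping ltog)
{
  int ierr,locals[2] = {0,2};
  IS  islocal,isglobal;

  ierr = ISCreateGeneral(PETSC_COMM_SELF,2,locals,&islocal);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApplyIS(ltog,islocal,&isglobal);CHKERRQ(ierr);
  /* isglobal now holds the global numbers of local indices 0 and 2 */
  ierr = ISDestroy(islocal);CHKERRQ(ierr);
  ierr = ISDestroy(isglobal);CHKERRQ(ierr);
  return 0;
}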

322: /*MC
323:    ISLocalToGlobalMappingApply - Takes a list of integers in a local numbering
324:    and converts them to the global numbering.

326:    Not collective

328:    Input Parameters:
329: +  mapping - the local to global mapping context
330: .  N - number of integers
331: -  in - input indices in local numbering

333:    Output Parameter:
334: .  out - indices in global numbering

336:    Synopsis:
337:    int ISLocalToGlobalMappingApply(ISLocalToGlobalMapping mapping,int N,int in[],int out[])

339:    Notes: 
340:    The in and out array parameters may be identical.

342:    Level: advanced

344: .seealso: ISLocalToGlobalMappingCreate(),ISLocalToGlobalMappingDestroy(), 
345:           ISLocalToGlobalMappingApplyIS(),AOCreateBasic(),AOApplicationToPetsc(),
346:           AOPetscToApplication(), ISGlobalToLocalMappingApply()

348:     Concepts: mapping^local to global

350: M*/
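
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   converting an array of local indices to global indices in place, which the note
   above permits since the input and output arrays may be identical. It assumes
   ltog has at least three local entries; the name ExampleApply is an assumption.
*/
static int ExampleApply(ISLocalToGlobalMapping ltog)
{
  int ierr,idx[3] = {0,1,2};   /* local indices, overwritten with global numbers */

  ierr = ISLocalToGlobalMappingApply(ltog,3,idx,idx);CHKERRQ(ierr);
  return 0;
}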

352: /* -----------------------------------------------------------------------------------------*/

354: #undef __FUNCT__  
356: /*
357:     Creates the global fields in the ISLocalToGlobalMapping structure
358: */
359: static int ISGlobalToLocalMappingSetUp_Private(ISLocalToGlobalMapping mapping)
360: {
361:   int ierr,i,*idx = mapping->indices,n = mapping->n,end,start,*globals;

364:   end   = 0;
365:   start = 100000000;

367:   for (i=0; i<n; i++) {
368:     if (idx[i] < 0) continue;
369:     if (idx[i] < start) start = idx[i];
370:     if (idx[i] > end)   end   = idx[i];
371:   }
372:   if (start > end) {start = 0; end = -1;}
373:   mapping->globalstart = start;
374:   mapping->globalend   = end;

376:   ierr             = PetscMalloc((end-start+2)*sizeof(int),&globals);
377:   mapping->globals = globals;
378:   for (i=0; i<end-start+1; i++) {
379:     globals[i] = -1;
380:   }
381:   for (i=0; i<n; i++) {
382:     if (idx[i] < 0) continue;
383:     globals[idx[i] - start] = i;
384:   }

386:   PetscLogObjectMemory(mapping,(end-start+1)*sizeof(int));
387:   return(0);
388: }

390: #undef __FUNCT__  
392: /*@
393:     ISGlobalToLocalMappingApply - Provides the local numbering for a list of integers
394:     specified with a global numbering.

396:     Not collective

398:     Input Parameters:
399: +   mapping - mapping between local and global numbering
400: .   type - IS_GTOLM_MASK - replaces global indices with no local value with -1
401:            IS_GTOLM_DROP - drops the indices with no local value from the output list
402: .   n - number of global indices to map
403: -   idx - global indices to map

405:     Output Parameters:
406: +   nout - number of indices in output array (if type == IS_GTOLM_MASK then nout = n)
407: -   idxout - local index of each global index, one must pass in an array long enough 
408:              to hold all the indices. You can call ISGlobalToLocalMappingApply() with 
409:              idxout == PETSC_NULL to determine the required length (returned in nout)
410:              and then allocate the required space and call ISGlobalToLocalMappingApply()
411:              a second time to set the values.

413:     Notes:
414:     Either nout or idxout may be PETSC_NULL. idx and idxout may be identical.

416:     This is not scalable in memory usage. Each processor requires an array of
417:     size O(Nglobal) to compute these.

419:     Level: advanced

421:     Concepts: mapping^global to local

423: .seealso: ISLocalToGlobalMappingApply(), ISLocalToGlobalMappingCreate(),
424:           ISLocalToGlobalMappingDestroy()
425: @*/
426: int ISGlobalToLocalMappingApply(ISLocalToGlobalMapping mapping,ISGlobalToLocalMappingType type,
427:                                   int n,const int idx[],int *nout,int idxout[])
428: {
429:   int i,ierr,*globals,nf = 0,tmp,start,end;

432:   if (!mapping->globals) {
433:     ISGlobalToLocalMappingSetUp_Private(mapping);
434:   }
435:   globals = mapping->globals;
436:   start   = mapping->globalstart;
437:   end     = mapping->globalend;

439:   if (type == IS_GTOLM_MASK) {
440:     if (idxout) {
441:       for (i=0; i<n; i++) {
442:         if (idx[i] < 0) idxout[i] = idx[i];
443:         else if (idx[i] < start) idxout[i] = -1;
444:         else if (idx[i] > end)   idxout[i] = -1;
445:         else                     idxout[i] = globals[idx[i] - start];
446:       }
447:     }
448:     if (nout) *nout = n;
449:   } else {
450:     if (idxout) {
451:       for (i=0; i<n; i++) {
452:         if (idx[i] < 0) continue;
453:         if (idx[i] < start) continue;
454:         if (idx[i] > end) continue;
455:         tmp = globals[idx[i] - start];
456:         if (tmp < 0) continue;
457:         idxout[nf++] = tmp;
458:       }
459:     } else {
460:       for (i=0; i<n; i++) {
461:         if (idx[i] < 0) continue;
462:         if (idx[i] < start) continue;
463:         if (idx[i] > end) continue;
464:         tmp = globals[idx[i] - start];
465:         if (tmp < 0) continue;
466:         nf++;
467:       }
468:     }
469:     if (nout) *nout = nf;
470:   }

472:   return(0);
473: }
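
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   the two-pass pattern described above, calling first with idxout == PETSC_NULL and
   IS_GTOLM_DROP to size the output, then again to fill it. The global index values
   and the name ExampleGlobalToLocal are assumptions chosen only for illustration.
*/
static int ExampleGlobalToLocal(ISLocalToGlobalMapping ltog)
{
  int ierr,nout,*local,globals[3] = {0,4,5};

  /* first pass: count how many of the global indices have a local value */
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,&nout,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscMalloc((nout+1)*sizeof(int),&local);CHKERRQ(ierr);
  /* second pass: fill in the corresponding local indices */
  ierr = ISGlobalToLocalMappingApply(ltog,IS_GTOLM_DROP,3,globals,&nout,local);CHKERRQ(ierr);
  ierr = PetscFree(local);CHKERRQ(ierr);
  return 0;
}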

475: #undef __FUNCT__  
477: /*@C
478:     ISLocalToGlobalMappingGetInfo - Gets the neighboring processors and, for each one,
479:      the local indices that are shared with it (i.e. owned by more than one subdomain)

481:     Collective on ISLocalToGlobalMapping

483:     Input Parameters:
484: .   mapping - the mapping from local to global indexing

486:     Output Parameters:
487: +   nproc - number of processors that are connected to this one
488: .   procs - neighboring processors
489: .   numprocs - number of indices shared with each subdomain (processor)
490: -   indices - indices of local nodes shared with each neighbor (sorted by global numbering)

492:     Level: advanced

494:     Concepts: mapping^local to global

496: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
497:           ISLocalToGlobalMappingRestoreInfo()
498: @*/
499: int ISLocalToGlobalMappingGetInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
500: {
501:   int         i,n = mapping->n,ierr,Ng,ng,max = 0,*lindices = mapping->indices;
502:   int         size,rank,*nprocs,*owner,nsends,*sends,j,*starts,nmax,nrecvs,*recvs,proc;
503:   int         tag1,tag2,tag3,cnt,*len,*source,imdex,scale,*ownedsenders,*nownedsenders,rstart,nowned;
504:   int         node,nownedm,nt,*sends2,nsends2,*starts2,*lens2,*dest,nrecvs2,*starts3,*recvs2,k,*bprocs,*tmp;
505:   int         first_procs,first_numprocs,*first_indices;
506:   MPI_Request *recv_waits,*send_waits;
507:   MPI_Status  recv_status,*send_status,*recv_statuses;
508:   MPI_Comm    comm = mapping->comm;
509:   PetscTruth  debug = PETSC_FALSE;

512:   ierr   = MPI_Comm_size(comm,&size);
513:   ierr   = MPI_Comm_rank(comm,&rank);
514:   if (size == 1) {
515:     *nproc         = 0;
516:     *procs         = PETSC_NULL;
517:     ierr           = PetscMalloc(sizeof(int),numprocs);
518:     (*numprocs)[0] = 0;
519:     ierr           = PetscMalloc(sizeof(int*),indices);
520:     (*indices)[0]  = PETSC_NULL;
521:     return(0);
522:   }

524:   PetscOptionsHasName(PETSC_NULL,"-islocaltoglobalmappinggetinfo_debug",&debug);

526:   /*
527:     Notes on ISLocalToGlobalMappingGetInfo

529:     globally owned node - a node that has been assigned to this processor in the global
530:            numbering, just for this routine.

532:     nontrivial globally owned node - node assigned to this processor that is on a subdomain
533:            boundary (i.e. it has more than one local owner)

535:     locally owned node - node that exists on this processor's subdomain

537:     nontrivial locally owned node - node that is not in the interior (i.e. it is contained in
538:            more than one local subdomain)
539:   */
540:   PetscObjectGetNewTag((PetscObject)mapping,&tag1);
541:   PetscObjectGetNewTag((PetscObject)mapping,&tag2);
542:   PetscObjectGetNewTag((PetscObject)mapping,&tag3);

544:   for (i=0; i<n; i++) {
545:     if (lindices[i] > max) max = lindices[i];
546:   }
547:   ierr   = MPI_Allreduce(&max,&Ng,1,MPI_INT,MPI_MAX,comm);
548:   Ng++;
549:   ierr   = MPI_Comm_size(comm,&size);
550:   ierr   = MPI_Comm_rank(comm,&rank);
551:   scale  = Ng/size + 1;
552:   ng     = scale; if (rank == size-1) ng = Ng - scale*(size-1); ng = PetscMax(1,ng);
553:   rstart = scale*rank;

555:   /* determine ownership ranges of global indices */
556:   PetscMalloc((2*size+1)*sizeof(int),&nprocs);
557:   PetscMemzero(nprocs,2*size*sizeof(int));

559:   /* determine owners of each local node  */
560:   PetscMalloc((n+1)*sizeof(int),&owner);
561:   for (i=0; i<n; i++) {
562:     proc             = lindices[i]/scale; /* processor that globally owns this index */
563:     nprocs[2*proc+1] = 1;                 /* processor globally owns at least one of ours */
564:     owner[i]         = proc;
565:     nprocs[2*proc]++;                     /* count of how many that processor globally owns of ours */
566:   }
567:   nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1];
568:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of global owners for my local data %d\n",nsends);

570:   /* inform other processors of number of messages and max length*/
571:   PetscMaxSum(comm,nprocs,&nmax,&nrecvs);
572:   PetscLogInfo(0,"ISLocalToGlobalMappingGetInfo: Number of local owners for my global data %d\n",nrecvs);

574:   /* post receives for owned rows */
575:   PetscMalloc((2*nrecvs+1)*(nmax+1)*sizeof(int),&recvs);
576:   PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);
577:   for (i=0; i<nrecvs; i++) {
578:     MPI_Irecv(recvs+2*nmax*i,2*nmax,MPI_INT,MPI_ANY_SOURCE,tag1,comm,recv_waits+i);
579:   }

581:   /* pack messages containing lists of local nodes to owners */
582:   ierr       = PetscMalloc((2*n+1)*sizeof(int),&sends);
583:   ierr       = PetscMalloc((size+1)*sizeof(int),&starts);
584:   starts[0]  = 0;
585:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}
586:   for (i=0; i<n; i++) {
587:     sends[starts[owner[i]]++] = lindices[i];
588:     sends[starts[owner[i]]++] = i;
589:   }
590:   PetscFree(owner);
591:   starts[0]  = 0;
592:   for (i=1; i<size; i++) { starts[i] = starts[i-1] + 2*nprocs[2*i-2];}

594:   /* send the messages */
595:   PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);
596:   PetscMalloc((nsends+1)*sizeof(int),&dest);
597:   cnt = 0;
598:   for (i=0; i<size; i++) {
599:     if (nprocs[2*i]) {
600:       ierr      = MPI_Isend(sends+starts[i],2*nprocs[2*i],MPI_INT,i,tag1,comm,send_waits+cnt);
601:       dest[cnt] = i;
602:       cnt++;
603:     }
604:   }
605:   PetscFree(starts);

607:   /* wait on receives */
608:   PetscMalloc((2*nrecvs+1)*sizeof(int),&source);
609:   len  = source + nrecvs;
610:   cnt  = nrecvs;
611:   PetscMalloc((ng+1)*sizeof(int),&nownedsenders);
612:   PetscMemzero(nownedsenders,ng*sizeof(int));
613:   while (cnt) {
614:     MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);
615:     /* unpack receives into our local space */
616:     ierr           = MPI_Get_count(&recv_status,MPI_INT,&len[imdex]);
617:     source[imdex]  = recv_status.MPI_SOURCE;
618:     len[imdex]     = len[imdex]/2;
619:     /* count how many local owners for each of my global owned indices */
620:     for (i=0; i<len[imdex]; i++) nownedsenders[recvs[2*imdex*nmax+2*i]-rstart]++;
621:     cnt--;
622:   }
623:   PetscFree(recv_waits);

625:   /* count the globally owned indices that lie on a subdomain boundary (nowned) and the total number of local owners over all such indices (nownedm) */
626:   nowned  = 0;
627:   nownedm = 0;
628:   for (i=0; i<ng; i++) {
629:     if (nownedsenders[i] > 1) {nownedm += nownedsenders[i]; nowned++;}
630:   }

632:   /* create single array to contain rank of all local owners of each globally owned index */
633:   ierr      = PetscMalloc((nownedm+1)*sizeof(int),&ownedsenders);
634:   ierr      = PetscMalloc((ng+1)*sizeof(int),&starts);
635:   starts[0] = 0;
636:   for (i=1; i<ng; i++) {
637:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
638:     else starts[i] = starts[i-1];
639:   }

641:   /* for each nontrivial globally owned node, list all arriving processors */
642:   for (i=0; i<nrecvs; i++) {
643:     for (j=0; j<len[i]; j++) {
644:       node = recvs[2*i*nmax+2*j]-rstart;
645:       if (nownedsenders[node] > 1) {
646:         ownedsenders[starts[node]++] = source[i];
647:       }
648:     }
649:   }

651:   if (debug) { /* -----------------------------------  */
652:     starts[0]    = 0;
653:     for (i=1; i<ng; i++) {
654:       if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
655:       else starts[i] = starts[i-1];
656:     }
657:     for (i=0; i<ng; i++) {
658:       if (nownedsenders[i] > 1) {
659:         PetscSynchronizedPrintf(comm,"[%d] global node %d local owner processors: ",rank,i+rstart);
660:         for (j=0; j<nownedsenders[i]; j++) {
661:           PetscSynchronizedPrintf(comm,"%d ",ownedsenders[starts[i]+j]);
662:         }
663:         PetscSynchronizedPrintf(comm,"\n");
664:       }
665:     }
666:     PetscSynchronizedFlush(comm);
667:   }/* -----------------------------------  */

669:   /* wait on original sends */
670:   if (nsends) {
671:     PetscMalloc(nsends*sizeof(MPI_Status),&send_status);
672:     MPI_Waitall(nsends,send_waits,send_status);
673:     PetscFree(send_status);
674:   }
675:   PetscFree(send_waits);
676:   PetscFree(sends);
677:   PetscFree(nprocs);

679:   /* pack messages to send back to local owners */
680:   starts[0]    = 0;
681:   for (i=1; i<ng; i++) {
682:     if (nownedsenders[i-1] > 1) starts[i] = starts[i-1] + nownedsenders[i-1];
683:     else starts[i] = starts[i-1];
684:   }
685:   nsends2 = nrecvs;
686:   ierr    = PetscMalloc((nsends2+1)*sizeof(int),&nprocs); /* length of each message */
687:   for (i=0; i<nrecvs; i++) {
688:     nprocs[i] = 1;
689:     for (j=0; j<len[i]; j++) {
690:       node = recvs[2*i*nmax+2*j]-rstart;
691:       if (nownedsenders[node] > 1) {
692:         nprocs[i] += 2 + nownedsenders[node];
693:       }
694:     }
695:   }
696:   nt = 0; for (i=0; i<nsends2; i++) nt += nprocs[i];
697:   PetscMalloc((nt+1)*sizeof(int),&sends2);
698:   PetscMalloc((nsends2+1)*sizeof(int),&starts2);
699:   starts2[0] = 0; for (i=1; i<nsends2; i++) starts2[i] = starts2[i-1] + nprocs[i-1];
700:   /*
701:      Each message is nprocs[i] integers long and consists of
702:        (0) the number of nodes being sent back, then for each such node:
703:        (1) the local node number,
704:        (2) the number of processors sharing it,
705:        (3) the processors sharing it
706:   */
707:   for (i=0; i<nsends2; i++) {
708:     cnt = 1;
709:     sends2[starts2[i]] = 0;
710:     for (j=0; j<len[i]; j++) {
711:       node = recvs[2*i*nmax+2*j]-rstart;
712:       if (nownedsenders[node] > 1) {
713:         sends2[starts2[i]]++;
714:         sends2[starts2[i]+cnt++] = recvs[2*i*nmax+2*j+1];
715:         sends2[starts2[i]+cnt++] = nownedsenders[node];
716:         PetscMemcpy(&sends2[starts2[i]+cnt],&ownedsenders[starts[node]],nownedsenders[node]*sizeof(int));
717:         cnt += nownedsenders[node];
718:       }
719:     }
720:   }

722:   /* send the message lengths */
723:   for (i=0; i<nsends2; i++) {
724:     MPI_Send(&nprocs[i],1,MPI_INT,source[i],tag2,comm);
725:   }

727:   /* receive the message lengths */
728:   nrecvs2 = nsends;
729:   PetscMalloc((nrecvs2+1)*sizeof(int),&lens2);
730:   PetscMalloc((nrecvs2+1)*sizeof(int),&starts3);
731:   nt      = 0;
732:   for (i=0; i<nrecvs2; i++) {
733:      MPI_Recv(&lens2[i],1,MPI_INT,dest[i],tag2,comm,&recv_status);
734:     nt   += lens2[i];
735:   }
736:   starts3[0] = 0;
737:   for (i=0; i<nrecvs2-1; i++) {
738:     starts3[i+1] = starts3[i] + lens2[i];
739:   }
740:   PetscMalloc((nt+1)*sizeof(int),&recvs2);
741:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Request),&recv_waits);
742:   for (i=0; i<nrecvs2; i++) {
743:     MPI_Irecv(recvs2+starts3[i],lens2[i],MPI_INT,dest[i],tag3,comm,recv_waits+i);
744:   }
745: 
746:   /* send the messages */
747:   PetscMalloc((nsends2+1)*sizeof(MPI_Request),&send_waits);
748:   for (i=0; i<nsends2; i++) {
749:     MPI_Isend(sends2+starts2[i],nprocs[i],MPI_INT,source[i],tag3,comm,send_waits+i);
750:   }

752:   /* wait on receives */
753:   PetscMalloc((nrecvs2+1)*sizeof(MPI_Status),&recv_statuses);
754:   MPI_Waitall(nrecvs2,recv_waits,recv_statuses);
755:   PetscFree(recv_statuses);
756:   PetscFree(recv_waits);
757:   PetscFree(nprocs);

759:   if (debug) { /* -----------------------------------  */
760:     cnt = 0;
761:     for (i=0; i<nrecvs2; i++) {
762:       nt = recvs2[cnt++];
763:       for (j=0; j<nt; j++) {
764:         PetscSynchronizedPrintf(comm,"[%d] local node %d number of subdomains %d: ",rank,recvs2[cnt],recvs2[cnt+1]);
765:         for (k=0; k<recvs2[cnt+1]; k++) {
766:           PetscSynchronizedPrintf(comm,"%d ",recvs2[cnt+2+k]);
767:         }
768:         cnt += 2 + recvs2[cnt+1];
769:         PetscSynchronizedPrintf(comm,"\n");
770:       }
771:     }
772:     PetscSynchronizedFlush(comm);
773:   } /* -----------------------------------  */

775:   /* count the number of shared local nodes for each subdomain (processor) */
776:   PetscMalloc(size*sizeof(int),&nprocs);
777:   PetscMemzero(nprocs,size*sizeof(int));
778:   cnt  = 0;
779:   for (i=0; i<nrecvs2; i++) {
780:     nt = recvs2[cnt++];
781:     for (j=0; j<nt; j++) {
782:       for (k=0; k<recvs2[cnt+1]; k++) {
783:         nprocs[recvs2[cnt+2+k]]++;
784:       }
785:       cnt += 2 + recvs2[cnt+1];
786:     }
787:   }
788:   nt = 0; for (i=0; i<size; i++) nt += (nprocs[i] > 0);
789:   *nproc    = nt;
790:   PetscMalloc((nt+1)*sizeof(int),procs);
791:   PetscMalloc((nt+1)*sizeof(int),numprocs);
792:   PetscMalloc((nt+1)*sizeof(int*),indices);
793:   PetscMalloc(size*sizeof(int),&bprocs);
794:   cnt       = 0;
795:   for (i=0; i<size; i++) {
796:     if (nprocs[i] > 0) {
797:       bprocs[i]        = cnt;
798:       (*procs)[cnt]    = i;
799:       (*numprocs)[cnt] = nprocs[i];
800:       ierr             = PetscMalloc(nprocs[i]*sizeof(int),&(*indices)[cnt]);
801:       cnt++;
802:     }
803:   }

805:   /* make the list of subdomains for each nontrivial local node */
806:   PetscMemzero(*numprocs,nt*sizeof(int));
807:   cnt  = 0;
808:   for (i=0; i<nrecvs2; i++) {
809:     nt = recvs2[cnt++];
810:     for (j=0; j<nt; j++) {
811:       for (k=0; k<recvs2[cnt+1]; k++) {
812:         (*indices)[bprocs[recvs2[cnt+2+k]]][(*numprocs)[bprocs[recvs2[cnt+2+k]]]++] = recvs2[cnt];
813:       }
814:       cnt += 2 + recvs2[cnt+1];
815:     }
816:   }
817:   PetscFree(bprocs);
818:   PetscFree(recvs2);

820:   /* sort the shared local nodes by their global numbers */
821:   nt = *nproc;
822:   for (i=0; i<nt; i++) {
823:     PetscMalloc(((*numprocs)[i])*sizeof(int),&tmp);
824:     for (j=0; j<(*numprocs)[i]; j++) {
825:       tmp[j] = lindices[(*indices)[i][j]];
826:     }
827:     PetscSortIntWithArray((*numprocs)[i],tmp,(*indices)[i]);
828:     PetscFree(tmp);
829:   }

831:   if (debug) { /* -----------------------------------  */
832:     nt = *nproc;
833:     for (i=0; i<nt; i++) {
834:       PetscSynchronizedPrintf(comm,"[%d] subdomain %d number of indices %d: ",rank,(*procs)[i],(*numprocs)[i]);
835:       for (j=0; j<(*numprocs)[i]; j++) {
836:         PetscSynchronizedPrintf(comm,"%d ",(*indices)[i][j]);
837:       }
838:       PetscSynchronizedPrintf(comm,"\n");
839:     }
840:     PetscSynchronizedFlush(comm);
841:   } /* -----------------------------------  */

843:   /* wait on sends */
844:   if (nsends2) {
845:     PetscMalloc(nsends2*sizeof(MPI_Status),&send_status);
846:     MPI_Waitall(nsends2,send_waits,send_status);
847:     PetscFree(send_status);
848:   }

850:   PetscFree(starts3);
851:   PetscFree(dest);
852:   PetscFree(send_waits);

854:   PetscFree(nownedsenders);
855:   PetscFree(ownedsenders);
856:   PetscFree(starts);
857:   PetscFree(starts2);
858:   PetscFree(lens2);

860:   PetscFree(source);
861:   PetscFree(recvs);
862:   PetscFree(nprocs);
863:   PetscFree(sends2);

865:   /* put the information about myself as the first entry in the list */
866:   first_procs    = (*procs)[0];
867:   first_numprocs = (*numprocs)[0];
868:   first_indices  = (*indices)[0];
869:   for (i=0; i<*nproc; i++) {
870:     if ((*procs)[i] == rank) {
871:       (*procs)[0]    = (*procs)[i];
872:       (*numprocs)[0] = (*numprocs)[i];
873:       (*indices)[0]  = (*indices)[i];
874:       (*procs)[i]    = first_procs;
875:       (*numprocs)[i] = first_numprocs;
876:       (*indices)[i]  = first_indices;
877:       break;
878:     }
879:   }

881:   return(0);
882: }
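
/*
   Illustrative usage sketch (editor's addition, not part of the original isltog.c):
   the typical pairing of ISLocalToGlobalMappingGetInfo() with
   ISLocalToGlobalMappingRestoreInfo() to examine which neighbors share local nodes.
   The name ExampleGetInfo is an assumption chosen only for illustration.
*/
static int ExampleGetInfo(ISLocalToGlobalMapping ltog)
{
  int ierr,i,nproc,*procs,*numprocs,**indices;

  ierr = ISLocalToGlobalMappingGetInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  for (i=0; i<nproc; i++) {
    ierr = PetscPrintf(PETSC_COMM_SELF,"share %d local nodes with processor %d\n",numprocs[i],procs[i]);CHKERRQ(ierr);
  }
  ierr = ISLocalToGlobalMappingRestoreInfo(ltog,&nproc,&procs,&numprocs,&indices);CHKERRQ(ierr);
  return 0;
}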

884: #undef __FUNCT__  
886: /*@C
887:     ISLocalToGlobalMappingRestoreInfo - Frees the memory allocated by ISLocalToGlobalMappingGetInfo()

889:     Collective on ISLocalToGlobalMapping

891:     Input Parameters:
892: .   mapping - the mapping from local to global indexing

894:     Output Parameters:
895: +   nproc - number of processors that are connected to this one
896: .   procs - neighboring processors
897: .   numprocs - number of indices shared with each processor
898: -   indices - indices of local nodes shared with each neighbor (sorted by global numbering)

900:     Level: advanced

902: .seealso: ISLocalToGlobalMappingDestroy(), ISLocalToGlobalMappingCreateIS(), ISLocalToGlobalMappingCreate(),
903:           ISLocalToGlobalMappingGetInfo()
904: @*/
905: int ISLocalToGlobalMappingRestoreInfo(ISLocalToGlobalMapping mapping,int *nproc,int **procs,int **numprocs,int ***indices)
906: {
907:   int ierr,i;

910:   if (*procs) {PetscFree(*procs);}
911:   if (*numprocs) {PetscFree(*numprocs);}
912:   if (*indices) {
913:     if ((*indices)[0]) {PetscFree((*indices)[0]);}
914:     for (i=1; i<*nproc; i++) {
915:       if ((*indices)[i]) {PetscFree((*indices)[i]);}
916:     }
917:     PetscFree(*indices);
918:   }
919:   return(0);
920: }