Actual source code: part2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: part2d.c,v 1.14 2000/01/31 17:40:21 knepley Exp $";
  3: #endif

  5: #include "src/mesh/impls/triangular/2d/2dimpl.h"         /*I "mesh.h" I*/
  6: #ifdef PETSC_HAVE_PARMETIS
  7: EXTERN_C_BEGIN
  8: #include "parmetis.h"
  9: EXTERN_C_END
 10: #endif
 11: #include "part2d.h"

 13: #undef  __FUNCT__
 14: #define __FUNCT__ "PartitionView_Triangular_2D_File"
 15: static int PartitionView_Triangular_2D_File(Partition p, PetscViewer viewer) {
 16:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
 17:   FILE                    *fd;
 18:   int                      numLocElements = p->numLocElements;
 19:   int                      numLocNodes    = q->numLocNodes;
 20:   int                      numLocEdges    = q->numLocEdges;
 21:   int                      i;
 22:   int                      ierr;

 25:   PetscViewerASCIIPrintf(viewer, "Partition Object:\n");
 26:   PetscViewerASCIIPrintf(viewer, "  Partition of triangular 2D grid with %d elements and %d nodes\n", p->numElements, q->numNodes);
 27:   PetscViewerASCIIGetPointer(viewer, &fd);
 28:   PetscSynchronizedFPrintf(p->comm, fd, "    Proc %d: %d elements %d nodes %d edges\n",
 29:                            p->rank, numLocElements, numLocNodes, numLocEdges);
 30:   PetscSynchronizedFlush(p->comm);
 31:   if (p->ordering != PETSC_NULL) {
 32:     PetscViewerASCIIPrintf(viewer, "  Global element renumbering:\n");
 33:     AOView(p->ordering, viewer);
 34:   }
 35:   if (q->nodeOrdering != PETSC_NULL) {
 36:     PetscViewerASCIIPrintf(viewer, "  Global node renumbering:\n");
 37:     AOView(q->nodeOrdering, viewer);
 38:   }
 39:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost elements on proc %d\n", p->numOverlapElements - numLocElements, p->rank);
 40:   for(i = 0; i < p->numOverlapElements - numLocElements; i++)
 41:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d %d\n", i, p->ghostElements[i], p->ghostElementProcs[i]);
 42:   PetscSynchronizedFlush(p->comm);
 43:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost nodes on proc %d\n", q->numOverlapNodes - numLocNodes, p->rank);
 44:   for(i = 0; i < q->numOverlapNodes - numLocNodes; i++)
 45:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d\n", i, q->ghostNodes[i]);
 46:   PetscSynchronizedFlush(p->comm);

 48:   return(0);
 49: }

 51: #undef  __FUNCT__
 52: #define __FUNCT__ "PartitionView_Triangular_2D_Draw"
 53: static int PartitionView_Triangular_2D_Draw(Partition p, PetscViewer v) {
 55:   return(0);
 56: }

 58: #undef  __FUNCT__
 59: #define __FUNCT__ "PartitionView_Triangular_2D"
 60: static int PartitionView_Triangular_2D(Partition p, PetscViewer viewer) {
 61:   PetscTruth isascii, isdraw;
 62:   int        ierr;

 65:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
 66:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
 67:   if (isascii == PETSC_TRUE) {
 68:     PartitionView_Triangular_2D_File(p, viewer);
 69:   } else if (isdraw == PETSC_TRUE) {
 70:     PartitionView_Triangular_2D_Draw(p, viewer);
 71:   }
 72:   return(0);
 73: }

 75: #undef  __FUNCT__
 76: #define __FUNCT__ "PartitionViewFromOptions_Private"
 77: static int PartitionViewFromOptions_Private(Partition part, char *title) {
 78:   PetscViewer viewer;
 79:   PetscDraw   draw;
 80:   PetscTruth  opt;
 81:   int         ierr;

 84:   PetscOptionsHasName(part->prefix, "-part_view", &opt);
 85:   if (opt == PETSC_TRUE) {
 86:     PartitionView(part, PETSC_NULL);
 87:   }
 88:   PetscOptionsHasName(part->prefix, "-part_view_draw", &opt);
 89:   if (opt == PETSC_TRUE) {
 90:     PetscViewerDrawOpen(part->comm, 0, 0, 0, 0, 300, 300, &viewer);
 91:     PetscViewerDrawGetDraw(viewer, 0, &draw);
 92:     if (title != PETSC_NULL) {
 93:       PetscDrawSetTitle(draw, title);
 94:     }
 95:     PartitionView(part, viewer);
 96:     PetscViewerFlush(viewer);
 97:     PetscDrawPause(draw);
 98:     PetscViewerDestroy(viewer);
 99:   }
100:   return(0);
101: }

103: #undef  __FUNCT__
104: #define __FUNCT__ "PartitionDestroy_Triangular_2D"
105: static int PartitionDestroy_Triangular_2D(Partition p) {
106:   Partition_Triangular_2D *s = (Partition_Triangular_2D *) p->data;
107:   int                      ierr;

110:   PetscFree(p->firstElement);
111:   if (p->ordering != PETSC_NULL)
112:     {AODestroy(p->ordering);}
113:   if (p->ghostElements != PETSC_NULL)
114:     {PetscFree(p->ghostElements);}
115:   if (p->ghostElementProcs != PETSC_NULL)
116:     {PetscFree(p->ghostElementProcs);}
117:   PetscFree(s->firstNode);
118:   PetscFree(s->firstBdNode);
119:   if (s->nodeOrdering != PETSC_NULL)
120:     {AODestroy(s->nodeOrdering);}
121:   if (s->ghostNodes != PETSC_NULL)
122:     {PetscFree(s->ghostNodes);}
123:   if (s->ghostNodeProcs != PETSC_NULL)
124:     {PetscFree(s->ghostNodeProcs);}
125:   if (s->ghostBdNodes != PETSC_NULL)
126:     {PetscFree(s->ghostBdNodes);}
127:   PetscFree(s->firstEdge);
128:   if (s->edgeOrdering != PETSC_NULL)
129:     {AODestroy(s->edgeOrdering);}
130:   PetscFree(s);

132:   return(0);
133: }

135: #undef  __FUNCT__
136: #define __FUNCT__ "PartitionGhostNodeExchange_Triangular_2D"
137: static int PartitionGhostNodeExchange_Triangular_2D(Partition part, InsertMode addv, ScatterMode mode, int *locVars, int *ghostVars) {
138:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) part->data;
139:   Mesh                     mesh = part->mesh;
140:   int                      ierr;

143:   PetscGhostExchange(part->comm, q->numOverlapNodes - mesh->numNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
144:                             q->firstNode, addv, mode, locVars, ghostVars);
145: 
146:   return(0);
147: }
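
/*
  A sketch of the exchange semantics above, inferred from how PetscGhostExchange()
  is called here (the exact behavior is defined elsewhere): with SCATTER_FORWARD,
  each ghost slot ghostVars[i] is filled from locVars[] on the owning processor
  q->ghostNodeProcs[i]; with SCATTER_REVERSE, ghost contributions are combined
  back into the owners' locVars[] according to addv (e.g. ADD_VALUES).
*/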

149: #undef  __FUNCT__
150: #define __FUNCT__ "PartitionGetTotalNodes_Triangular_2D"
151: static int PartitionGetTotalNodes_Triangular_2D(Partition p, int *size) {
152:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

155:   *size = q->numNodes;
156:   return(0);
157: }

159: #undef  __FUNCT__
160: #define __FUNCT__ "PartitionGetStartNode_Triangular_2D"
161: static int PartitionGetStartNode_Triangular_2D(Partition p, int *node) {
162:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

165:   *node = q->firstNode[p->rank];
166:   return(0);
167: }

169: #undef  __FUNCT__
170: #define __FUNCT__ "PartitionGetEndNode_Triangular_2D"
171: static int PartitionGetEndNode_Triangular_2D(Partition p, int *node) {
172:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

175:   *node = q->firstNode[p->rank+1];
176:   return(0);
177: }

179: #undef  __FUNCT__
180: #define __FUNCT__ "PartitionGetNumNodes_Triangular_2D"
181: static int PartitionGetNumNodes_Triangular_2D(Partition p, int *size) {
182:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

185:   *size = q->numLocNodes;
186:   return(0);
187: }

189: #undef  __FUNCT__
190: #define __FUNCT__ "PartitionGetNumOverlapNodes_Triangular_2D"
191: static int PartitionGetNumOverlapNodes_Triangular_2D(Partition p, int *size) {
192:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

195:   *size = q->numOverlapNodes;
196:   return(0);
197: }

199: #undef  __FUNCT__
200: #define __FUNCT__ "PartitionGhostNodeIndex_Private"
201: int PartitionGhostNodeIndex_Private(Partition p, int node, int *gNode) {
202:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
203:   int                      low, high, mid;

206:   /* Use bisection since the array is assumed to be sorted */
207:   low  = 0;
208:   high = q->numOverlapNodes - (q->firstNode[p->rank+1] - q->firstNode[p->rank]) - 1;
209:   while (low <= high) {
210:     mid = (low + high)/2;
211:     if (node == q->ghostNodes[mid]) {
212:       *gNode = mid;
213:       return(0);
214:     } else if (node < q->ghostNodes[mid]) {
215:       high = mid - 1;
216:     } else {
217:       low  = mid + 1;
218:     }
219:   }
220:   *gNode = -1;
221:   /* Flag for ghost node not present */
222:   PetscFunctionReturn(1);
223: }
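
/*
  A minimal standalone sketch of the bisection above (hypothetical helper, not
  part of the Partition API), assuming ghosts[] is sorted and duplicate-free:
*/
static int GhostIndexSketch(int numGhosts, const int *ghosts, int node)
{
  int low = 0, high = numGhosts - 1, mid;

  while (low <= high) {
    mid = (low + high)/2;
    if (ghosts[mid] == node) return mid;  /* the local ghost index */
    if (node < ghosts[mid])  high = mid - 1;
    else                     low  = mid + 1;
  }
  return -1;                              /* not a ghost of this domain */
}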

225: #undef  __FUNCT__
226: #define __FUNCT__ "PartitionGlobalToLocalNodeIndex_Triangular_2D"
227: static int PartitionGlobalToLocalNodeIndex_Triangular_2D(Partition p, int node, int *locNode) {
228:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
229:   int                      numLocNodes = q->numLocNodes;
230:   int                      gNode; /* Local ghost node number */
231:   int                      ierr;

234:   if (node < 0) {
235:     *locNode = node;
236:     return(0);
237:   }
238:   /* Check for ghost node */
239:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
240:     /* Search for canonical number */
241:     PartitionGhostNodeIndex_Private(p, node, &gNode);
242:     *locNode = numLocNodes + gNode;
243:   } else {
244:     *locNode = node - q->firstNode[p->rank];
245:   }
246:   return(0);
247: }

249: #undef  __FUNCT__
250: #define __FUNCT__ "PartitionLocalToGlobalNodeIndex_Triangular_2D"
251: static int PartitionLocalToGlobalNodeIndex_Triangular_2D(Partition p, int locNode, int *node) {
252:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
253:   int                      numLocNodes = q->numLocNodes;

256:   if (locNode < 0) {
257:     *node = locNode;
258:     return(0);
259:   }
260:   /* Check for ghost node */
261:   if (locNode >= numLocNodes) {
262:     *node = q->ghostNodes[locNode - numLocNodes];
263:   } else {
264:     *node = locNode + q->firstNode[p->rank];
265:   }
266:   return(0);
267: }
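
/*
  Worked example of the two translations above, assuming firstNode[rank] = 100,
  numLocNodes = 50, and ghostNodes[] = {20, 31, 160}: owned global node 102 maps
  to local node 2 (102 - 100), while ghost global node 31 maps to local node 51
  (numLocNodes + its index 1 in ghostNodes[]); the mapping is inverted by
  PartitionLocalToGlobalNodeIndex_Triangular_2D().
*/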

269: #undef  __FUNCT__
270: #define __FUNCT__ "PartitionGlobalToGhostNodeIndex_Triangular_2D"
271: static int PartitionGlobalToGhostNodeIndex_Triangular_2D(Partition p, int node, int *ghostNode, int *ghostProc) {
272:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
273:   int                      ierr;

276:   if (node < 0) {
277:     *ghostNode = node;
278:     *ghostProc = -1;
279:     return(0);
280:   }
281:   /* Check for ghost node */
282:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
283:     PartitionGhostNodeIndex_Private(p, node, ghostNode);
284:     *ghostProc = q->ghostNodeProcs[*ghostNode];
285:   } else {
286:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Global node %d is not a ghost node", node);
287:   }
288:   return(0);
289: }

291: #undef  __FUNCT__
292: #define __FUNCT__ "PartitionGhostToGlobalNodeIndex_Triangular_2D"
293: static int PartitionGhostToGlobalNodeIndex_Triangular_2D(Partition p, int ghostNode, int *node, int *ghostProc) {
294:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
295:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;

298:   if (ghostNode < 0) {
299:     *node      = ghostNode;
300:     *ghostProc = -1;
301:     return(0);
302:   }
303:   /* Check for ghost node */
304:   if (ghostNode < numGhostNodes) {
305:     *node      = q->ghostNodes[ghostNode];
306:     *ghostProc = q->ghostNodeProcs[ghostNode];
307:   } else {
308:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Ghost node %d does not exist", ghostNode);
309:   }
310:   return(0);
311: }

313: #undef  __FUNCT__
314: #define __FUNCT__ "PartitionGetNodeOrdering_Triangular_2D"
315: static int PartitionGetNodeOrdering_Triangular_2D(Partition p, AO *order) {
316:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

319:   *order = q->nodeOrdering;
320:   return(0);
321: }

323: #undef  __FUNCT__
324: #define __FUNCT__ "PartitionGetTotalEdges_Triangular_2D"
325: static int PartitionGetTotalEdges_Triangular_2D(Partition p, int *size) {
326:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

329:   *size = q->numEdges;
330:   return(0);
331: }

333: #undef  __FUNCT__
334: #define __FUNCT__ "PartitionGetStartEdge_Triangular_2D"
335: static int PartitionGetStartEdge_Triangular_2D(Partition p, int *edge) {
336:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

339:   *edge = q->firstEdge[p->rank];
340:   return(0);
341: }

343: #undef  __FUNCT__
344: #define __FUNCT__ "PartitionGetEndEdge_Triangular_2D"
345: static int PartitionGetEndEdge_Triangular_2D(Partition p, int *edge) {
346:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

349:   *edge = q->firstEdge[p->rank+1];
350:   return(0);
351: }

353: #undef  __FUNCT__
354: #define __FUNCT__ "PartitionGetNumEdges_Triangular_2D"
355: static int PartitionGetNumEdges_Triangular_2D(Partition p, int *size) {
356:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

359:   *size = q->numLocEdges;
360:   return(0);
361: }

363: #undef  __FUNCT__
364: #define __FUNCT__ "PartitionGetNumOverlapEdges_Triangular_2D"
365: static int PartitionGetNumOverlapEdges_Triangular_2D(Partition p, int *size) {
366:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

369:   /* We do not maintain ghost edges */
370:   *size = q->numLocEdges;
371:   return(0);
372: }

374: #undef  __FUNCT__
375: #define __FUNCT__ "PartitionGetEdgeOrdering_Triangular_2D"
376: static int PartitionGetEdgeOrdering_Triangular_2D(Partition p, AO *order) {
377:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

380:   *order = q->edgeOrdering;
381:   return(0);
382: }

384: static struct _PartitionOps POps = {/* Generic Operations */
385:                                     PETSC_NULL/* PartitionSetup */,
386:                                     PETSC_NULL/* PartitionSetFromOptions */,
387:                                     PartitionView_Triangular_2D,
388:                                     PETSC_NULL/* PartitionCopy */,
389:                                     PETSC_NULL/* PartitionDuplicate */,
390:                                     PartitionDestroy_Triangular_2D,
391:                                     /* Partition-Specific Operations */
392:                                     PartitionGhostNodeExchange_Triangular_2D,
393:                                     /* Node Query Functions */
394:                                     PartitionGetTotalNodes_Triangular_2D,
395:                                     PartitionGetStartNode_Triangular_2D,
396:                                     PartitionGetEndNode_Triangular_2D,
397:                                     PartitionGetNumNodes_Triangular_2D,
398:                                     PartitionGetNumOverlapNodes_Triangular_2D,
399:                                     PartitionGlobalToLocalNodeIndex_Triangular_2D,
400:                                     PartitionLocalToGlobalNodeIndex_Triangular_2D,
401:                                     PartitionGlobalToGhostNodeIndex_Triangular_2D,
402:                                     PartitionGhostToGlobalNodeIndex_Triangular_2D,
403:                                     PartitionGetNodeOrdering_Triangular_2D,
404:                                     /* Face Query Functions */
405:                                     PartitionGetTotalElements,
406:                                     PartitionGetStartElement,
407:                                     PartitionGetEndElement,
408:                                     PartitionGetNumElements,
409:                                     PartitionGetNumOverlapElements,
410:                                     PartitionGlobalToLocalElementIndex,
411:                                     PartitionLocalToGlobalElementIndex,
412:                                     PartitionGetElementOrdering,
413:                                     /* Edge Query Functions */
414:                                     PartitionGetTotalEdges_Triangular_2D,
415:                                     PartitionGetStartEdge_Triangular_2D,
416:                                     PartitionGetEndEdge_Triangular_2D,
417:                                     PartitionGetNumEdges_Triangular_2D,
418:                                     PartitionGetNumOverlapEdges_Triangular_2D,
419:                                     PETSC_NULL/* PartitionGlobalToLocalEdgeIndex */,
420:                                     PETSC_NULL/* PartitionLocalToGlobalEdgeIndex */,
421:                                     PartitionGetEdgeOrdering_Triangular_2D};
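
/*
  A sketch of how a table like POps is consumed: the generic Partition interface
  dispatches through these function pointers, along the lines of the fragment
  below (the member name p->ops and the slot name are assumptions based on the
  usual layout of PETSc objects of this era):

    int PartitionGetNumNodes(Partition p, int *size)
    {
      return (*p->ops->getnumnodes)(p, size);
    }
*/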

423: #undef  __FUNCT__
424: #define __FUNCT__ "PartitionCalcCut_Private"
425: int PartitionCalcCut_Private(Partition p, int *cut)
426: {
427:   Mesh_Triangular *tri            = (Mesh_Triangular *) p->mesh->data;
428:   int              numLocElements = p->numLocElements;
429:   int             *neighbors      = tri->neighbors;
430:   int              locCut;        /* The number of edges of the dual crossing the partition from this domain */
431:   int              elem, neighbor;
432:   int              ierr;

435:   for(elem = 0, locCut = 0; elem < numLocElements; elem++) {
436:     for(neighbor = 0; neighbor < 3; neighbor++) {
437:       if (neighbors[elem*3+neighbor] >= numLocElements)
438:         locCut++;
439:     }
440:   }
441:   ierr  = MPI_Allreduce(&locCut, cut, 1, MPI_INT, MPI_SUM, p->comm);
442:   *cut /= 2;
443:   return(0);
444: }
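
/*
  Worked example for the division by two above: if elements A (rank 0) and B
  (rank 1) are dual neighbors across the partition, rank 0 counts the edge once
  (B appears with a local index >= numLocElements) and rank 1 counts it once,
  so the MPI_Allreduce() sum reports 2 for this single cut edge.
*/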

446: int PartitionDebugAO_Private(Partition p, int *nodeProcs)
447: {
448:   Mesh                     mesh        = p->mesh;
449:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
450:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
451:   int                      numCorners  = mesh->numCorners;
452:   int                      numElements = mesh->numFaces;
453:   int                     *elements    = tri->faces;
454:   int                      numNodes    = q->numNodes;
455:   int                      numProcs    = p->numProcs;
456:   int                      rank        = p->rank;
457:   int                     *support;
458:   int                     *temp;
459:   int                      proc, nProc, elem, nElem, sElem, corner, nCorner, node, degree, index;
460:   int                      ierr;

463:   PetscMalloc(numProcs * sizeof(int), &temp);
464:   for(node = 0; node < numNodes; node++) {
465:     PetscSynchronizedPrintf(p->comm, " %d", nodeProcs[node]);
466:     PetscSynchronizedFlush(p->comm);
467:     PetscPrintf(p->comm, "\n");
468:     MPI_Allgather(&nodeProcs[node], 1, MPI_INT, temp, 1, MPI_INT, p->comm);
469:     for(proc = 0, index = 0; proc < numProcs; proc++) {
470:       if (temp[proc] == proc) index++;
471:     }

473:     /* If a node is not scheduled for a unique domain */
474:     if (index != 1) {
475:       for(elem = 0; elem < numElements; elem++) {
476:         for(corner = 0; corner < numCorners; corner++) {
477:           /* Locate an element containing the node */
478:           if (node != elements[elem*numCorners+corner])
479:             continue;
480: 
481:           /* Check the support of the node */
482:           PetscPrintf(PETSC_COMM_SELF, "[%d]elem: %d corner: %d node: %d\n", rank, elem, corner, node);
483:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
484:           for(sElem = 0; sElem < degree; sElem++) {
485:             nElem = support[sElem];
486:             PetscPrintf(PETSC_COMM_SELF, "[%d]support[%d] = %d\n", rank, sElem, nElem);
487:             /* See if neighbor is in another domain */
488:             if (nElem >= numElements) {
489:               /* Check to see if node is contained in the neighboring element */
490:               for(nCorner = 0; nCorner < numCorners; nCorner++)
491:                 if (elements[nElem*numCorners+nCorner] == node) {
492:                   nProc = p->ghostElementProcs[nElem-numElements];
493:                   PetscPrintf(PETSC_COMM_SELF, "[%d]Found in corner %d proc: %d\n", rank, nCorner, nProc);
494:                   break;
495:                 }
496:             }
497:           }
498:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
499:           if (nodeProcs[node] < 0)
500:             nodeProcs[node] = rank;
501:           PetscPrintf(PETSC_COMM_SELF, "[%d]nodeProcs[%d]: %d\n", rank, node, nodeProcs[node]);
502:         }
503:       }
504:     }
505:   }
506:   PetscFree(temp);
507:   PetscBarrier((PetscObject) p);
508:   PetscFunctionReturn(1);
509: }

511: #undef  __FUNCT__
512: #define __FUNCT__ "PartitionSortGhosts_Private"
513: /*
514:   PartitionSortGhosts_Private - This function sorts the ghost array and
515:   removes any duplicates.

517:   Input Parameters:
518: . p          - The Partition
519: . numGhosts  - The number of ghosts
520: . ghostArray - The ghost indices

522:   Output Parameters:
523: . numGhosts  - The new size of the ghost array
524: . ghostArray - The sorted ghost indices

526: .seealso:
527: */
528: int PartitionSortGhosts_Private(Partition p, int *numGhosts, int *ghostArray, int **ghostPerm)
529: {
530:   int *perm, *temp;
531:   int  size;
532:   int  ghost, newGhost;
533:   int  ierr;

536:   size = *numGhosts;
537:   PetscMalloc(size * sizeof(int), &perm);
538:   PetscMalloc(size * sizeof(int), &temp);

540:   /* Sort ghosts */
541:   for(ghost = 0; ghost < size; ghost++) perm[ghost] = ghost;
542:   PetscSortIntWithPermutation(size, ghostArray, perm);

544:   /* Permute ghosts and eliminate duplicates */
545:   for(ghost = 0, newGhost = 0; ghost < size; ghost++) {
546:     if ((newGhost == 0) || (temp[newGhost-1] != ghostArray[perm[ghost]])) {
547:       /* Keep ghost */
548:       temp[newGhost++] = ghostArray[perm[ghost]];
549:     } else {
550:       /* Eliminate redundant ghost */
551:       PetscMemmove(&perm[ghost], &perm[ghost+1], (size - (ghost+1)) * sizeof(int));
552:       ghost--;
553:       size--;
554:     }
555:   }
556:   for(ghost = 0; ghost < size; ghost++) {
557:     ghostArray[ghost] = temp[ghost];
558:   }
559:   PetscFree(temp);

561:   *numGhosts = size;
562:   *ghostPerm = perm;
563:   return(0);
564: }
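
/*
  A minimal usage sketch for the routine above, on made-up data: starting from
  numGhosts = 5 and ghostArray[] = {7, 3, 7, 1, 3}, it returns numGhosts = 3,
  ghostArray[] = {1, 3, 7}, and a permutation with perm[i] giving an original
  position of the value ghostArray[i], so that parallel arrays (such as
  ghostNodeProcs[]) can be reordered to match; the caller must PetscFree()
  the permutation.
*/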

566: #undef  __FUNCT__
567: #define __FUNCT__ "PartitionGetNewGhostNodes_Serial"
568: int PartitionGetNewGhostNodes_Serial(Partition p, int *newProcNodes, int *newNodes)
569: {
570:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
571:   int                      numLocNodes   = q->numLocNodes;
572:   int                      numGhostNodes = q->numOverlapNodes - numLocNodes;
573:   int                      numProcs      = p->numProcs;
574:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
575:   int                      numNewNodes;    /* Total number of new ghost nodes to add */
576:   int                     *temp;
577:   int                      proc, node, i;
578:   int                      ierr;

581:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++)
582:     numNewNodes += newProcNodes[proc];

584:   /* Add in new ghost nodes */
585:   if (numNewNodes > 0) {
586:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
587:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
588:     for(node = 0; node < numNewNodes; node++) {
589:       temp[numGhostNodes+node] = newNodes[node];
590:     }
591:     if (q->ghostNodes != PETSC_NULL) {
592:       PetscFree(q->ghostNodes);
593:     }
594:     q->ghostNodes = temp;

596:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
597:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
598:     for(proc = 0, node = 0; proc < numProcs; proc++) {
599:       for(i = 0; i < newProcNodes[proc]; i++)
600:         temp[numGhostNodes+(node++)] = proc;
601:     }
602:     if (q->ghostNodeProcs != PETSC_NULL) {
603:       PetscFree(q->ghostNodeProcs);
604:     }
605:     q->ghostNodeProcs = temp;

607:     /* Resort ghost nodes and remove duplicates */
608:     numGhostNodes += numNewNodes;
609:     PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
610:     q->numOverlapNodes = numLocNodes + numGhostNodes;
611:     PetscMalloc(numGhostNodes * sizeof(int), &temp);
612:     for(node = 0; node < numGhostNodes; node++) {
613:       temp[node] = q->ghostNodeProcs[nodePerm[node]];
614:     }
615:     PetscFree(q->ghostNodeProcs);
616:     q->ghostNodeProcs = temp;
617:     PetscFree(nodePerm);
618:   }
619: #ifdef PETSC_USE_BOPT_g
620:   /* Consistency check for ghost nodes */
621:   for(node = 0; node < numGhostNodes; node++) {
622:     if ((q->ghostNodes[node] <  q->firstNode[q->ghostNodeProcs[node]]) ||
623:         (q->ghostNodes[node] >= q->firstNode[q->ghostNodeProcs[node]+1])) {
624:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
625:     }
626:   }
627: #endif
628:   return(0);
629: }

631: #undef  __FUNCT__
632: #define __FUNCT__ "PartitionGetNewGhostNodes_Parallel"
633: int PartitionGetNewGhostNodes_Parallel(Partition p, int *sendGhostNodes, int *sendNodes, int *recvGhostNodes, int *recvNodes)
634: {
635:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
636:   Mesh                     mesh            = p->mesh;
637:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
638:   int                      numLocNodes     = q->numLocNodes;
639:   int                     *firstNode       = q->firstNode;
640:   double                  *nodes           = tri->nodes;
641:   int                     *markers         = tri->markers;
642:   int                     *degrees         = tri->degrees;
643:   int                      numProcs        = p->numProcs;
644:   int                      rank            = p->rank;
645:   int                      numGhostNodes;
646:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
647:   int                      numSendNodes;    /* Total number of ghost nodes requested from other domains (their data is received) */
648:   int                      numRecvNodes;    /* Total number of ghost nodes requested by other domains (local data is sent) */
649:   int                     *sumSendNodes;    /* Prefix sums of sendGhostNodes */
650:   int                     *sumRecvNodes;    /* Prefix sums of recvGhostNodes */
651:   int                     *sendGhostCoords; /* Number of coordinate values (2 per node) needed from a given processor */
652:   int                     *recvGhostCoords; /* Number of coordinate values (2 per node) needed by a given processor */
653:   int                     *sumSendCoords;   /* Prefix sums of sendGhostCoords */
654:   int                     *sumRecvCoords;   /* Prefix sums of recvGhostCoords */
655:   double                  *sendCoords;      /* Coordinates of ghost nodes for other domains */
656:   double                  *recvCoords;      /* Coordinates of ghost nodes for this domain */
657:   int                     *sendMarkers;     /* Markers of ghost nodes for other domains */
658:   int                     *recvMarkers;     /* Markers of ghost nodes for this domain */
659:   int                     *sendDegrees;     /* Degrees of ghost nodes for other domains */
660:   int                     *recvDegrees;     /* Degrees of ghost nodes for this domain */
661:   int                     *offsets;         /* Offsets into the send array for each destination proc */
662:   int                     *temp;
663:   double                  *temp2;
664:   int                      proc, node, locNode, i;
665:   int                      ierr;

668:   PetscMalloc(numProcs * sizeof(int), &sumSendNodes);
669:   PetscMalloc(numProcs * sizeof(int), &sumRecvNodes);
670:   PetscMalloc(numProcs * sizeof(int), &sendGhostCoords);
671:   PetscMalloc(numProcs * sizeof(int), &recvGhostCoords);
672:   PetscMalloc(numProcs * sizeof(int), &sumSendCoords);
673:   PetscMalloc(numProcs * sizeof(int), &sumRecvCoords);
674:   PetscMalloc(numProcs * sizeof(int), &offsets);
675:   PetscMemzero(sumSendNodes, numProcs * sizeof(int));
676:   PetscMemzero(sumRecvNodes, numProcs * sizeof(int));
677:   PetscMemzero(offsets,      numProcs * sizeof(int));

679:   /* Compute new ghost node offsets */
680:   for(proc = 1; proc < numProcs; proc++) {
681:     sumSendNodes[proc] = sumSendNodes[proc-1] + sendGhostNodes[proc-1];
682:     sumRecvNodes[proc] = sumRecvNodes[proc-1] + recvGhostNodes[proc-1];
683:   }
684:   numSendNodes = sumSendNodes[numProcs-1] + sendGhostNodes[numProcs-1];
685:   numRecvNodes = sumRecvNodes[numProcs-1] + recvGhostNodes[numProcs-1];

687:   /* Get numbers of ghost nodes to provide */
688:   MPI_Alltoallv(sendNodes, sendGhostNodes, sumSendNodes, MPI_INT,
689:                        recvNodes, recvGhostNodes, sumRecvNodes, MPI_INT, p->comm);
690: 

692:   /* Get node coordinates, markers, and degrees */
693:   for(proc = 0; proc < numProcs; proc++) {
694:     sendGhostCoords[proc] = sendGhostNodes[proc]*2;
695:     recvGhostCoords[proc] = recvGhostNodes[proc]*2;
696:     sumSendCoords[proc]   = sumSendNodes[proc]*2;
697:     sumRecvCoords[proc]   = sumRecvNodes[proc]*2;
698:   }
699:   if (numSendNodes) {
700:     PetscMalloc(numSendNodes*2 * sizeof(double), &recvCoords);
701:     PetscMalloc(numSendNodes   * sizeof(int),    &recvMarkers);
702:     PetscMalloc(numSendNodes   * sizeof(int),    &recvDegrees);
703:   }
704:   if (numRecvNodes) {
705:     PetscMalloc(numRecvNodes*2 * sizeof(double), &sendCoords);
706:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendMarkers);
707:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendDegrees);
708:     for(node = 0; node < numRecvNodes; node++) {
709:       locNode = recvNodes[node] - firstNode[rank];
710: #ifdef PETSC_USE_BOPT_g
711:       if ((locNode < 0) || (locNode >= numLocNodes)) {
712:         SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d should be in [0,%d)", locNode, numLocNodes);
713:       }
714: #endif
715:       for(i = 0; i < 2; i++)
716:         sendCoords[node*2+i] = nodes[locNode*2+i];
717:       sendMarkers[node] = markers[locNode];
718:       sendDegrees[node] = degrees[locNode];
719:     }
720:   }

722:   /* Communicate node coordinates and markers and degrees */
723:   MPI_Alltoallv(sendCoords,  recvGhostCoords, sumRecvCoords, MPI_DOUBLE,
724:                        recvCoords,  sendGhostCoords, sumSendCoords, MPI_DOUBLE, p->comm);
725: 
726:   MPI_Alltoallv(sendMarkers, recvGhostNodes,  sumRecvNodes,  MPI_INT,
727:                        recvMarkers, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
728: 
729:   MPI_Alltoallv(sendDegrees, recvGhostNodes,  sumRecvNodes,  MPI_INT,
730:                        recvDegrees, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
731: 

733:   /* Add in new ghost nodes */
734:   numGhostNodes = q->numOverlapNodes - numLocNodes;
735:   if (numSendNodes > 0) {
736:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
737:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
738:     for(node = 0; node < numSendNodes; node++)
739:       temp[numGhostNodes+node] = sendNodes[node];
740:     if (q->ghostNodes != PETSC_NULL) {
741:       PetscFree(q->ghostNodes);
742:     }
743:     q->ghostNodes = temp;

745:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
746:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
747:     for(proc = 0, node = 0; proc < numProcs; proc++) {
748:       for(i = 0; i < sendGhostNodes[proc]; i++)
749:         temp[numGhostNodes+(node++)] = proc;
750:     }
751:     if (q->ghostNodeProcs != PETSC_NULL) {
752:       PetscFree(q->ghostNodeProcs);
753:     }
754:     q->ghostNodeProcs = temp;

756:     PetscMalloc((q->numOverlapNodes + numSendNodes)*2 * sizeof(double), &temp2);
757:     PetscMemcpy(temp2, nodes, q->numOverlapNodes*2 * sizeof(double));
758:     for(node = 0; node < numSendNodes*2; node++)
759:       temp2[q->numOverlapNodes*2+node] = recvCoords[node];
760:     PetscFree(nodes);
761:     tri->nodes = temp2;

763:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
764:     PetscMemcpy(temp, markers, q->numOverlapNodes * sizeof(int));
765:     for(node = 0; node < numSendNodes; node++)
766:       temp[q->numOverlapNodes+node] = recvMarkers[node];
767:     PetscFree(markers);
768:     tri->markers = temp;

770:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
771:     PetscMemcpy(temp, degrees, q->numOverlapNodes * sizeof(int));
772:     for(node = 0; node < numSendNodes; node++)
773:       temp[q->numOverlapNodes+node] = recvDegrees[node];
774:     PetscFree(degrees);
775:     tri->degrees = temp;
776:   }

778:   /* Resort ghost nodes and remove duplicates */
779:   numGhostNodes     += numSendNodes;
780:   PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
781:   q->numOverlapNodes = numLocNodes + numGhostNodes;

783:   PetscMalloc(numGhostNodes * sizeof(int), &temp);
784:   for(node = 0; node < numGhostNodes; node++)
785:     temp[node] = q->ghostNodeProcs[nodePerm[node]];
786:   for(node = 0; node < numGhostNodes; node++)
787:     q->ghostNodeProcs[node] = temp[node];

789:   for(node = 0; node < numGhostNodes; node++)
790:     temp[node] = tri->markers[mesh->numNodes+nodePerm[node]];
791:   for(node = 0; node < numGhostNodes; node++)
792:     tri->markers[mesh->numNodes+node] = temp[node];

794:   for(node = 0; node < numGhostNodes; node++)
795:     temp[node] = tri->degrees[mesh->numNodes+nodePerm[node]];
796:   for(node = 0; node < numGhostNodes; node++)
797:     tri->degrees[mesh->numNodes+node] = temp[node];
798:   PetscFree(temp);

800:   PetscMalloc(numGhostNodes*2 * sizeof(double), &temp2);
801:   for(node = 0; node < numGhostNodes; node++) {
802:     temp2[node*2]   = tri->nodes[(mesh->numNodes+nodePerm[node])*2];
803:     temp2[node*2+1] = tri->nodes[(mesh->numNodes+nodePerm[node])*2+1];
804:   }
805:   for(node = 0; node < numGhostNodes; node++) {
806:     tri->nodes[(mesh->numNodes+node)*2]   = temp2[node*2];
807:     tri->nodes[(mesh->numNodes+node)*2+1] = temp2[node*2+1];
808:   }
809:   PetscFree(temp2);
810:   PetscFree(nodePerm);

812: #ifdef PETSC_USE_BOPT_g
813:   /* Consistency check for ghost nodes */
814:   for(node = 0; node < numGhostNodes; node++) {
815:     if ((q->ghostNodes[node] <  firstNode[q->ghostNodeProcs[node]]) ||
816:         (q->ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1])) {
817:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
818:     }
819:   }
820: #endif

822:   /* Cleanup */
823:   PetscFree(sumSendNodes);
824:   PetscFree(sendGhostCoords);
825:   PetscFree(sumSendCoords);
826:   PetscFree(offsets);
827:   if (numSendNodes) {
828:     PetscFree(recvCoords);
829:     PetscFree(recvMarkers);
830:     PetscFree(recvDegrees);
831:   }
832:   PetscFree(sumRecvNodes);
833:   PetscFree(recvGhostCoords);
834:   PetscFree(sumRecvCoords);
835:   if (numRecvNodes) {
836:     PetscFree(sendCoords);
837:     PetscFree(sendMarkers);
838:     PetscFree(sendDegrees);
839:   }
840:   return(0);
841: }

843: #undef  __FUNCT__
844: #define __FUNCT__ "PartitionGetNewGhostElements_Serial"
845: int PartitionGetNewGhostElements_Serial(Partition p, int *newProcElements, int *newElements)
846: {
847:   int  numLocElements   = p->numLocElements;
848:   int  numGhostElements = p->numOverlapElements - numLocElements;
849:   int  numProcs         = p->numProcs;
850:   int *elemPerm;        /* The new permutation for the sorted ghost elements */
851:   int  numNewElements;  /* Total number of new ghost elements to add */
852:   int *temp;
853:   int  proc, elem, i;
854:   int  ierr;

857:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++)
858:     numNewElements += newProcElements[proc];

860:   /* Add in new ghost elements */
861:   if (numNewElements > 0) {
862:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
863:     PetscMemcpy(temp, p->ghostElements, numGhostElements * sizeof(int));
864:     for(elem = 0; elem < numNewElements; elem++)
865:       temp[numGhostElements+elem] = newElements[elem];
866:     if (p->ghostElements != PETSC_NULL) {
867:       PetscFree(p->ghostElements);
868:     }
869:     p->ghostElements = temp;

871:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
872:     PetscMemcpy(temp, p->ghostElementProcs, numGhostElements * sizeof(int));
873:     for(proc = 0, elem = 0; proc < numProcs; proc++) {
874:       for(i = 0; i < newProcElements[proc]; i++)
875:         temp[numGhostElements+(elem++)] = proc;
876:     }
877:     if (p->ghostElementProcs != PETSC_NULL) {
878:       PetscFree(p->ghostElementProcs);
879:     }
880:     p->ghostElementProcs = temp;

882:     /* Resort ghost elements and remove duplicates */
883:     numGhostElements += numNewElements;
884:     PartitionSortGhosts_Private(p, &numGhostElements, p->ghostElements, &elemPerm);
885:     p->numOverlapElements = numLocElements + numGhostElements;
886:     PetscMalloc(numGhostElements * sizeof(int), &temp);
887:     for(elem = 0; elem < numGhostElements; elem++)
888:       temp[elem] = p->ghostElementProcs[elemPerm[elem]];
889:     PetscFree(p->ghostElementProcs);
890:     p->ghostElementProcs = temp;
891:     PetscFree(elemPerm);
892:   }
893: #ifdef PETSC_USE_BOPT_g
894:   /* Consistency check for ghost elements */
895:   for(elem = 0; elem < numGhostElements; elem++) {
896:     if ((p->ghostElements[elem] <  p->firstElement[p->ghostElementProcs[elem]]) ||
897:         (p->ghostElements[elem] >= p->firstElement[p->ghostElementProcs[elem]+1])) {
898:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost element source processor");
899:     }
900:   }
901: #endif
902:   return(0);
903: }

905: #undef  __FUNCT__
906: #define __FUNCT__ "PartitionCreateElementMap_METIS"
907: /*
908:   PartitionCreateElementMap_METIS - This function creates a map from elements to domains,
909:   using METIS to minimize the cut and approximately balance the sizes.

911:   Input Parameters:
912: + p           - The Partition
913: - numElements - The local number of elements

915:   Output Parameter:
916: . elementMap  - The map from elements to domains

918: .seealso: PartitionNodes_Private()
919: */
920: int PartitionCreateElementMap_METIS(Partition p, int numElements, int **elementMap)
921: {
922: #ifdef PETSC_HAVE_PARMETIS
923:   Mesh mesh = p->mesh;
924:   int       *elemProcs;     /* The processor assigned to each element */
925:   int       *elemOffsets;   /* The offsets into elemNeighbors of each element row for dual in CSR format */
926:   int       *elemNeighbors; /* The list of element neighbors for dual in CSR format */
927:   int       *edgeWeights;   /* The list of edge weights for dual in CSR format */
928:   int        weight;        /* A weight for constrained nodes */
929:   int        options[5];    /* The option flags for METIS */
930:   PetscTruth opt;
931:   int        ierr;

934:   /* Create the dual graph in distributed CSR format */
935:   weight = 0;
936:   ierr   = PetscOptionsGetInt(mesh->prefix, "-mesh_partition_weight", &weight, &opt);
937:   ierr   = MeshCreateDualCSR(mesh, &elemOffsets, &elemNeighbors, &edgeWeights, weight);

939:   /* Partition graph */
940:   if (numElements != p->numLocElements) {
941:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d for ParMETIS, should be %d", numElements, p->numLocElements);
942:   }
943:   PetscMalloc(numElements * sizeof(int), &elemProcs);
944:   options[0] = 0;   /* Returns the edge cut */
945:   options[1] = 150; /* The folding factor, 0 = no folding */
946:   options[2] = 1;   /* Serial initial partition */
947:   options[3] = 0;   /* C style numbering */
948:   options[4] = 0;   /* No timing information */
949:   PARKMETIS(p->firstElement, elemOffsets, PETSC_NULL, elemNeighbors, PETSC_NULL, elemProcs, options, p->comm);

951:   /* Destroy dual graph */
952:   MeshDestroyDualCSR(mesh, elemOffsets, elemNeighbors, edgeWeights);

954:   *elementMap = elemProcs;
955:   return(0);
956: #else
957:   SETERRQ(PETSC_ERR_SUP, "You must obtain George Karypis' ParMETIS software");
958: #endif
959: }
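
/*
  A worked example of the distributed CSR dual graph that MeshCreateDualCSR()
  is expected to produce for the call above (structure inferred from its use):
  for two triangles 0 and 1 sharing one edge, elemOffsets[] = {0, 1, 2} and
  elemNeighbors[] = {1, 0}, i.e. the dual neighbors of element e are listed in
  positions [elemOffsets[e], elemOffsets[e+1]).
*/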

961: #undef  __FUNCT__
962: #define __FUNCT__ "PartitionCreateElementMap_NodeBased"
963: /*
964:   PartitionCreateElementMap_NodeBased - This function creates a map from elements to domains,
965:   using a previous partition of the nodes.

967:   Input Parameters:
968: + p           - The Partition
969: - numElements - The local number of elements

971:   Output Parameter:
972: . elementMap  - The map from elements to domains

974: .seealso: PartitionNodes_Private()
975: */
976: int PartitionCreateElementMap_NodeBased(Partition p, int numElements, int **elementMap)
977: {
978:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
979:   Mesh                     mesh         = p->mesh;
980:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
981:   int                      numCorners   = mesh->numCorners;
982:   int                     *elements     = tri->faces;
983:   int                     *firstElement = p->firstElement;
984:   int                     *firstNode    = q->firstNode;
985:   int                      numProcs     = p->numProcs;
986:   int                      rank         = p->rank;
987:   int                      startElement = firstElement[rank];
988:   int                      endElement   = firstElement[rank+1];
989:   int                     *elemProcs;     /* The processor assigned to each element */
990:   int                      proc, elem, corner, node;
991:   int                      ierr;

994:   if (numElements != p->numLocElements) {
995:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d should be %d", numElements, p->numLocElements);
996:   }
997:   /* Assign each element to the lowest-numbered foreign domain owning one of its nodes; keep it if all its nodes are local */
998:   PetscMalloc(numElements * sizeof(int), &elemProcs);
999:   for(elem = 0; elem < numElements; elem++) {
1000:     elemProcs[elem] = -1;
1001:   }

1003:   for(elem = startElement; elem < endElement; elem++) {
1004:     for(corner = 0; corner < numCorners; corner++) {
1005:       node = elements[elem*numCorners+corner];
1006:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1007:         /* Get the domain of the node */
1008:         for(proc = 0; proc < numProcs; proc++) {
1009:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1010:         }
1011:         if ((elemProcs[elem-startElement] < 0) || (proc < elemProcs[elem-startElement]))
1012:           elemProcs[elem-startElement] = proc;
1013:       }
1014:     }
1015:     /* If no one else claims it, take the element */
1016:     if (elemProcs[elem-startElement] < 0) {
1017:       elemProcs[elem-startElement] = rank;
1018:     }
1019:   }

1021:   *elementMap = elemProcs;
1022:   return(0);
1023: }
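
/*
  Worked example of the assignment rule above: with two processors and
  firstNode[] = {0, 4, 8}, a rank-0 element whose corners are global nodes
  {2, 3, 5} touches the foreign node 5 owned by proc 1, so the element is
  assigned to the lowest-numbered foreign domain, proc 1; an element whose
  corners are all local, say {1, 2, 3}, is claimed by rank 0 itself.
*/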

1025: /*
1026:   PartitionCreateElementPartition_Private - This function uses the element map to create
1027:   partition structures for element-based data.

1029:   Input Parameters:
1030: + p              - The Partition
1031: - elementMap     - The map from elements to domains

1033:   Output Parameters:
1034: . ordering       - The new element ordering

1036:   Output Parameters in Partition_Triangular_2D:
1037: + numLocElements - The number of local elements
1038: - firstElement   - The first element in each domain

1040: .seealso: PartitionElements_Private()
1041: */
1042: #undef  __FUNCT__
1043: #define __FUNCT__ "PartitionCreateElementPartition_Private"
1044: int PartitionCreateElementPartition_Private(Partition p, int *elementMap, AO *ordering)
1045: {
1046:   int              numLocElements = p->numLocElements; /* Number of local elements before partitioning */
1047:   int             *firstElement   = p->firstElement;
1048:   int              numProcs       = p->numProcs;
1049:   int              rank           = p->rank;
1050:   int             *partSendElements;     /* The number of elements sent to each processor for partitioning */
1051:   int             *sumSendElements;      /* The prefix sums of partSendElements */
1052:   int             *partRecvElements;     /* The number of elements received from each processor for partitioning */
1053:   int             *sumRecvElements;      /* The prefix sums of partRecvElements */
1054:   int             *offsets;              /* The offsets into the send and receive arrays */
1055:   int             *sendBuffer;
1056:   int             *AppOrdering, *PetscOrdering;
1057:   int              proc, elem;
1058:   int              ierr;

1061:   /* Initialize communication */
1062:   PetscMalloc(numProcs * sizeof(int), &partSendElements);
1063:   PetscMalloc(numProcs * sizeof(int), &sumSendElements);
1064:   PetscMalloc(numProcs * sizeof(int), &partRecvElements);
1065:   PetscMalloc(numProcs * sizeof(int), &sumRecvElements);
1066:   PetscMalloc(numProcs * sizeof(int), &offsets);
1067:   PetscMemzero(partSendElements,  numProcs * sizeof(int));
1068:   PetscMemzero(sumSendElements,   numProcs * sizeof(int));
1069:   PetscMemzero(partRecvElements,  numProcs * sizeof(int));
1070:   PetscMemzero(sumRecvElements,   numProcs * sizeof(int));
1071:   PetscMemzero(offsets,           numProcs * sizeof(int));

1073:   /* Get sizes of interior element number blocks to send to each processor */
1074:   for(elem = 0; elem < numLocElements; elem++) {
1075:     partSendElements[elementMap[elem]]++;
1076:   }

1078:   /* Get sizes of interior element number blocks to receive from each processor */
1079:   MPI_Alltoall(partSendElements, 1, MPI_INT, partRecvElements, 1, MPI_INT, p->comm);

1081:   /* Calculate offsets into the interior element number send array */
1082:   for(proc = 1; proc < numProcs; proc++) {
1083:     sumSendElements[proc] = sumSendElements[proc-1] + partSendElements[proc-1];
1084:     offsets[proc]         = sumSendElements[proc];
1085:   }

1087:   /* Calculate offsets into the interior element number receive array */
1088:   for(proc = 1; proc < numProcs; proc++) {
1089:     sumRecvElements[proc] = sumRecvElements[proc-1] + partRecvElements[proc-1];
1090:   }

1092:   /* Send interior element numbers to each processor (copying of elements already in place could be avoided) */
1093:   p->numLocElements = sumRecvElements[numProcs-1] + partRecvElements[numProcs-1];
1094:   PetscMalloc(numLocElements    * sizeof(int), &sendBuffer);
1095:   PetscMalloc(p->numLocElements * sizeof(int), &AppOrdering);
1096:   PetscMalloc(p->numLocElements * sizeof(int), &PetscOrdering);
1097:   for(elem = 0; elem < numLocElements; elem++) {
1098:     sendBuffer[offsets[elementMap[elem]]++] = elem + firstElement[rank];
1099:   }
1100:   MPI_Alltoallv(sendBuffer,  partSendElements, sumSendElements, MPI_INT,
1101:                        AppOrdering, partRecvElements, sumRecvElements, MPI_INT, p->comm);
1102: 

1104:   /* If the mesh was initially distributed, we would need to send the elements themselves here */

1106:   /* Recompute size and offset of each domain */
1107:   MPI_Allgather(&p->numLocElements, 1, MPI_INT, &firstElement[1], 1, MPI_INT, p->comm);
1108:   for(proc = 1, firstElement[0] = 0; proc <= numProcs; proc++) {
1109:     firstElement[proc] = firstElement[proc] + firstElement[proc-1];
1110:   }

1112:   /* Compute the new PETSc numbering for the local elements */
1113:   for(elem = 0; elem < p->numLocElements; elem++) {
1114:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1115:     PetscOrdering[elem] = elem + firstElement[rank];
1116:   }

1118:   /* Cleanup */
1119:   PetscFree(partSendElements);
1120:   PetscFree(sumSendElements);
1121:   PetscFree(partRecvElements);
1122:   PetscFree(sumRecvElements);
1123:   PetscFree(offsets);
1124:   PetscFree(sendBuffer);

1126:   /* Create the global element reordering */
1127:   AOCreateBasic(p->comm, p->numLocElements, AppOrdering, PetscOrdering, ordering);
1128:   PetscLogObjectParent(p, p->ordering);

1130:   PetscFree(AppOrdering);
1131:   PetscFree(PetscOrdering);
1132:   return(0);
1133: }
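
/*
  Worked example for the AOCreateBasic() call above: if this rank receives the
  elements originally numbered {7, 2, 9} (AppOrdering) and its new contiguous
  range starts at firstElement[rank] = 4, then PetscOrdering[] = {4, 5, 6} and
  the resulting AO maps application number 7 to PETSc number 4, 2 to 5, and
  9 to 6.
*/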

1135: #undef  __FUNCT__
1136: #define __FUNCT__ "PartitionPermuteElements_Private"
1137: /*
1138:   PartitionPermuteElements_Private - This function permutes the data which is implicitly
1139:   indexed by element number

1141:   Input Parameter:
1142: . p         - The Partition

1144:   Output Parameter in Mesh_Triangular:
1145: + faces     - The nodes on each element
1146: - neighbors - The neighbors of each element

1148: .seealso: PartitionElements_Private()
1149: */
1150: int PartitionPermuteElements_Private(Partition p)
1151: {
1152:   Mesh             mesh = p->mesh;
1153:   Mesh_Triangular *tri  = (Mesh_Triangular *) mesh->data;
1154:   int              ierr;

1157:   AOApplicationToPetscPermuteInt(p->ordering, mesh->numCorners, tri->faces);
1158:   AOApplicationToPetscPermuteInt(p->ordering, 3,                tri->neighbors);
1159:   return(0);
1160: }

1162: #undef  __FUNCT__
1163: #define __FUNCT__ "PartitionRenumberElements_Private"
1164: /*
1165:   PartitionRenumberElements_Private - This function renumbers the element-based data globally in
1166:   order to make the canonical numbers sequential in each domain.

1168:   Input Parameter:
1169: . p         - The Partition

1171:   Output Parameter in Mesh_Triangular:
1172: . neighbors - The neighbors of each element

1174: .seealso: PartitionElements_Private()
1175: */
1176: int PartitionRenumberElements_Private(Partition p)
1177: {
1178:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
1179:   int                      numElements = p->numElements;
1180:   int                     *neighbors   = tri->neighbors;
1181:   int                      ierr;

1184:   AOApplicationToPetsc(p->ordering, numElements*3, neighbors);
1185:   return(0);
1186: }

1188: #undef  __FUNCT__
1189: #define __FUNCT__ "PartitionCalcGhostElements_Private"
1190: /*
1191:   PartitionCalcGhostElements_Private - This function calculates the ghost elements.

1193:   Input Parameters:
1194: . p         - The Partition

1196:   Output Parameters in Partition_Triangular_2D:

1198: .seealso: PartitionElements_Private()
1199: */
1200: int PartitionCalcGhostElements_Private(Partition p)
1201: {
1202:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
1203:   Mesh                     mesh         = p->mesh;
1204:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
1205:   int                      numCorners   = mesh->numCorners;
1206:   int                      numElements  = p->numElements;
1207:   int                     *elements     = tri->faces;
1208:   int                     *neighbors    = tri->neighbors;
1209:   int                     *firstElement = p->firstElement;
1210:   int                      numProcs     = p->numProcs;
1211:   int                      rank         = p->rank;
1212:   int                      numLocNodes  = q->numLocNodes;
1213:   int                      startNode    = q->firstNode[rank];
1214:   PetscTruth               isNodePart   = q->isNodePartitioned;
1215:   int                     *newProcElements; /* The number of new ghost elements from each processor */
1216:   int                      numNewElements;  /* The number of new ghost elements */
1217:   int                     *newElements;     /* The new ghost elements */
1218:   int                     *offsets;         /* The offsets into the send and receive arrays */
1219:   int                      degree;          /* The degree of a vertex */
1220:   int                     *support;         /* The list of elements in the support of a basis function */
1221:   int                     *elemMap;
1222:   int                      proc, elem, bElem, sElem, nElem, corner, neighbor, node;
1223:   int                      ierr;

1226:   PetscMalloc(numElements  * sizeof(int), &elemMap);
1227:   PetscMalloc(numProcs     * sizeof(int), &newProcElements);
1228:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1229:   PetscMemzero(newProcElements,  numProcs     * sizeof(int));
1230:   PetscMemzero(offsets,          (numProcs+1) * sizeof(int));
1231:   for(elem = 0; elem < numElements; elem++) {
1232:     elemMap[elem] = -1;
1233:   }
1234:   for(elem = 0; elem < numElements; elem++) {
1235:     if ((elem >= firstElement[rank]) && (elem < firstElement[rank+1])) {
1236:       /* Find a boundary element */
1237:       for(neighbor = 0; neighbor < 3; neighbor++) {
1238:         bElem = neighbors[elem*3+neighbor];
1239:         if ((bElem >= 0) && ((bElem < firstElement[rank]) || (bElem >= firstElement[rank+1])))
1240:           break;
1241:       }

1243:       if (neighbor < 3) {
1244:         /* Check the support of each vertex for off-processor elements */
1245:         for(corner = 0; corner < numCorners; corner++) {
1246:           node = elements[elem*numCorners+corner];
1247:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1248:           for(sElem = 0; sElem < degree; sElem++) {
1249:             nElem = support[sElem];
1250:             if (elemMap[nElem] >= 0) continue;
1251:             for(proc = 0; proc < numProcs; proc++) {
1252:               if ((proc != rank) && (nElem >= firstElement[proc]) && (nElem < firstElement[proc+1])) {
1253:                 elemMap[nElem] = proc;
1254:                 break;
1255:               }
1256:             }
1257:           }
1258:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
1259:         }
1260:       }
1261:     } else if (isNodePart == PETSC_TRUE) {
1262:       if (elemMap[elem] >= 0) continue;
1263:       /* We may also need elements on which we have nodes, but are not attached to */
1264:       for(corner = 0; corner < numCorners; corner++) {
1265:         node = elements[elem*numCorners+corner] - startNode;
1266:         if ((node >= 0) && (node < numLocNodes)) {
1267:           for(proc = 0; proc < numProcs; proc++) {
1268:             if ((elem >= firstElement[proc]) && (elem < firstElement[proc+1])) {
1269:               elemMap[elem] = proc;
1270:               break;
1271:             }
1272:           }
1273:         }
1274:       }
1275:     }
1276:   }

1278:   /* Compute new ghost element offsets */
1279:   for(elem = 0; elem < numElements; elem++) {
1280:     if (elemMap[elem] >= 0) {
1281:       newProcElements[elemMap[elem]]++;
1282:     }
1283:   }
1284:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++) {
1285:     numNewElements  += newProcElements[proc];
1286:     offsets[proc+1]  = offsets[proc] + newProcElements[proc];
1287:   }

1289:   /* Get ghost nodes */
1290:   if (numNewElements > 0) {
1291:     PetscMalloc(numNewElements * sizeof(int), &newElements);
1292:     for(elem = 0; elem < numElements; elem++) {
1293:       if (elemMap[elem] >= 0) {
1294:         newElements[offsets[elemMap[elem]]++] = elem;
1295:       }
1296:     }
1297:   }
1298:   for(proc = 1; proc < numProcs-1; proc++) {
1299:     if (offsets[proc] - offsets[proc-1]  != newProcElements[proc]) {
1300:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc %d should be %d",
1301:                offsets[proc] - offsets[proc-1], proc, newProcElements[proc]);
1302:     }
1303:   }
1304:   if (offsets[0] != newProcElements[0]) {
1305:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc 0 should be %d",
1306:              offsets[0], newProcElements[0]);
1307:   }

1309:   /* Add new ghosts */
1310:   p->numOverlapElements = p->numLocElements;
1311:   PartitionGetNewGhostElements_Serial(p, newProcElements, newElements);

1313:   /* Cleanup */
1314:   PetscFree(elemMap);
1315:   PetscFree(newProcElements);
1316:   PetscFree(offsets);
1317:   if (numNewElements > 0) {
1318:     PetscFree(newElements);
1319:   }
1320:   return(0);
1321: }

1323: #undef  __FUNCT__
1324: #define __FUNCT__ "PartitionDistributeElements_Private"
1325: /*
1326:   PartitionDistributeElements_Private - This function distributes the element-based data, and
1327:   permutes arrays which are implicitly indexed by element number.

1329:   Input Parameters:
1330: . p         - The Partition

1332:   Output Parameters in Mesh_Triangular:
1333: + faces     - The nodes on each element
1334: - neighbors - The element neighbors

1336: .seealso: PartitionElements_Private()
1337: */
1338: int PartitionDistributeElements_Private(Partition p)
1339: {
1340:   Mesh             mesh               = p->mesh;
1341:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1342:   int              numLocElements     = p->numLocElements;
1343:   int              numOverlapElements = p->numOverlapElements;
1344:   int              numGhostElements   = numOverlapElements - numLocElements;
1345:   int              numCorners         = mesh->numCorners;
1346:   int             *firstElement       = p->firstElement;
1347:   int             *ghostElements      = p->ghostElements;
1348:   int              rank               = p->rank;
1349:   int             *temp;
1350:   int              elem, corner, neighbor;
1351:   int              ierr;

1353:   /* Note here that we can use PetscMemcpy() for the interior variables because we already permuted the
1354:      arrays so that ghost elements could be computed.
1355:   */
1357:   mesh->numFaces = numLocElements;
1358:   PetscMalloc(numOverlapElements*numCorners * sizeof(int), &temp);
1359:   /* Interior faces */
1360:   PetscMemcpy(temp, &tri->faces[firstElement[rank]*numCorners], numLocElements*numCorners * sizeof(int));
1361:   /* Ghost faces */
1362:   for(elem = 0; elem < numGhostElements; elem++) {
1363:     for(corner = 0; corner < numCorners; corner++) {
1364:       temp[(numLocElements+elem)*numCorners+corner] = tri->faces[ghostElements[elem]*numCorners+corner];
1365:     }
1366:   }
1367:   PetscFree(tri->faces);
1368:   tri->faces = temp;
1369:   PetscLogObjectMemory(p, numGhostElements*numCorners * sizeof(int));

1371:   PetscMalloc(numOverlapElements*3 * sizeof(int), &temp);
1372:   /* Interior neighbors */
1373:   PetscMemcpy(temp, &tri->neighbors[firstElement[rank]*3], numLocElements*3 * sizeof(int));
1374:   /* Ghost neighbors */
1375:   for(elem = 0; elem < numGhostElements; elem++) {
1376:     for(neighbor = 0; neighbor < 3; neighbor++) {
1377:       temp[(numLocElements+elem)*3+neighbor] = tri->neighbors[ghostElements[elem]*3+neighbor];
1378:     }
1379:   }
1380:   PetscFree(tri->neighbors);
1381:   tri->neighbors = temp;
1382:   PetscLogObjectMemory(p, numGhostElements*3 * sizeof(int));

1384:   return(0);
1385: }

1387: #undef  __FUNCT__
1389: int PartitionElementGlobalToLocal_Private(Partition p)
1390: {
1391:   Mesh_Triangular *tri                = (Mesh_Triangular *) p->mesh->data;
1392:   int              numOverlapElements = p->numOverlapElements;
1393:   int             *neighbors          = tri->neighbors;
1394:   int              neighbor;
1395:   int              ierr;

1398:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
1399:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
1400:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
1401:   }
1402:   return(0);
1403: }
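
/* The convention established above: interior elements map into [0, numLocElements), ghost
   elements map into [numLocElements, numOverlapElements), boundary markers (-1) pass
   through unchanged, and anything neither interior nor ghost becomes -2. A simplified
   sketch of that convention (illustrative only; the real PartitionGlobalToLocalElementIndex()
   also maps ghost elements, which this sketch omits): */
static int ExampleElementGlobalToLocal(Partition p, int gElem, int *lElem)
{
  if (gElem == -1) {
    *lElem = -1;                                /* boundary marker is preserved */
  } else if ((gElem >= p->firstElement[p->rank]) && (gElem < p->firstElement[p->rank+1])) {
    *lElem = gElem - p->firstElement[p->rank];  /* interior: shift into the local range */
  } else {
    *lElem = -2;                                /* off-processor and not a ghost */
  }
  return(0);
}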

1405: #undef  __FUNCT__
1407: int PartitionGetNewGhostNodes_Element(Partition p)
1408: {
1409:   Partition_Triangular_2D *q                = (Partition_Triangular_2D *) p->data;
1410:   Mesh                     mesh             = p->mesh;
1411:   Mesh_Triangular         *tri              = (Mesh_Triangular *) mesh->data;
1412:   int                      numCorners       = mesh->numCorners;
1413:   int                     *elements         = tri->faces;
1414:   int                     *firstElement     = p->firstElement;
1415:   int                      numGhostElements = p->numOverlapElements - p->numLocElements;
1416:   int                     *ghostElements    = p->ghostElements;
1417:   int                      numNodes         = q->numNodes;
1418:   int                     *firstNode        = q->firstNode;
1419:   int                      numProcs         = p->numProcs;
1420:   int                      rank             = p->rank;
1421:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1422:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1423:   int                     *newNodes = PETSC_NULL; /* New ghost nodes for this domain */
1424:   int                     *offsets;      /* The offsets into newNodes[] */
1425:   int                     *nodeMap;      /* The map of nodes to processors */
1426:   int                      proc, elem, corner, node, gNode;
1427:   int                      ierr;

1430:   if (q->isNodePartitioned == PETSC_FALSE)
1431:     return(0);
1432:   /* Initialize communication */
1433:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1434:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1435:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1436:   PetscMemzero(newProcNodes, numProcs      * sizeof(int));
1437:   PetscMemzero(offsets,     (numProcs+1)   * sizeof(int));
1438:   for(node = 0; node < numNodes; node++) {
1439:     nodeMap[node] = -1;
1440:   }

1442:   /* Check for new ghost nodes */
1443:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1444:     for(corner = 0; corner < numCorners; corner++) {
1445:       node = elements[elem*numCorners+corner];
1446:       if (nodeMap[node] >= 0) continue;

1448:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1449:         /* Get the domain of the node */
1450:         for(proc = 0; proc < numProcs; proc++) {
1451:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1452:         }
1453:         /* Check for the presence of this node */
1454:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1455:           nodeMap[node] = proc;
1456:         }
1457:       }
1458:     }
1459:   }
1460:   for(elem = 0; elem < numGhostElements; elem++) {
1461:     for(corner = 0; corner < numCorners; corner++) {
1462:       node = elements[ghostElements[elem]*numCorners+corner];
1463:       if (nodeMap[node] >= 0) continue;

1465:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1466:         /* Get the domain of the node */
1467:         for(proc = 0; proc < numProcs; proc++) {
1468:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1469:         }
1470:         /* Check for the presence of this node */
1471:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1472:           nodeMap[node] = proc;
1473:         }
1474:       }
1475:     }
1476:   }

1478:   /* Compute new ghost node offsets */
1479:   for(node = 0; node < numNodes; node++) {
1480:     if (nodeMap[node] >= 0) {
1481:       newProcNodes[nodeMap[node]]++;
1482:     }
1483:   }
1484:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1485:     numNewNodes   += newProcNodes[proc];
1486:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1487:   }

1489:   /* Get ghost nodes */
1490:   if (numNewNodes > 0) {
1491:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1492:     for(node = 0; node < numNodes; node++) {
1493:       if (nodeMap[node] >= 0) {
1494:         newNodes[offsets[nodeMap[node]]++] = node;
1495:       }
1496:     }
1497:   }
1498:   for(proc = 1; proc < numProcs-1; proc++) {
1499:     if (offsets[proc] - offsets[proc-1]  != newProcNodes[proc]) {
1500:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1501:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1502:     }
1503:   }
1504:   if (offsets[0] != newProcNodes[0]) {
1505:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1506:   }

1508:   /* Add new ghosts */
1509:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1511:   /* Cleanup */
1512:   PetscFree(nodeMap);
1513:   PetscFree(newProcNodes);
1514:   PetscFree(offsets);
1515:   if (numNewNodes) {
1516:     PetscFree(newNodes);
1517:   }
1518:   return(0);
1519: }
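
/* The owner lookup above scans firstNode[] linearly for each candidate node. Because
   firstNode[] is nondecreasing with firstNode[0] = 0 and firstNode[numProcs] = numNodes,
   the same lookup could be a binary search; a minimal sketch (not in the original source): */
static int ExampleFindOwningProc(int *first, int numProcs, int node)
{
  int low = 0, high = numProcs;  /* invariant: first[low] <= node < first[high] */

  while (high - low > 1) {
    int mid = (low + high)/2;

    if (node < first[mid]) high = mid;
    else                   low  = mid;
  }
  return(low);                   /* the domain whose range contains node */
}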

1521: #undef  __FUNCT__
1523: int PartitionGetNewGhostNodes_Edge(Partition p)
1524: {
1525:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1526:   Mesh_Triangular         *tri       = (Mesh_Triangular *) p->mesh->data;
1527:   int                     *edges     = tri->edges;
1528:   int                     *firstEdge = q->firstEdge;
1529:   int                      numNodes  = q->numNodes;
1530:   int                     *firstNode = q->firstNode;
1531:   int                      numProcs  = p->numProcs;
1532:   int                      rank      = p->rank;
1533:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1534:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1535:   int                     *newNodes = PETSC_NULL; /* New ghost nodes for this domain */
1536:   int                     *offsets;      /* The offsets into newNodes[] */
1537:   int                     *nodeMap;      /* The map of nodes to processors */
1538:   int                      proc, edge, node, startNode, endNode, ghostNode, gNode;
1539:   int                      ierr;

1542:   /* Initialize communication */
1543:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1544:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1545:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1546:   PetscMemzero(newProcNodes, numProcs    * sizeof(int));
1547:   PetscMemzero(offsets,     (numProcs+1) * sizeof(int));
1548:   for(node = 0; node < numNodes; node++) {
1549:     nodeMap[node] = -1;
1550:   }

1552:   /* Check for new ghost nodes */
1553:   for(edge = firstEdge[rank]; edge < firstEdge[rank+1]; edge++) {
1554:     /* Check for new ghost node */
1555:     startNode = edges[edge*2];
1556:     endNode   = edges[edge*2+1];
1557:     ghostNode = -1;
1558:     if ((startNode < firstNode[rank]) || (startNode >= firstNode[rank+1])) {
1559:       ghostNode = startNode;
1560:     } else if ((endNode < firstNode[rank]) || (endNode >= firstNode[rank+1])) {
1561:       ghostNode = endNode;
1562:     }
1563:     if (ghostNode >= 0) {
1564:       /* Get the domain of the node */
1565:       for(proc = 0; proc < numProcs; proc++) {
1566:         if ((ghostNode >= firstNode[proc]) && (ghostNode < firstNode[proc+1])) break;
1567:       }
1568:       /* Check for the presence of this ghost node */
1569:       if (PartitionGhostNodeIndex_Private(p, ghostNode, &gNode) && ((nodeMap[ghostNode] < 0) || (proc < nodeMap[ghostNode]))) {
1570:         /* We must add this node as a ghost node */
1571:         nodeMap[ghostNode] = proc;
1572:       }
1573:     }
1574:   }

1576:   /* Compute new ghost node offsets */
1577:   for(node = 0; node < numNodes; node++) {
1578:     if (nodeMap[node] >= 0) {
1579:       newProcNodes[nodeMap[node]]++;
1580:     }
1581:   }
1582:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1583:     numNewNodes   += newProcNodes[proc];
1584:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1585:   }

1587:   /* Get ghost nodes */
1588:   if (numNewNodes > 0) {
1589:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1590:     for(node = 0; node < numNodes; node++) {
1591:       if (nodeMap[node] >= 0) {
1592:         newNodes[offsets[nodeMap[node]]++] = node;
1593:       }
1594:     }
1595:   }
1596:   for(proc = 1; proc < numProcs-1; proc++) {
1597:     if (offsets[proc] - offsets[proc-1]  != newProcNodes[proc]) {
1598:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1599:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1600:     }
1601:   }
1602:   if (offsets[0] != newProcNodes[0]) {
1603:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1604:   }

1606:   /* Add new ghosts */
1607:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1609:   /* Cleanup */
1610:   PetscFree(nodeMap);
1611:   PetscFree(newProcNodes);
1612:   PetscFree(offsets);
1613:   if (numNewNodes) {
1614:     PetscFree(newNodes);
1615:   }
1616:   return(0);
1617: }

1619: #undef  __FUNCT__
1621: int PartitionElements_Private(Partition p)
1622: {
1623:   int (*f)(Partition, int, int **);
1624:   int  *elementMap; /* The map from elements to domains */
1625:   int   ierr;

1628:   /* Create a new map of elements to domains */
1629:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap", (void (**)(void)) &f);
1630:   (*f)(p, p->numLocElements, &elementMap);

1632: #if 0
1633:   /* Communicate interior elements */
1634:   GridInteriorExchange(numLocElements, elementMap, p->firstElement);
1635: #endif
1636:   /* Create the element partition */
1637:   PartitionCreateElementPartition_Private(p, elementMap, &p->ordering);
1638:   PetscFree(elementMap);

1640:   /* Permute arrays implicitly indexed by element number */
1641:   PartitionPermuteElements_Private(p);

1643:   /* Globally renumber the elements to make canonical numbers sequential in each domain */
1644:   PartitionRenumberElements_Private(p);

1646:   /* Calculate ghosts */
1647:   PartitionCalcGhostElements_Private(p);
1648: 
1649:   /* Check for new ghost nodes created by the element partition */
1650:   PartitionGetNewGhostNodes_Element(p);

1652:   p->isElementPartitioned = PETSC_TRUE;
1653:   return(0);
1654: }
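
/* The element map used above is installed and retrieved through the PetscObject function
   composition mechanism. A minimal sketch of the round trip, mirroring the compose/query
   calls in this file (the name "ExampleTriangular2D_CreateElementMap" and the trivial map
   are hypothetical, purely for illustration): */
static int ExampleCreateTrivialElementMap(Partition p, int num, int **map)
{
  int elem;

  PetscMalloc(num * sizeof(int), map);
  for(elem = 0; elem < num; elem++) {
    (*map)[elem] = p->rank;  /* every element stays on this domain */
  }
  return(0);
}

static int ExampleQueryComposedMap(Partition p)
{
  int (*f)(Partition, int, int **);
  int  *elementMap;

  PetscObjectComposeFunction((PetscObject) p, "ExampleTriangular2D_CreateElementMap",
                             "ExampleCreateTrivialElementMap", (void (*)(void)) ExampleCreateTrivialElementMap);
  PetscObjectQueryFunction((PetscObject) p, "ExampleTriangular2D_CreateElementMap", (void (**)(void)) &f);
  (*f)(p, p->numLocElements, &elementMap);
  PetscFree(elementMap);
  return(0);
}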

1656: #undef  __FUNCT__
1658: /*
1659:   PartitionCreateNodeMap_Simple_Seq - This function creates a map from nodes to domains,
1660:   using a reordering of the serial mesh to reduce the bandwidth.

1662:   Input Parameters:
1663: + p        - The Partition
1664: - numNodes - The global number of nodes

1666:   Output Parameter:
1667: . nodeMap  - The map from nodes to domains

1669: .seealso: PartitionNodes_Private()
1670: */
1671: int PartitionCreateNodeMap_Simple_Seq(Partition p, int numNodes, int **nodeMap)
1672: {
1673:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1674:   int                     *firstNode = q->firstNode;
1675:   int                      rank      = p->rank;
1676:   int                     *nodeProcs; /* The processor which each node will lie on */
1677:   int                      node;
1678:   int                      ierr;

1681:   /* Use the existing interior nodes */
1682:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1683:   for(node = 0; node < numNodes; node++) {
1684:     nodeProcs[node] = -1;
1685:   }
1686:   for(node = firstNode[rank]; node < firstNode[rank+1]; node++) {
1687:     nodeProcs[node] = rank;
1688:   }

1690:   *nodeMap = nodeProcs;
1691:   return(0);
1692: }

1694: #undef  __FUNCT__
1696: /*
1697:   PartitionCreateNodeMap_ElementBased - This function creates a map from nodes to domains,
1698:   based upon a prior partition of the elements.

1700:   Input Parameters:
1701: + p        - The Partition
1702: - numNodes - The global number of nodes

1704:   Output Parameter:
1705: . nodeMap  - The map from nodes to domains

1707: .seealso: PartitionNodes_Private()
1708: */
1709: int PartitionCreateNodeMap_ElementBased(Partition p, int numNodes, int **nodeMap)
1710: {
1711:   Mesh             mesh               = p->mesh;
1712:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1713:   int              numLocElements     = p->numLocElements;
1714:   int              numGhostElements   = p->numOverlapElements - p->numLocElements;
1715:   int              numCorners         = mesh->numCorners;
1716:   int             *elements           = tri->faces;
1717:   int             *firstElement       = p->firstElement;
1718:   int             *ghostElements      = p->ghostElements;
1719:   int             *ghostElementProcs  = p->ghostElementProcs;
1720:   int              rank               = p->rank;
1721:   int             *nodeProcs;     /* The processor which each node will lie on */
1722:   int             *support;
1723:   int              nProc, elem, sElem, nElem, nLocElem, gElem, corner, nCorner, node, degree;
1724:   int              ierr;

1727:   /* Count nodes on this partition -- keep node if you are the lower numbered domain */
1728:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1729:   for(node = 0; node < numNodes; node++) {
1730:     nodeProcs[node] = -1;
1731:   }

1733:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1734:     for(corner = 0; corner < numCorners; corner++) {
1735:       node = elements[elem*numCorners+corner];

1737:       /* Check the support of the node */
1738:       MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1739:       for(sElem = 0; sElem < degree; sElem++) {
1740:         nElem = support[sElem];
1741:         /* See if neighbor is in another domain */
1742:         if ((nElem < firstElement[rank]) || (nElem >= firstElement[rank+1])) {
1743:           /* Check to see if node is contained in the neighboring element */
1744:           for(nCorner = 0; nCorner < numCorners; nCorner++) {
1745:             if (elements[nElem*numCorners+nCorner] == node) {
1746:               ierr  = PartitionGlobalToLocalElementIndex(p, nElem, &nLocElem);
1747:               nProc = ghostElementProcs[nLocElem-numLocElements];
1748:               /* Give the node to the lowest numbered domain */
1749:               if ((nProc < rank) && ((nodeProcs[node] < 0) || (nProc < nodeProcs[node]))) {
1750:                 nodeProcs[node] = nProc;
1751:               }
1752:               break;
1753:             }
1754:           }
1755:         }
1756:       }
1757:       MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);

1759:       /* If no one else claims it, take the node */
1760:       if (nodeProcs[node] < 0) {
1761:         nodeProcs[node] = rank;
1762:       }
1763:     }
1764:   }

1766:   /* Now assign the ghost nodes from ghost elements (which we can never own) */
1767:   for(gElem = 0; gElem < numGhostElements; gElem++) {
1768:     for(corner = 0; corner < numCorners; corner++) {
1769:       node = elements[ghostElements[gElem]*numCorners+corner];
1770:       if (nodeProcs[node] < 0)
1771:         nodeProcs[node] = ghostElementProcs[gElem];
1772:     }
1773:   }

1775:   *nodeMap = nodeProcs;
1776:   return(0);
1777: }
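
/* Worked example (illustrative): if a node sits on the interface between domains 0 and 2,
   then on domain 2 some supporting element is owned by domain 0, so nProc = 0 < rank and
   nodeProcs[node] is set to 0; domain 0 finds no lower-numbered claimant and takes the
   node itself. Interface nodes thus always end up on the lowest-numbered adjacent domain. */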

1779: /*
1780:   PartitionCreateNodePartition_Private - This function uses the node map to create
1781:   partition structures for node-based data.

1783:   Input Parameters:
1784: + p               - The Partition
1785: - nodeMap         - The map from nodes to domains

1787:   Output Parameter:
1788: . ordering        - The new node ordering

1790:   Output Parameters in Partition_Triangular_2D:
1791: + numLocNodes     - The number of local nodes
1792: . numOverlapNodes - The number of local + ghost nodes
1793: . firstNode       - The first node in each domain
1794: - ghostNodes      - The global number for each ghost node

1796: .seealso: PartitionNodes_Private()
1797: */
1798: #undef  __FUNCT__
1800: int PartitionCreateNodePartition_Private(Partition p, int *nodeMap, AO *ordering)
1801: {
1802:   Partition_Triangular_2D *q        = (Partition_Triangular_2D *) p->data;
1803:   int                      numNodes = q->numNodes;
1804:   int                      numProcs = p->numProcs;
1805:   int                      rank     = p->rank;
1806:   int                      numGhostNodes; /* Number of ghost nodes for this domain */
1807:   int                     *AppOrdering, *PetscOrdering;
1808:   int                      proc, node, index, index2;
1809:   int                      ierr;

1812:   /* Determine local and ghost sizes */
1813:   for(node = 0, q->numLocNodes = 0, numGhostNodes = 0; node < numNodes; node++) {
1814:     if (nodeMap[node] == rank) {
1815:       q->numLocNodes++;
1816:     } else if (nodeMap[node] >= 0) {
1817:       numGhostNodes++;
1818:     }
1819:   }

1821:   /* Recompute size and offset of each domain */
1822:   MPI_Allgather(&q->numLocNodes, 1, MPI_INT, &q->firstNode[1], 1, MPI_INT, p->comm);
1823:   for(proc = 1, q->firstNode[0] = 0; proc <= numProcs; proc++) {
1824:     q->firstNode[proc] = q->firstNode[proc] + q->firstNode[proc-1];
1825:   }
1826:   if (q->firstNode[numProcs] != numNodes) {
1827:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of nodes %d should be %d", q->firstNode[numProcs], numNodes);
1828:   }

1830:   /* Setup ghost node structures */
1831:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;
1832:   if (numGhostNodes > 0) {
1833:     PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
1834:   }

1836:   /* Get indices for reordering */
1837:   PetscMalloc(q->numLocNodes * sizeof(int), &AppOrdering);
1838:   PetscMalloc(q->numLocNodes * sizeof(int), &PetscOrdering);
1839:   for(node = 0; node < q->numLocNodes; node++) {
1840:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1841:     PetscOrdering[node] = q->firstNode[rank] + node;
1842:   }
1843:   for(node = 0, index = 0, index2 = 0; node < numNodes; node++) {
1844:     if (nodeMap[node] == rank) {
1845:       AppOrdering[index++]    = node;
1846:     } else if (nodeMap[node] >= 0) {
1847:       q->ghostNodes[index2++] = node;
1848:     }
1849:   }
1850:   if (index  != q->numLocNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node renumbering");
1851:   if (index2 != numGhostNodes)  SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node renumbering");

1853:   /* Create the global node reordering */
1854:   ierr = AOCreateBasic(p->comm, q->numLocNodes, AppOrdering, PetscOrdering, ordering);
1855:   if (ierr) {
1856:     PartitionDebugAO_Private(p, nodeMap);
1857:   }

1859:   PetscFree(AppOrdering);
1860:   PetscFree(PetscOrdering);
1861:   return(0);
1862: }
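
/* Worked example (illustrative): with numNodes = 4, numProcs = 2, and
   nodeMap = {0, 1, 0, 1}, rank 0 owns nodes 0 and 2 and rank 1 owns nodes 1 and 3, so

     rank 0:  AppOrdering = {0, 2},  PetscOrdering = {0, 1}
     rank 1:  AppOrdering = {1, 3},  PetscOrdering = {2, 3}

   and the resulting AO maps application numbers {0, 2, 1, 3} onto the contiguous PETSc
   numbers {0, 1, 2, 3}, making each domain's nodes sequential. */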

1864: #undef  __FUNCT__
1866: /*
1867:   PartitionPermuteNodes_Private - This function permutes the data which is implicitly
1868:   indexed by node number

1870:   Input Parameter:
1871: . p       - The Partition

1873:   Output Parameters in Mesh_Triangular:
1874: + nodes   - The coordinates on each node
1875: . markers - The node markers
1876: - degrees - The degree of each node

1878: .seealso: PartitionNodes_Private()
1879: */
1880: int PartitionPermuteNodes_Private(Partition p)
1881: {
1882:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
1883:   Mesh                     mesh = p->mesh;
1884:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
1885:   int                      ierr;

1888:   AOApplicationToPetscPermuteReal(q->nodeOrdering, mesh->dim, tri->nodes);
1889:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->markers);
1890:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->degrees);
1891:   return(0);
1892: }

1894: #undef  __FUNCT__
1896: /*
1897:   PartitionRenumberNodes_Private - This function renumbers the node-based data globally in
1898:   order to make the canonical numbers sequential in each domain.

1900:   Input Parameter:
1901: . p          - The Partition

1903:   Output Parameters in Mesh_Triangular:
1904: + faces      - The nodes in each element
1905: . edges      - The nodes on each edge
1906: - bdNodes    - The nodes on each boundary

1908:   Output Parameter in Partition_Triangular_2D:
1909: . ghostNodes - The global number of each ghost node

1911: .seealso: PartitionNodes_Private()
1912: */
1913: int PartitionRenumberNodes_Private(Partition p)
1914: {
1915:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1916:   Mesh                     mesh          = p->mesh;
1917:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
1918:   int                      numElements   = p->numElements;
1919:   int                      numCorners    = mesh->numCorners;
1920:   int                     *faces         = tri->faces;
1921:   int                      numEdges      = q->numEdges;
1922:   int                     *edges         = tri->edges;
1923:   int                      numBdNodes    = q->numBdNodes;
1924:   int                     *bdNodes       = tri->bdNodes;
1925:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1926:   int                     *ghostNodes    = q->ghostNodes;
1927:   int                      ierr;

1930:   AOApplicationToPetsc(q->nodeOrdering, numEdges*2,             edges);
1931:   AOApplicationToPetsc(q->nodeOrdering, numElements*numCorners, faces);
1932:   AOApplicationToPetsc(q->nodeOrdering, numBdNodes,             bdNodes);
1933:   AOApplicationToPetsc(q->nodeOrdering, numGhostNodes,          ghostNodes);
1934:   return(0);
1935: }
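
/* Continuing the example given after PartitionCreateNodePartition_Private(): with the AO
   mapping application nodes {0, 2, 1, 3} to PETSc nodes {0, 1, 2, 3}, an edge stored as
   (2, 3) is rewritten in place to (1, 3). Every array holding node numbers is pushed
   through the same AO so that all references agree with the new numbering. */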

1937: #undef  __FUNCT__
1939: /*
1940:   PartitionDistributeNodes_Private - This function distributes the node-based data, and
1941:   permutes arrays which are implicitly indexed by node number.

1943:   Input Parameter:
1944: . p       - The Partition

1946:   Output Parameters in Mesh_Triangular:
1947: + nodes   - The node coordinates
1948: . markers - The node markers
1949: - degrees - The node degrees

1951: .seealso: PartitionNodes_Private()
1952: */
1953: int PartitionDistributeNodes_Private(Partition p)
1954: {
1955:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
1956:   Mesh                     mesh            = p->mesh;
1957:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
1958:   int                      dim             = mesh->dim;
1959:   int                      numLocNodes     = q->numLocNodes;
1960:   int                      numOverlapNodes = q->numOverlapNodes;
1961:   int                      numGhostNodes   = numOverlapNodes - numLocNodes;
1962:   int                     *firstNode       = q->firstNode;
1963:   int                     *ghostNodes      = q->ghostNodes;
1964:   int                      rank            = p->rank;
1965:   int                     *temp;
1966:   double                  *temp2;
1967:   int                      node, c;
1968:   int                      ierr;

1971:   mesh->numNodes = numLocNodes;
1972:   PetscMalloc(numOverlapNodes*dim * sizeof(double), &temp2);
1973:   /* Interior nodes */
1974:   PetscMemcpy(temp2, &tri->nodes[firstNode[rank]*dim], numLocNodes*dim * sizeof(double));
1975:   /* Ghost nodes */
1976:   for(node = 0; node < numGhostNodes; node++) {
1977:     for(c = 0; c < dim; c++) {
1978:       temp2[(numLocNodes+node)*dim+c] = tri->nodes[ghostNodes[node]*dim+c];
1979:     }
1980:   }
1981:   PetscFree(tri->nodes);
1982:   tri->nodes = temp2;
1983:   PetscLogObjectMemory(p, numGhostNodes*dim * sizeof(double));

1985:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1986:   /* Interior markers */
1987:   PetscMemcpy(temp, &tri->markers[firstNode[rank]], numLocNodes * sizeof(int));
1988:   /* Ghost markers */
1989:   for(node = 0; node < numGhostNodes; node++) {
1990:     temp[numLocNodes+node] = tri->markers[ghostNodes[node]];
1991:   }
1992:   PetscFree(tri->markers);
1993:   tri->markers = temp;
1994:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1996:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1997:   /* Interior degrees */
1998:   PetscMemcpy(temp, &tri->degrees[firstNode[rank]], numLocNodes * sizeof(int));
1999:   /* Ghost degrees */
2000:   for(node = 0; node < numGhostNodes; node++) {
2001:     temp[numLocNodes+node] = tri->degrees[ghostNodes[node]];
2002:   }
2003:   PetscFree(tri->degrees);
2004:   tri->degrees = temp;
2005:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

2007:   return(0);
2008: }

2010: #undef  __FUNCT__
2012: /*
2013:   PartitionNodeCalcGhostProcs_Private - This function determines the processor from
2014:   which each ghost node comes.

2016:   Input Parameter:
2017: . p              - The Partition

2019:   Output Parameter in Partition_Triangular_2D:
2020: . ghostNodeProcs - The domain of each ghost node

2022: .seealso: PartitionNodes_Private()
2023: */
2024: int PartitionNodeCalcGhostProcs_Private(Partition p)
2025: {
2026:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2027:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2028:   int                     *ghostNodes    = q->ghostNodes;
2029:   int                     *firstNode     = q->firstNode;
2030:   int                      numProcs      = p->numProcs;
2031:   int                     *nodePerm;
2032:   int                      proc, node;
2033:   int                      ierr;

2036:   if (numGhostNodes == 0)
2037:     return(0);

2039:   /* Re-sort the ghost nodes after renumbering */
2040:   PartitionSortGhosts_Private(p, &numGhostNodes, ghostNodes, &nodePerm);
2041:   PetscFree(nodePerm);
2042:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;

2044:   /* calculate ghost node domains */
2045:   PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2046:   for(node = 0; node < numGhostNodes; node++) {
2047:     for(proc = 0; proc < numProcs; proc++) {
2048:       if ((ghostNodes[node] >= firstNode[proc]) && (ghostNodes[node] <  firstNode[proc+1])) {
2049:         q->ghostNodeProcs[node] = proc;
2050:         break;
2051:       }
2052:     }
2053:     if (proc == numProcs) SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d, global number %d", node, ghostNodes[node]);
2054:   }
2055: #ifdef PETSC_USE_BOPT_g
2056:   for(node = 0; node < numGhostNodes; node++) {
2057:     if ((ghostNodes[node] < firstNode[q->ghostNodeProcs[node]]) || (ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1]))
2058:       SETERRQ2(PETSC_ERR_LIB, "Invalid source processor %d on ghost node %d", q->ghostNodeProcs[node], node);
2059:   }
2060: #endif
2061:   return(0);
2062: }

2064: #undef  __FUNCT__
2066: int PartitionNodes_Private(Partition p)
2067: {
2068:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2069:   int                    (*f)(Partition, int, int **);
2070:   int                     *nodeMap; /* The map from nodes to domains */
2071:   int                      ierr;

2074:   /* Create a new map of nodes to domains */
2075:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap", (void (**)(void)) &f);
2076:   (*f)(p, q->numNodes, &nodeMap);

2078:   /* Create the node partition */
2079:   PartitionCreateNodePartition_Private(p, nodeMap, &q->nodeOrdering);
2080:   PetscFree(nodeMap);

2082:   /* Permute arrays implicitly indexed by node number */
2083:   PartitionPermuteNodes_Private(p);

2085:   /* Globally renumber the nodes to make canonical numbers sequential in each domain */
2086:   /* WARNING: We must re-sort the ghost nodes after renumbering, but this is done anyway during edge partitioning */
2087:   PartitionRenumberNodes_Private(p);

2089:   /* Assign ghost node source processors */
2090:   PartitionNodeCalcGhostProcs_Private(p);

2092:   q->isNodePartitioned = PETSC_TRUE;
2093:   return(0);
2094: }

2096: #undef  __FUNCT__
2098: /*
2099:   PartitionCreateEdgeMap_NodeBased - This function creates a map from edges to domains,
2100:   using a previous partition of the nodes.

2102:   Input Parameters:
2103: + p        - The Partition
2104: - numEdges - The global number of edges

2106:   Output Parameter:
2107: . edgeMap  - The map from edges to domains

2109: .seealso: PartitionEdges_Private()
2110: */
2111: int PartitionCreateEdgeMap_NodeBased(Partition p, int numEdges, int **edgeMap)
2112: {
2113:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
2114:   Mesh                     mesh      = p->mesh;
2115:   Mesh_Triangular         *tri       = (Mesh_Triangular *) mesh->data;
2116:   int                     *edges     = tri->edges;
2117:   int                     *firstNode = q->firstNode;
2118:   int                      numProcs  = p->numProcs;
2119:   int                      rank      = p->rank;
2120:   int                      startProc = -1;
2121:   int                      endProc   = -1;
2122:   int                     *edgeProcs;     /* The processor assigned to each edge */
2123:   int                      proc, edge, startNode, endNode;
2124:   int                      ierr;

2127:   PetscMalloc(numEdges * sizeof(int), &edgeProcs);
2128:   for(edge = 0; edge < numEdges; edge++) {
2129:     edgeProcs[edge] = -1;
2130:   }

2132:   /* Count edges on this partition -- keep edge if you are the lower numbered domain */
2133:   for(edge = 0; edge < numEdges; edge++) {
2134:     startNode = edges[edge*2];
2135:     endNode   = edges[edge*2+1];

2137:     if ((startNode >= firstNode[rank]) && (startNode < firstNode[rank+1])) {
2138:       /* startNode is local */
2139:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2140:         /* endNode is local */
2141:         edgeProcs[edge] = rank;
2142:       } else {
2143:         /* endNode is not local */
2144:         for(proc = 0; proc < numProcs; proc++) {
2145:           if ((endNode >= firstNode[proc]) && (endNode < firstNode[proc+1])) {
2146:             endProc = proc;
2147:             break;
2148:           }
2149:         }
2150:         if (rank < endProc) {
2151:           edgeProcs[edge] = rank;
2152:         }
2153:       }
2154:     } else {
2155:       /* startNode is not local */
2156:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2157:         /* endNode is local */
2158:         for(proc = 0; proc < numProcs; proc++) {
2159:           if ((startNode >= firstNode[proc]) && (startNode < firstNode[proc+1])) {
2160:             startProc = proc;
2161:             break;
2162:           }
2163:         }
2164:         if (rank < startProc) {
2165:           edgeProcs[edge] = rank;
2166:         }
2167:       }
2168:     }
2169:   }

2171:   *edgeMap = edgeProcs;
2172:   return(0);
2173: }
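
/* Worked example (illustrative): with firstNode = {0, 2, 4} on two processors, the edge
   (1, 3) joins a node owned by proc 0 to a node owned by proc 1. Proc 0 sees startNode
   local and endProc = 1 > rank, so it claims the edge; proc 1 sees endNode local but
   startProc = 0 < rank, so it leaves edgeProcs[edge] = -1. The lower-numbered domain
   always wins, so each edge is claimed by exactly one processor. */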

2175: #undef  __FUNCT__
2177: int PartitionCreateEdgePartition_Private(Partition p, int *edgeMap, AO *ordering)
2178: {
2179:   Mesh                     mesh          = p->mesh;
2180:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2181:   int                      numEdges      = mesh->numEdges;
2182:   int                      numProcs      = p->numProcs;
2183:   int                      rank          = p->rank;
2184:   int                     *AppOrdering   = PETSC_NULL;
2185:   int                     *PetscOrdering = PETSC_NULL;
2186:   int                      proc, edge, index;
2187:   int                      ierr;

2190:   /* Count the local edges */
2191:   for(edge = 0, q->numLocEdges = 0; edge < numEdges; edge++) {
2192:     if (edgeMap[edge] == rank) {
2193:       q->numLocEdges++;
2194:     }
2195:   }

2197:   /* Recompute size and offset of each domain */
2198:   MPI_Allgather(&q->numLocEdges, 1, MPI_INT, &q->firstEdge[1], 1, MPI_INT, p->comm);
2199:   for(proc = 1, q->firstEdge[0] = 0; proc <= numProcs; proc++)
2200:     q->firstEdge[proc] = q->firstEdge[proc] + q->firstEdge[proc-1];
2201:   if (q->firstEdge[numProcs] != q->numEdges) {
2202:     SETERRQ2(PETSC_ERR_PLIB, "Invalid global number of edges %d should be %d", q->firstEdge[numProcs], q->numEdges);
2203:   }

2205:   /* Get indices for reordering */
2206:   if (q->numLocEdges > 0) {
2207:     PetscMalloc(q->numLocEdges * sizeof(int), &AppOrdering);
2208:     PetscMalloc(q->numLocEdges * sizeof(int), &PetscOrdering);
2209:   }
2210:   for(edge = 0; edge < q->numLocEdges; edge++) {
2211:     PetscOrdering[edge] = q->firstEdge[rank] + edge;
2212:   }
2213:   for(edge = 0, index = 0; edge < numEdges; edge++) {
2214:     if (edgeMap[edge] == rank) {
2215:       AppOrdering[index++] = edge;
2216:     }
2217:   }
2218:   if (index != q->numLocEdges) {
2219:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of local edges %d should be %d", index, q->numLocEdges);
2220:   }

2222:   /* Create the global edge reordering */
2223:   AOCreateBasic(p->comm, q->numLocEdges, AppOrdering, PetscOrdering, ordering);

2225:   if (q->numLocEdges > 0) {
2226:     PetscFree(AppOrdering);
2227:     PetscFree(PetscOrdering);
2228:   }
2229:   return(0);
2230: }

2232: #undef  __FUNCT__
2234: /*
2235:   PartitionDistributeEdges_Private - This function distributes the edge-based data, and
2236:   permutes arrays which are implicitly indexed by edge number.

2238:   Input Parameter:
2239: . p           - The Partition

2241:   Output Parameters in Mesh_Triangular:
2242: + edges       - The nodes on each edge
2243: - edgemarkers - The edge markers

2245: .seealso: PartitionEdges_Private()
2246: */
2247: int PartitionDistributeEdges_Private(Partition p) {
2248:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2249:   Mesh                     mesh        = p->mesh;
2250:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
2251:   int                      numLocEdges = q->numLocEdges;
2252:   int                     *firstEdge   = q->firstEdge;
2253:   int                      rank        = p->rank;
2254:   int                     *temp        = PETSC_NULL;
2255:   int                      ierr;

2258:   mesh->numEdges = numLocEdges;
2259:   if (numLocEdges > 0) {
2260:     PetscMalloc(numLocEdges*2 * sizeof(int), &temp);
2261:     PetscMemcpy(temp, &tri->edges[firstEdge[rank]*2], numLocEdges*2 * sizeof(int));
2262:     PetscFree(tri->edges);
2263:   }
2264:   tri->edges = temp;

2266:   if (numLocEdges > 0) {
2267:     PetscMalloc(numLocEdges * sizeof(int), &temp);
2268:     PetscMemcpy(temp, &tri->edgemarkers[firstEdge[rank]], numLocEdges * sizeof(int));
2269:     PetscFree(tri->edgemarkers);
2270:   }
2271:   tri->edgemarkers = temp;

2273:   return(0);
2274: }

2276: #undef  __FUNCT__
2278: /*
2279:   PartitionPermuteEdges_Private - This function permutes the data which is implicitly
2280:   indexed by edge number

2282:   Input Parameter:
2283: . p           - The Partition

2285:   Output Parameters in Mesh_Triangular:
2286: + edges       - The nodes on each edge
2287: - edgemarkers - The edge markers

2289: .seealso: PartitionEdges_Private()
2290: */
2291: int PartitionPermuteEdges_Private(Partition p)
2292: {
2293:   Partition_Triangular_2D *q   = (Partition_Triangular_2D *) p->data;
2294:   Mesh_Triangular         *tri = (Mesh_Triangular *) p->mesh->data;
2295:   int                      ierr;

2298:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 2, tri->edges);
2299:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 1, tri->edgemarkers);
2300:   return(0);
2301: }

2303: #undef  __FUNCT__
2305: /*
2306:   PartitionRenumberEdges_Private - This function renumbers the edge-based data globally in
2307:   order to make the canonical numbers sequential in each domain.

2309:   Input Parameter:
2310: . p       - The Partition

2312:   Output Parameter in Mesh_Triangular:
2313: . bdEdges - The edges on each boundary

2315: .seealso: PartitionEdges_Private()
2316: */
2317: int PartitionRenumberEdges_Private(Partition p)
2318: {
2319:   Partition_Triangular_2D *q          = (Partition_Triangular_2D *) p->data;
2320:   Mesh                     mesh       = p->mesh;
2321:   Mesh_Triangular         *tri        = (Mesh_Triangular *) mesh->data;
2322:   int                      numBdEdges = mesh->numBdEdges;
2323:   int                     *bdEdges    = tri->bdEdges;
2324:   int                      ierr;

2327:   AOApplicationToPetsc(q->edgeOrdering, numBdEdges, bdEdges);
2328:   return(0);
2329: }

2331: #undef  __FUNCT__
2333: int PartitionEdgeGlobalToLocal_Private(Partition p)
2334: {
2335:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2336:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
2337:   int                      numLocEdges = q->numLocEdges;
2338:   int                     *edges       = tri->edges;
2339:   int                      node;
2340:   int                      ierr;

2343:   for(node = 0; node < numLocEdges*2; node++) {
2344:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2345:   }
2346:   return(0);
2347: }

2349: #undef  __FUNCT__
2351: int PartitionEdges_Private(Partition p)
2352: {
2353:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2354:   int                    (*f)(Partition, int, int **);
2355:   int                     *edgeMap; /* The map from edges to domains */
2356:   int                      ierr;

2359:   /* Create a new map of edges to domains */
2360:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap", (void (**)(void)) &f);
2361:   (*f)(p, q->numEdges, &edgeMap);

2363:   /* Create the edge partition */
2364:   PartitionCreateEdgePartition_Private(p, edgeMap, &q->edgeOrdering);
2365:   PetscFree(edgeMap);

2367:   /* Permute arrays implicitly indexed by edge number */
2368:   PartitionPermuteEdges_Private(p);

2370:   /* Globally renumber the edges to make canonical numbers sequential in each domain */
2371:   PartitionRenumberEdges_Private(p);

2373:   /* Check for new ghost nodes created by the edge partition */
2374:   PartitionGetNewGhostNodes_Edge(p);

2376:   q->isEdgePartitioned = PETSC_TRUE;
2377:   return(0);
2378: }

2380: #undef  __FUNCT__
2382: int PartitionBoundaryNodes_Private(Partition p)
2383: {
2384:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2385:   Mesh                     mesh          = p->mesh;
2386:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
2387:   int                      numBdNodes    = mesh->numBdNodes;
2388:   int                     *bdNodes       = tri->bdNodes;
2389:   int                     *firstNode     = q->firstNode;
2390:   int                      numProcs      = p->numProcs;
2391:   int                      rank          = p->rank;
2392:   int                      proc, node;
2393:   int                      ierr;

2396:   q->numLocBdNodes = 0;
2397:   for(node = 0; node < numBdNodes; node++) {
2398:     if ((bdNodes[node] >= firstNode[rank]) && (bdNodes[node] < firstNode[rank+1]))
2399:       q->numLocBdNodes++;
2400:   }
2401:   MPI_Allgather(&q->numLocBdNodes, 1, MPI_INT, &q->firstBdNode[1], 1, MPI_INT, p->comm);
2402:   q->firstBdNode[0] = 0;
2403:   for(proc = 1; proc <= numProcs; proc++) {
2404:     q->firstBdNode[proc] = q->firstBdNode[proc] + q->firstBdNode[proc-1];
2405:   }
2406:   if (q->firstBdNode[numProcs] != q->numBdNodes) {
2407:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of boundary nodes %d should be %d", q->firstBdNode[numProcs], q->numBdNodes);
2408:   }
2409:   return(0);
2410: }
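
/* The Allgather-plus-scan idiom above also appears in the node and edge partitioners: the
   MPI_Allgather() deposits each domain's count into first[1..numProcs], and the running sum
   turns counts into offsets, so that first[proc] is the global number of the first item on
   proc and first[numProcs] is the global total. A standalone sketch (not in the original
   source): */
static int ExampleComputeOffsets(MPI_Comm comm, int numLoc, int numProcs, int *first)
{
  int proc;

  first[0] = 0;
  MPI_Allgather(&numLoc, 1, MPI_INT, &first[1], 1, MPI_INT, comm);
  for(proc = 1; proc <= numProcs; proc++) {
    first[proc] = first[proc] + first[proc-1];  /* prefix sum: counts -> offsets */
  }
  return(0);
}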

2412: #undef  __FUNCT__
2414: /*
2415:   PartitionDistributeBdNodes_Private - This function distributes the edge-based data, and
2416:   permutes arrays which are implicitly indexed by edge number.

2418:   Input Parameter:
2419: . p           - The Partition

2421:   Output Parameters in Mesh_Triangular:
2422: + edges       - The nodes on each edge
2423: - edgemarkers - The edge markers

2425: .seealso: PartitionBdNodes_Private()
2426: */
2427: int PartitionDistributeBdNodes_Private(Partition p)
2428: {
2429:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2430:   Mesh_Triangular         *tri           = (Mesh_Triangular *) p->mesh->data;
2431:   int                      numLocNodes   = q->numLocNodes;
2432:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2433:   int                     *markers       = tri->markers;
2434:   int                      numLocBdNodes = q->numLocBdNodes;
2435:   int                      node, bdNode;
2436:   int                      ierr;

2439:   /* Process ghost boundary nodes */
2440:   q->numOverlapBdNodes = numLocBdNodes;
2441:   for(node = 0; node < numGhostNodes; node++) {
2442:     if (markers[numLocNodes+node] != 0)
2443:       q->numOverlapBdNodes++;
2444:   }
2445:   if (q->numOverlapBdNodes > numLocBdNodes) {
2446:     PetscMalloc((q->numOverlapBdNodes - numLocBdNodes) * sizeof(int), &q->ghostBdNodes);
2447:     for(node = 0, bdNode = 0; node < numGhostNodes; node++) {
2448:       if (markers[numLocNodes+node] != 0)
2449:         q->ghostBdNodes[bdNode++] = node;
2450:     }
2451:   }
2452:   return(0);
2453: }

2455: #undef  __FUNCT__
2457: int PartitionDistribute_Private(Partition p)
2458: {

2462:   /* Redistribute the elements and arrays implicitly numbered by element numbers */
2463:   PartitionDistributeElements_Private(p);

2465:   /* Redistribute the nodes and permute arrays implicitly numbered by node numbers */
2466:   PartitionDistributeNodes_Private(p);

2468:   /* Redistribute the edges and permute arrays implicitly numbered by edge numbers */
2469:   PartitionDistributeEdges_Private(p);

2471:   /* Store ghost boundary nodes */
2472:   PartitionDistributeBdNodes_Private(p);
2473:   return(0);
2474: }

2476: #undef  __FUNCT__
2478: int PartitionGlobalToLocal_Private(Partition p)
2479: {
2480:   Partition_Triangular_2D *q                  = (Partition_Triangular_2D *) p->data;
2481:   Mesh                     mesh               = p->mesh;
2482:   Mesh_Triangular         *tri                = (Mesh_Triangular *) mesh->data;
2483:   int                      numOverlapElements = p->numOverlapElements;
2484:   int                      numCorners         = mesh->numCorners;
2485:   int                     *faces              = tri->faces;
2486:   int                     *neighbors          = tri->neighbors;
2487:   int                      numLocEdges        = q->numLocEdges;
2488:   int                     *edges              = tri->edges;
2489:   int                      corner, neighbor, node;
2490:   int                      ierr;

2493:   for(corner = 0; corner < numOverlapElements*numCorners; corner++) {
2494:     PartitionGlobalToLocalNodeIndex(p, faces[corner], &faces[corner]);
2495:   }
2496:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
2497:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
2498:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
2499:   }
2500:   for(node = 0; node < numLocEdges*2; node++) {
2501:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2502:   }
2503:   return(0);
2504: }

2506: #undef  __FUNCT__
2508: /*@
2509:   PartitionCreateTriangular2D - Creates a partition of a two dimensional unstructured grid.

2511:   Collective on Mesh

2513:   Input Parameter:
2514: . mesh      - The mesh to be partitioned

2516:   Output Parameter:
2517: . partition - The partition

2519:   Level: beginner

2521: .keywords: unstructured mesh, partition
2522: .seealso: MeshCreateTriangular2D()
2523: @*/
2524: int PartitionCreateTriangular2D(Mesh mesh, Partition *part)
2525: {
2526:   int        numProcs;
2527:   PetscTruth opt;
2528:   int        ierr;

2531:   MPI_Comm_size(mesh->comm, &numProcs);
2532:   PartitionCreate(mesh, part);
2533:   if (numProcs == 1) {
2534:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_Uni",
2535:                                       (void (*)(void)) PartitionCreate_Uni);
2536: 
2537:   } else {
2538:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_ElementBased",
2539:                                       (void (*)(void)) PartitionCreate_ElementBased);
2540: 
2541:     PetscOptionsHasName(mesh->prefix, "-part_node_based", &opt);
2542:     if (opt == PETSC_TRUE) {
2543:       PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_NodeBased",
2544:                                         (void (*)(void)) PartitionCreate_NodeBased);
2545: 
2546:     }
2547:   }
2548:   PartitionSetType(*part, PARTITION_TRIANGULAR_2D);

2550:   PartitionViewFromOptions_Private(*part, "Partition");
2551:   return(0);
2552: }
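
/* A typical call sequence (illustrative; error checking and mesh setup elided):

     Mesh      mesh;
     Partition part;

     MeshCreateTriangular2D(...);                  <-- elided; see MeshCreateTriangular2D()
     PartitionCreateTriangular2D(mesh, &part);
     PartitionView(part, PETSC_NULL);              <-- or run with -part_view

   On one processor the uniprocessor creation routine is installed; on several processors
   the element-based (METIS) path is used unless -part_node_based selects the node-based
   path. */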

2554: EXTERN_C_BEGIN
2555: #undef  __FUNCT__
2557: int PartitionSerialize_Triangular_2D(Mesh mesh, Partition *part, PetscViewer viewer, PetscTruth store)
2558: {
2559:   Partition                p;
2560:   Partition_Triangular_2D *q;
2561:   int                      fd;
2562:   int                      numGhostElements, numGhostNodes, numGhostBdNodes, hasOrdering;
2563:   int                      numProcs, rank;
2564:   int                      one  = 1;
2565:   int                      zero = 0;
2566:   int                      ierr;

2569:   PetscViewerBinaryGetDescriptor(viewer, &fd);
2570:   if (store == PETSC_TRUE) {
2571:     p = *part;
2572:     numProcs = p->numProcs;
2573:     numGhostElements = p->numOverlapElements - p->numLocElements;
2574:     PetscBinaryWrite(fd, &p->numProcs,           1,                PETSC_INT, 0);
2575:     PetscBinaryWrite(fd, &p->rank,               1,                PETSC_INT, 0);
2576:     PetscBinaryWrite(fd, &p->numLocElements,     1,                PETSC_INT, 0);
2577:     PetscBinaryWrite(fd, &p->numElements,        1,                PETSC_INT, 0);
2578:     PetscBinaryWrite(fd, &p->numOverlapElements, 1,                PETSC_INT, 0);
2579:     PetscBinaryWrite(fd,  p->firstElement,       numProcs+1,       PETSC_INT, 0);
2580:     PetscBinaryWrite(fd,  p->ghostElements,      numGhostElements, PETSC_INT, 0);
2581:     PetscBinaryWrite(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT, 0);
2582:     if (p->ordering != PETSC_NULL) {
2583:       PetscBinaryWrite(fd, &one,                 1,                PETSC_INT, 0);
2584:       AOSerialize(p->comm, &p->ordering, viewer, store);
2585:     } else {
2586:       PetscBinaryWrite(fd, &zero,                1,                PETSC_INT, 0);
2587:     }

2589:     q    = (Partition_Triangular_2D *) (*part)->data;
2590:     numGhostNodes   = q->numOverlapNodes   - q->numLocNodes;
2591:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2592:     PetscBinaryWrite(fd, &q->numLocNodes,        1,                PETSC_INT, 0);
2593:     PetscBinaryWrite(fd, &q->numNodes,           1,                PETSC_INT, 0);
2594:     PetscBinaryWrite(fd, &q->numOverlapNodes,    1,                PETSC_INT, 0);
2595:     PetscBinaryWrite(fd,  q->firstNode,          numProcs+1,       PETSC_INT, 0);
2596:     PetscBinaryWrite(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT, 0);
2597:     PetscBinaryWrite(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT, 0);
2598:     PetscBinaryWrite(fd, &q->numLocEdges,        1,                PETSC_INT, 0);
2599:     PetscBinaryWrite(fd, &q->numEdges,           1,                PETSC_INT, 0);
2600:     PetscBinaryWrite(fd,  q->firstEdge,          numProcs+1,       PETSC_INT, 0);
2601:     PetscBinaryWrite(fd, &q->numLocBdNodes,      1,                PETSC_INT, 0);
2602:     PetscBinaryWrite(fd, &q->numBdNodes,         1,                PETSC_INT, 0);
2603:     PetscBinaryWrite(fd, &q->numOverlapBdNodes,  1,                PETSC_INT, 0);
2604:     PetscBinaryWrite(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT, 0);
2605:     PetscBinaryWrite(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT, 0);
2606:   } else {
2607:     /* Create the partition context */
2608:     PartitionCreate(mesh, &p);
2609:     PetscNew(Partition_Triangular_2D, &q);
2610:     PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2611:     PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2612:     PetscStrallocpy(PARTITION_TRIANGULAR_2D, &p->type_name);
2613:     p->data = (void *) q;

2615:     MPI_Comm_size(p->comm, &numProcs);
2616:     MPI_Comm_rank(p->comm, &rank);
2617:     PetscBinaryRead(fd, &p->numProcs,           1,                PETSC_INT);
2618:     PetscBinaryRead(fd, &p->rank,               1,                PETSC_INT);
2619:     if (p->numProcs != numProcs) {
2620:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid number of processors %d should be %d", numProcs, p->numProcs);
2621:     }
2622:     if (p->rank != rank) {
2623:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid processor rank %d should be %d", rank, p->rank);
2624:     }
2625:     PetscBinaryRead(fd, &p->numLocElements,     1,                PETSC_INT);
2626:     PetscBinaryRead(fd, &p->numElements,        1,                PETSC_INT);
2627:     PetscBinaryRead(fd, &p->numOverlapElements, 1,                PETSC_INT);
2628:     PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2629:     PetscBinaryRead(fd,  p->firstElement,       numProcs+1,       PETSC_INT);
2630:     numGhostElements = p->numOverlapElements - p->numLocElements;
2631:     if (numGhostElements > 0) {
2632:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElements);
2633:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElementProcs);
2634:     }
2635:     PetscBinaryRead(fd,  p->ghostElements,      numGhostElements, PETSC_INT);
2636:     PetscBinaryRead(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT);
2637:     PetscBinaryRead(fd, &hasOrdering,           1,                PETSC_INT);
2638:     if (hasOrdering) {
2639:       AOSerialize(p->comm, &p->ordering, viewer, store);
2640:     }

2642:     q->ghostNodes        = PETSC_NULL;
2643:     q->ghostNodeProcs    = PETSC_NULL;
2644:     q->ghostBdNodes      = PETSC_NULL;
2645:     q->nodeOrdering      = PETSC_NULL;
2646:     q->edgeOrdering      = PETSC_NULL;

2648:     PetscBinaryRead(fd, &q->numLocNodes,        1,                PETSC_INT);
2649:     PetscBinaryRead(fd, &q->numNodes,           1,                PETSC_INT);
2650:     PetscBinaryRead(fd, &q->numOverlapNodes,    1,                PETSC_INT);
2651:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2652:     PetscBinaryRead(fd,  q->firstNode,          numProcs+1,       PETSC_INT);
2653:     numGhostNodes   = q->numOverlapNodes - q->numLocNodes;
2654:     if (numGhostNodes > 0) {
2655:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
2656:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2657:     }
2658:     PetscBinaryRead(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT);
2659:     PetscBinaryRead(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT);
2660:     PetscBinaryRead(fd, &q->numLocEdges,        1,                PETSC_INT);
2661:     PetscBinaryRead(fd, &q->numEdges,           1,                PETSC_INT);
2662:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2663:     PetscBinaryRead(fd,  q->firstEdge,          numProcs+1,       PETSC_INT);
2664:     PetscBinaryRead(fd, &q->numLocBdNodes,      1,                PETSC_INT);
2665:     PetscBinaryRead(fd, &q->numBdNodes,         1,                PETSC_INT);
2666:     PetscBinaryRead(fd, &q->numOverlapBdNodes,  1,                PETSC_INT);
2667:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2668:     PetscBinaryRead(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT);
2669:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2670:     if (numGhostBdNodes) {
2671:       PetscMalloc(numGhostBdNodes * sizeof(int), &q->ghostBdNodes);
2672:     }
2673:     PetscBinaryRead(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT);
2674:     PetscLogObjectMemory(p, ((numProcs+1)*4 + numGhostElements*2 + numGhostNodes*2 + numGhostBdNodes)* sizeof(int));
2675:     *part = p;
2676:   }
2677:   if (p->numProcs > 1) {
2678:     AOSerialize(p->comm, &q->nodeOrdering, viewer, store);
2679:     AOSerialize(p->comm, &q->edgeOrdering, viewer, store);
2680:   }

2682:   return(0);
2683: }
2684: EXTERN_C_END
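
/* Note on the serialization format: the load branch must issue PetscBinaryRead() calls in
   exactly the order, and with exactly the types, of the PetscBinaryWrite() calls in the
   store branch. Any new field must be added to both branches in matching positions, and
   the variable-length ghost arrays are sized from the counts read immediately before them. */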

2686: EXTERN_C_BEGIN
2687: #undef  __FUNCT__
2689: int PartitionCreate_ElementBased(Partition p)
2690: {
2691: #ifdef PETSC_USE_BOPT_g
2692:   int cut; /* The number of edges of the dual crossing the partition */
2693: #endif


2698:   /* Partition elements */
2699:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2700:                                     "PartitionCreateElementMap_METIS", (void (*)(void)) PartitionCreateElementMap_METIS);
2701: 
2702:   PartitionElements_Private(p);

2704:   /* Partition the nodes */
2705:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2706:                                     "PartitionCreateNodeMap_ElementBased", (void (*)(void)) PartitionCreateNodeMap_ElementBased);
2707: 
2708:   PartitionNodes_Private(p);

2710:   /* Partition the edges -- Changes the ghost nodes */
2711:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2712:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2713: 
2714:   PartitionEdges_Private(p);

2716:   /* Partition boundary nodes */
2717:   PartitionBoundaryNodes_Private(p);

2719:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2720:   PartitionDistribute_Private(p);

2722:   /* Change to local node numbers */
2723:   PartitionGlobalToLocal_Private(p);

2725: #ifdef PETSC_USE_BOPT_g
2726:   /* Compute the size of the cut */
2727:   PartitionCalcCut_Private(p, &cut);
2728:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2729: #endif

2731:   return(0);
2732: }
2733: EXTERN_C_END

2735: EXTERN_C_BEGIN
2736: #undef  __FUNCT__
2738: int PartitionCreate_Uni(Partition p)
2739: {
2740:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
2741:   Mesh                     mesh = p->mesh;
2742:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
2743:   PetscTruth               opt;
2744:   int                      ierr;

2747:   PetscOptionsHasName(p->prefix, "-part_mesh_reorder", &opt);
2748:   if (opt == PETSC_TRUE) {
2749:     MeshGetOrdering(mesh, MESH_ORDER_TRIANGULAR_2D_RCM, &q->nodeOrdering);
2750:     PetscLogObjectParent(p, q->nodeOrdering);

2752:     /* Permute arrays implicitly numbered by node numbers */
2753:     AOApplicationToPetscPermuteReal(q->nodeOrdering, 2, tri->nodes);
2754:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->markers);
2755:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->degrees);
2756:     /* Renumber arrays dependent on the canonical node numbering */
2757:     AOApplicationToPetsc(q->nodeOrdering, mesh->numEdges*2,                       tri->edges);
2758:     AOApplicationToPetsc(q->nodeOrdering, p->numOverlapElements*mesh->numCorners, tri->faces);
2759:     AOApplicationToPetsc(q->nodeOrdering, mesh->numBdNodes,                       tri->bdNodes);
2760:   }
2761:   return(0);
2762: }
2763: EXTERN_C_END

2765: EXTERN_C_BEGIN
2766: #undef  __FUNCT__
2768: int PartitionCreate_NodeBased(Partition p) {
2769: #ifdef PETSC_USE_BOPT_g
2770:   int cut; /* The number of edges of the dual crossing the partition */
2771: #endif

2775:   /* Partition Nodes */
2776:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2777:                                     "PartitionCreateNodeMap_Simple_Seq", (void (*)(void)) PartitionCreateNodeMap_Simple_Seq);
2778: 
2779:   PartitionNodes_Private(p);

2781:   /* Partition elements */
2782:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2783:                                     "PartitionCreateElementMap_NodeBased", (void (*)(void)) PartitionCreateElementMap_NodeBased);
2784: 
2785:   PartitionElements_Private(p);

2787:   /* Partition edges */
2788:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2789:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2790: 
2791:   PartitionEdges_Private(p);

2793:   /* Partition boundary nodes */
2794:   PartitionBoundaryNodes_Private(p);

2796:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2797:   PartitionDistribute_Private(p);

2799:   /* Change to local node numbers */
2800:   PartitionGlobalToLocal_Private(p);

2802: #ifdef PETSC_USE_BOPT_g
2803:   /* Compute the size of the cut */
2804:   PartitionCalcCut_Private(p, &cut);
2805:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2806: #endif

2808:   return(0);
2809: }
2810: EXTERN_C_END
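
/*
   For illustration: the "cut" logged in the debugging branch above counts
   dual-graph edges (element adjacencies) crossing the partition.  The sketch
   below is hypothetical, not the actual body of PartitionCalcCut_Private();
   it assumes a neighbors[] array with three entries per triangle, local
   elements numbered in [0, numLocElements), ghost elements above that, and
   -1 for boundary sides:
*/
static int SketchCalcCut(Partition p, int *neighbors, int *cut)
{
  int elem, side, nbr, locCut = 0, ierr;

  for(elem = 0; elem < p->numLocElements; elem++) {
    for(side = 0; side < 3; side++) {
      nbr = neighbors[elem*3+side];
      if (nbr >= p->numLocElements) locCut++; /* neighbor owned by another process */
    }
  }
  /* Each crossing edge is counted once by each of its two owners */
  ierr = MPI_Allreduce(&locCut, cut, 1, MPI_INT, MPI_SUM, p->comm); CHKERRQ(ierr);
  *cut /= 2;
  return(0);
}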


2813: EXTERN_C_BEGIN
2814: #undef  __FUNCT__
2815: #define __FUNCT__ "PartitionCreate_Triangular_2D"
2816: int PartitionCreate_Triangular_2D(Partition p) {
2817:   Partition_Triangular_2D *q;
2818:   Mesh                     mesh = p->mesh;
2819:   int                    (*f)(Partition);
2820:   int                      numProcs, rank, rem;
2821:   int                      proc;
2822:   int                      ierr;

2825:   MPI_Comm_size(p->comm, &numProcs);
2826:   MPI_Comm_rank(p->comm, &rank);

2828:   PetscNew(Partition_Triangular_2D, &q);
2829:   PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2830:   PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2831:   p->data = (void *) q;
2832:   PetscStrallocpy(PARTITION_SER_TRIANGULAR_2D_BINARY, &p->serialize_name);
2833:   PetscLogObjectParent(mesh, p);

2835:   /* Initialize structure */
2836:   p->numProcs             = numProcs;
2837:   p->rank                 = rank;
2838:   p->isElementPartitioned = PETSC_FALSE;
2839:   p->ordering             = PETSC_NULL;
2840:   p->ghostElements        = PETSC_NULL;
2841:   p->ghostElementProcs    = PETSC_NULL;
2842:   q->isNodePartitioned    = PETSC_FALSE;
2843:   q->isEdgePartitioned    = PETSC_FALSE;
2844:   q->nodeOrdering         = PETSC_NULL;
2845:   q->ghostNodes           = PETSC_NULL;
2846:   q->ghostNodeProcs       = PETSC_NULL;
2847:   q->edgeOrdering         = PETSC_NULL;
2848:   q->ghostBdNodes         = PETSC_NULL;
2849:   PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2850:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2851:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2852:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2853:   PetscLogObjectMemory(p, (numProcs+1)*4 * sizeof(int));
2854:   PetscMemzero(p->firstElement, (numProcs+1) * sizeof(int));
2855:   PetscMemzero(q->firstNode,    (numProcs+1) * sizeof(int));
2856:   PetscMemzero(q->firstBdNode,  (numProcs+1) * sizeof(int));
2857:   PetscMemzero(q->firstEdge,    (numProcs+1) * sizeof(int));

2859:   /* Setup crude preliminary partition */
2860:   for(proc = 0; proc < numProcs; proc++) {
2861:     rem                   = (mesh->numFaces%numProcs);
2862:     p->firstElement[proc] = (mesh->numFaces/numProcs)*proc + PetscMin(rem, proc);
2863:     rem                   = (mesh->numNodes%numProcs);
2864:     q->firstNode[proc]    = (mesh->numNodes/numProcs)*proc + PetscMin(rem, proc);
2865:     rem                   = (mesh->numEdges%numProcs);
2866:     q->firstEdge[proc]    = (mesh->numEdges/numProcs)*proc + PetscMin(rem, proc);
2867:     rem                   = (mesh->numBdNodes%numProcs);
2868:     q->firstBdNode[proc]  = (mesh->numBdNodes/numProcs)*proc + PetscMin(rem, proc);
2869:   }
2870:   p->firstElement[numProcs] = mesh->numFaces;
2871:   q->firstNode[numProcs]    = mesh->numNodes;
2872:   q->firstEdge[numProcs]    = mesh->numEdges;
2873:   q->firstBdNode[numProcs]  = mesh->numBdNodes;
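      /*
         The loop above block-distributes n items over P processes:
         first[proc] = (n/P)*proc + PetscMin(n%P, proc), so the first n%P
         processes get one extra item.  For example, n = 10 faces on P = 4
         processes gives firstElement = {0, 3, 6, 8, 10}, i.e. local sizes
         3, 3, 2, 2.
      */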

2875:   p->numLocElements            = p->firstElement[rank+1] - p->firstElement[rank];
2876:   p->numElements               = p->firstElement[numProcs];
2877:   p->numOverlapElements        = p->numLocElements; /* no ghosts computed yet */
2878:   q->numLocNodes               = q->firstNode[rank+1] - q->firstNode[rank];
2879:   q->numNodes                  = q->firstNode[numProcs];
2880:   q->numOverlapNodes           = q->numLocNodes;
2881:   q->numLocEdges               = q->firstEdge[rank+1] - q->firstEdge[rank];
2882:   q->numEdges                  = q->firstEdge[numProcs];
2883:   q->numLocBdNodes             = q->firstBdNode[rank+1] - q->firstBdNode[rank];
2884:   q->numBdNodes                = q->firstBdNode[numProcs];
2885:   q->numOverlapBdNodes         = q->numLocBdNodes;

2887:   /* Partition the mesh */
2888:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_Create_C", (PetscVoidFunction) &f);
2889:   if (f != PETSC_NULL) (*f)(p); /* the creation routine composed by the concrete variant */

2891:   /* Recalculate derived quantities */
2892:   MeshTriangular2DCalcAreas(mesh, PETSC_FALSE);
2893:   MeshTriangular2DCalcAspectRatios(mesh, PETSC_FALSE);

2895:   return(0);
2896: }
2897: EXTERN_C_END
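
/*
   For illustration: PartitionCreate_Triangular_2D() above dispatches through
   the composed method "PartitionTriangular2D_Create_C", so a concrete
   variant must be composed on the object before the create runs.  A
   hypothetical registration sketch (the helper name SketchRegisterAndCreate
   is made up; the routine composed here is the element-based variant defined
   earlier in this file):
*/
static int SketchRegisterAndCreate(Partition p)
{
  int ierr;

  ierr = PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_Create_C",
                                    "PartitionCreate_ElementBased",
                                    (void (*)(void)) PartitionCreate_ElementBased); CHKERRQ(ierr);
  ierr = PartitionCreate_Triangular_2D(p); CHKERRQ(ierr);
  return(0);
}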