Actual source code: grid2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: grid2d.c,v 1.31 2000/07/16 23:20:01 knepley Exp $";
  3: #endif

  5: /* Implements 2d triangular grids */
  6: #include "petscts.h"
  7: #include "gsolver.h"
  8: #include "src/grid/gridimpl.h"         /*I "grid.h" I*/
  9: #include "src/mesh/impls/triangular/triimpl.h"
 10: #include "src/gvec/impls/triangular/2d/gvec2d.h"
 11: #include "src/gvec/impls/triangular/2d/gvec2dView.h"
 12: #include "src/gvec/impls/triangular/2d/gmat2d.h"
 13: #include "grid2d.h"
 14: #include "elemvec2d.h"
 15: #include "varorder2d.h"

 17: extern int GridResetConstrainedMultiply_Private(Grid, GMat);

 19: #undef  __FUNCT__
 20: #define __FUNCT__ "GridDestroy_Triangular_2D"
 21: int GridDestroy_Triangular_2D(Grid grid) {
 22:   int  field, bd;
 23:   int  ierr;

 26:   /* Field variables */
 27:   for(field = 0; field < grid->numFields; field++) {
 28:     if (grid->fields[field].name != PETSC_NULL) {
 29:       PetscFree(grid->fields[field].name);
 30:     }
 31:     PetscFree(grid->fields[field].discType);
 32:     DiscretizationDestroy(grid->fields[field].disc);
 33:   }
 34:   PetscFree(grid->fields);
 35:   /* Class variables */
 36:   if (grid->cm) {
 37:     FieldClassMapDestroy(grid->cm);
 38:   }
 39:   /* Default variable orderings */
 40:   if (grid->order) {
 41:     VarOrderingDestroy(grid->order);
 42:   }
 43:   if (grid->locOrder) {
 44:     LocalVarOrderingDestroy(grid->locOrder);
 45:   }
 46:   /* Ghost variable scatter */
 47:   if (grid->ghostVec) {
 48:     VecDestroy(grid->ghostVec);
 49:   }
 50:   if (grid->ghostScatter) {
 51:     VecScatterDestroy(grid->ghostScatter);
 52:   }
 53:   /* Constraint variables */
 54:   if (grid->constraintCM) {
 55:     FieldClassMapDestroy(grid->constraintCM);
 56:   }
 57:   if (grid->constraintOrder) {
 58:     VarOrderingDestroy(grid->constraintOrder);
 59:   }
 60:   if (grid->constraintOrdering) {
 61:     ISDestroy(grid->constraintOrdering);
 62:   }
 63:   if (grid->constraintMatrix) {
 64:     MatDestroy(grid->constraintMatrix);
 65:   }
 66:   if (grid->constraintInverse) {
 67:     MatDestroy(grid->constraintInverse);
 68:   }
 69:   /* Problem variables */
 70:   PetscFree(grid->rhsFuncs);
 71:   PetscFree(grid->rhsOps);
 72:   PetscFree(grid->matOps);
 73:   /* Assembly variables */
 74:   PetscFree(grid->defaultFields);
 75:   if (grid->vec) {
 76:     ElementVecDestroy(grid->vec);
 77:   }
 78:   if (grid->mat) {
 79:     ElementMatDestroy(grid->mat);
 80:   }
 81:   if (grid->ghostElementVec) {
 82:     ElementVecDestroy(grid->ghostElementVec);
 83:   }
 84:   /* Boundary condition variables */
 85:   if (grid->reductionCM) {
 86:     FieldClassMapDestroy(grid->reductionCM);
 87:   }
 88:   if (grid->reduceOrder) {
 89:     VarOrderingDestroy(grid->reduceOrder);
 90:   }
 91:   if (grid->locReduceOrder) {
 92:     LocalVarOrderingDestroy(grid->locReduceOrder);
 93:   }
 94:   PetscFree(grid->bc);
 95:   PetscFree(grid->pointBC);
 96:   /* Boundary iteration variables */
 97:   for(bd = 0; bd < grid->numBd; bd++) {
 98:     if (grid->bdSize[bd] != PETSC_NULL) {
 99:       PetscFree(grid->bdSize[bd]);
100:     }
101:   }
102:   PetscFree(grid->bdSize);
103:   if (grid->bdOrder) {
104:     VarOrderingDestroy(grid->bdOrder);
105:   }
106:   if (grid->bdLocOrder) {
107:     LocalVarOrderingDestroy(grid->bdLocOrder);
108:   }
109:   /* Subobjects */
110:   MeshDestroy(grid->mesh);
111:   return(0);
112: }

114: #undef  __FUNCT__
115: #define __FUNCT__ "GridView_Triangular_2D_File"
116: static int GridView_Triangular_2D_File(Grid grid, PetscViewer viewer) {
117:   VarOrdering order = grid->order;
118:   FILE       *fd;
119:   int         rank, field;
120:   int         ierr;

123:   MPI_Comm_rank(grid->comm, &rank);
124:   PetscViewerASCIIGetPointer(viewer, &fd);
125:   PetscFPrintf(grid->comm, fd, "Grid Object:\n");
126:   if (grid->numFields == 1) {
127:     PetscFPrintf(grid->comm, fd, "  %d field:\n", grid->numFields);
128:   } else {
129:     PetscFPrintf(grid->comm, fd, "  %d fields:\n", grid->numFields);
130:   }
131:   for(field = 0; field < grid->numFields; field++) {
132:     /* Grid structure */
133:     if (grid->fields[field].name != PETSC_NULL) {
134:       PetscFPrintf(grid->comm, fd, "  %s field", grid->fields[field].name);
135:     } else {
136:       PetscFPrintf(grid->comm, fd, "  field %d", field);
137:     }
138:     if (grid->fields[field].numComp == 1) {
139:       PetscFPrintf(grid->comm, fd, " with %d component is ", grid->fields[field].numComp);
140:     } else {
141:       PetscFPrintf(grid->comm, fd, " with %d components is ", grid->fields[field].numComp);
142:     }
143:     if (grid->fields[field].isActive) {
144:       PetscFPrintf(grid->comm, fd, "active\n    ");
145:     } else {
146:       PetscFPrintf(grid->comm, fd, "inactive\n    ");
147:     }
148:     DiscretizationView(grid->fields[field].disc, viewer);
149:   }

151:   /* Problem specific information */
152:   if (grid->numActiveFields > 0) {
153:     PetscFPrintf(grid->comm, fd, "  %d variables in the problem:\n", order->numVars);
154:     PetscSynchronizedFPrintf(grid->comm, fd, "    %d variables and %d ghost variables in domain %d:\n",
155:                              order->numLocVars, order->numOverlapVars - order->numLocVars, rank);
156:     PetscSynchronizedFlush(grid->comm);
157:   }

159:   /* Underlying mesh */
160:   MeshView(grid->mesh, viewer);
161:   return(0);
162: }
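
/* Illustrative output sketch, with a hypothetical field name and hypothetical variable
   counts, showing roughly what the ASCII path above prints for a grid with one active
   2-component field named "velocity":

     Grid Object:
       1 field:
       velocity field with 2 components is active
         <DiscretizationView output>
       24 variables in the problem:
         24 variables and 0 ghost variables in domain 0:
     <MeshView output>
*/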

164: #undef  __FUNCT__
165: #define __FUNCT__ "GridView_Triangular_2D"
166: int GridView_Triangular_2D(Grid grid, PetscViewer viewer) {
167:   PetscTruth isascii;
168:   int        ierr;

171:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
172:   if (isascii == PETSC_TRUE) {
173:     GridView_Triangular_2D_File(grid, viewer);
174:   }
175:   return(0);
176: }

178: #undef  __FUNCT__
179: #define __FUNCT__ "GridSetupGhostScatter_Triangular_2D"
180: int GridSetupGhostScatter_Triangular_2D(Grid grid, VarOrdering order, Vec *ghostVec, VecScatter *ghostScatter) {
181:   FieldClassMap         map;
182:   PetscConstraintObject constCtx          = grid->constraintCtx;
183:   int                   numOverlapVars    = order->numOverlapVars;
184:   int                   numLocVars        = order->numLocVars;
185:   int                   numVars           = order->numVars;
186:   int                   numLocNewVars     = order->numLocNewVars;
187:   int                   numOverlapNewVars = order->numOverlapNewVars;
188:   int                   numGhostNewVars   = order->numOverlapNewVars - order->numLocNewVars;
189:   int                  *firstVar          = order->firstVar;
190:   int                  *offsets           = order->offsets;
191:   int                   numNodes, numGhostNodes;
192:   int                  *classes, *classSizes;
193:   IS                    localIS;       /* Local  indices for local ghost vector variables */
194:   int                  *indices;       /* Global indices for local ghost vector variables */
195:   IS                    globalIS;      /* Global indices for local ghost vector variables */
196:   Vec                   dummyVec;      /* Dummy global vector used to create the ghost variable scatter */
197:   int                   rank, newComp;
198:   int                   node, nclass, var, startVar, newField, i, c;
199:   int                   ierr;

204:   VarOrderingGetClassMap(order, &map);
205:   numNodes      = map->numNodes;
206:   numGhostNodes = map->numGhostNodes;
207:   classes       = map->classes;
208:   classSizes    = map->classSizes;

210:   /* Create the ghost variable scatter -- Note that localOffsets is not used when there are no ghost variables */
211:   MPI_Comm_rank(grid->comm, &rank);
212:   ISCreateStride(grid->comm, numOverlapVars, 0, 1, &localIS);
213:   PetscMalloc(numOverlapVars * sizeof(int), &indices);
214:   for(var = 0; var < numLocVars; var++) {
215:     indices[var] = var + firstVar[rank];
216:   }
217:   for(node = 0, var = numLocVars; node < numGhostNodes; node++) {
218:     nclass = classes[numNodes+node];
219:     for(i = 0; i < classSizes[nclass]; i++) {
220:       indices[var++] = offsets[numNodes+node] + i;
221:     }
222:   }
223:   if (numGhostNewVars > 0) {
224:     /* Add in constraints that generate off-processor variables */
225:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
226: 
227:     for(newField = numLocNewVars/newComp; newField < numOverlapNewVars/newComp; newField++) {
228:       (*constCtx->ops->getindices)(constCtx, grid->mesh, order, newField, CONSTRAINT_NEW_INDEX, &startVar);
229: 
230:       for(c = 0; c < newComp; c++, var++) {
231:         indices[var] = startVar+c;
232:       }
233:     }
234:   }
235:   if (var != numOverlapVars) SETERRQ(PETSC_ERR_PLIB, "Invalid ghost vector numbering");
236:   ISCreateGeneral(grid->comm, numOverlapVars, indices, &globalIS);
237:   VecCreateMPI(grid->comm, numLocVars, numVars, &dummyVec);
238:   VecCreateSeq(PETSC_COMM_SELF, numOverlapVars, ghostVec);
239:   VecScatterCreate(dummyVec, globalIS, *ghostVec, localIS, ghostScatter);
240:   PetscLogObjectParent(grid, *ghostVec);
241:   PetscLogObjectParent(grid, *ghostScatter);

243:   /* Cleanup */
244:   VecDestroy(dummyVec);
245:   ISDestroy(localIS);
246:   ISDestroy(globalIS);
247:   PetscFree(indices);
248:   return(0);
249: }
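
/* Usage sketch: a global vector is pushed into the sequential ghost vector through the
   scatter built above.  This assumes the PETSc 2.x-era calling sequence
   VecScatterBegin(x, y, addv, mode, scatter); the helper name and the globalVec argument
   are hypothetical. */
#if 0
static int GridRefreshGhosts_Sketch(Grid grid, Vec globalVec)
{
  int ierr;

  ierr = VecScatterBegin(globalVec, grid->ghostVec, INSERT_VALUES, SCATTER_FORWARD, grid->ghostScatter);CHKERRQ(ierr);
  ierr = VecScatterEnd(globalVec, grid->ghostVec, INSERT_VALUES, SCATTER_FORWARD, grid->ghostScatter);CHKERRQ(ierr);
  return(0);
}
#endif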

251: #undef  __FUNCT__
252: #define __FUNCT__ "GridSetupBoundarySizes_Triangular_2D"
253: int GridSetupBoundarySizes_Triangular_2D(Grid grid) {
254:   Mesh_Triangular         *tri  = (Mesh_Triangular *) grid->mesh->data;
255:   Partition                part = grid->mesh->part;
256:   int                      numFields    = grid->cm->numFields;
257:   int                     *fields       = grid->cm->fields;
258:   int                      numClasses   = grid->cm->numClasses;
259:   int                     *classes      = grid->cm->classes;
260:   int                    **fieldClasses = grid->cm->fieldClasses;
261:   int                     *bdCount; /* Number of boundary nodes of a given class */
262:   int                      firstNode;
263:   int                      bd, bdNode, f, field, node, nclass;
264:   int                      ierr;

267:   PetscMalloc(numClasses * sizeof(int), &bdCount);
268:   PartitionGetStartNode(part, &firstNode);
269:   for(bd = 0; bd < grid->numBd; bd++) {
270:     /* Count the number of boundary nodes of each class */
271:     PetscMemzero(bdCount, numClasses * sizeof(int));
272:     for(bdNode = tri->bdBegin[bd]; bdNode < tri->bdBegin[bd+1]; bdNode++) {
273:       node = tri->bdNodes[bdNode] - firstNode;
274:       if ((node >= 0) && (node < grid->mesh->numNodes)) {
275:         bdCount[classes[node]]++;
276:       }
277:     }
278:     /* Calculate boundary sizes */
279:     PetscMemzero(grid->bdSize[bd], grid->numFields * sizeof(int));
280:     for(f = 0; f < numFields; f++) {
281:       field = fields[f];
282:       for(nclass = 0; nclass < numClasses; nclass++) {
283:         if (fieldClasses[f][nclass]) {
284:           grid->bdSize[bd][field] += bdCount[nclass];
285:         }
286:       }
287:     }
288:   }

290:   /* Cleanup */
291:   PetscFree(bdCount);
292:   return(0);
293: }
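
/* Worked example with hypothetical numbers: if boundary bd holds 5 local nodes, 3 of
   class 0 and 2 of class 1, and field f is supported only on class 0
   (fieldClasses[f][0] != 0, fieldClasses[f][1] == 0), the counting loop gives
   bdCount[0] = 3 and bdCount[1] = 2, and the accumulation loop leaves
   grid->bdSize[bd][fields[f]] = 3. */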

295: #if 0
296: #undef  __FUNCT__
297: #define __FUNCT__ "GridExtantExchange"
298: /*@C
299:   GridExtantExchange
300:   This function transfers data between local storage in different domains without a predefined mapping.

302:   Input Parameters:
303: . numExtants   - The number of extants (interior variables) in this domain
304: . extantProcs  - The processor to which to send each extant
305: . firstExtant  - The first extant variable in each domain

307: . ghostIndices - The global index for each ghost
308: . dataType     - The type of the variables
309: . firstVar     - The first variable on each processor
310: . addv         - The insert mode, INSERT_VALUES or ADD_VALUES
311: . mode         - The direction of the transfer, SCATTER_FORWARD or SCATTER_REVERSE
312: . locVars      - The local variable array

314:   Output Parameters:
315: . firstExtant  - The first extant variable in each domain after repartitioning

317: . ghostVars    - The ghost variables

319:   Note:
320:   The data in ghostVars is assumed contiguous and implicitly indexed by the order of
321:   ghostProcs and ghostIndices. The SCATTER_FORWARD mode will take the requested data
322:   from locVars and copy it to ghostVars in the order specified by ghostIndices. The
323:   SCATTER_REVERSE mode will take data from ghostVars and copy it to locVars.

325:   Level: developer

327: .keywords ghost, exchange, grid
328: .seealso GridGlobalToLocal, GridLocalToGlobal
329: @*/
330: int GridExtantExchange(MPI_Comm comm, int numExtants, int *extantProcs, int *firstExtant, PetscDataType dataType, AO *ordering,
332:                        int *firstVar, InsertMode addv, ScatterMode mode, void *locVars, void *ghostVars)
333: {
334:   int         *numSendExtants; /* The number of extants from each domain */
335:   int         *numRecvExtants; /* The number of extants in each domain */
336:   int         *sumSendExtants; /* The prefix sums of numSendExtants */
337:   int         *sumRecvExtants; /* The prefix sums of numRecvExtants */
338:   int         *offsets;        /* The offset into the send array for each domain */
339:   int          totSendExtants; /* The number of extants sent to other domains */
340:   int          totRecvExtants; /* The number of extants received by this domain */
341:   int         *sendIndices;    /* The canonical indices of extants in this domain */
342:   int         *recvIndices;    /* The canonical indices of extants to return variables for */
343:   int         *extantIndices;  /* The new canonical indices of extants after reordering */
344:   char        *tempVars;       /* The variables of the requested or submitted extants */
345:   int          numLocVars;
346:   char        *locBytes   = (char *) locVars;
347:   MPI_Datatype MPIType;
348:   int          typeSize;
349:   int          numProcs, rank;
350:   int          proc, extant, locIndex, byte;
351:   int          ierr;

354:   /* Initialize communication */
355:   MPI_Comm_size(comm, &numProcs);
356:   MPI_Comm_rank(comm, &rank);
357:   PetscMalloc(numProcs * sizeof(int), &numSendExtants);
358:   PetscMalloc(numProcs * sizeof(int), &numRecvExtants);
359:   PetscMalloc(numProcs * sizeof(int), &sumSendExtants);
360:   PetscMalloc(numProcs * sizeof(int), &sumRecvExtants);
361:   PetscMalloc(numProcs * sizeof(int), &offsets);
362:   PetscMemzero(numSendExtants,  numProcs * sizeof(int));
363:   PetscMemzero(numRecvExtants,  numProcs * sizeof(int));
364:   PetscMemzero(sumSendExtants,  numProcs * sizeof(int));
365:   PetscMemzero(sumRecvExtants,  numProcs * sizeof(int));
366:   PetscMemzero(offsets,         numProcs * sizeof(int));
367:   numLocVars = firstVar[rank+1] - firstVar[rank];

369:   /* Get number of extants to send to each processor */
370:   for(extant = 0; extant < numExtants; extant++) {
371:     numSendExtants[extantProcs[extant]]++;
372:   }

374:   /* Get number of extants to receive from each processor */
375:   MPI_Alltoall(numSendExtants, 1, MPI_INT, numRecvExtants, 1, MPI_INT, comm);
376:   for(proc = 1; proc < numProcs; proc++) {
377:     sumSendExtants[proc] = sumSendExtants[proc-1] + numSendExtants[proc-1];
378:     sumRecvExtants[proc] = sumRecvExtants[proc-1] + numRecvExtants[proc-1];
379:     offsets[proc]       = sumSendExtants[proc];
380:   }
381:   totSendExtants = sumSendExtants[numProcs-1] + numSendExtants[numProcs-1];
382:   totRecvExtants = sumRecvExtants[numProcs-1] + numRecvExtants[numProcs-1];
383:   if (numExtants != totSendExtants) SETERRQ(PETSC_ERR_PLIB, "Invalid number of extants in send");

385:   PetscDataTypeGetSize(dataType, &typeSize);
386:   if (totSendExtants) {
387:     PetscMalloc(totSendExtants * sizeof(int), &sendIndices);
388:   }
389:   if (totRecvExtants) {
390:     PetscMalloc(totRecvExtants * sizeof(int), &recvIndices);
391:     PetscMalloc(totRecvExtants * sizeof(int), &extantIndices);
392:     PetscMalloc(totRecvExtants * typeSize,    &tempVars);
393:   }

395:   /* Must order extants by processor */
396:   for(extant = 0; extant < numExtants; extant++)
397:     sendIndices[offsets[extantProcs[extant]]++] = extant + firstExtant[rank];

399:   /* Get canonical indices of extants to provide variables for */
400:   MPI_Alltoallv(sendIndices, numSendExtants, sumSendExtants, MPI_INT,
401:                        recvIndices, numRecvExtants, sumRecvExtants, MPI_INT, comm);
402: 

404:   /* Recompute size and offset of each domain */
405:   MPI_Allgather(&totRecvExtants, 1, MPI_INT, &firstExtant[1], 1, MPI_INT, comm);
406:   firstExtant[0] = 0;
407:   for(proc = 1; proc <= numProcs; proc++)
408:     firstExtant[proc] += firstExtant[proc-1];

410:   /* Create the global extant reordering */
411:   for(extant = 0; extant < totRecvExtants; extant++)
412:     /* This would be the time to do RCM on the local graph by reordering extantIndices[] */
413:     extantIndices[extant] =  extant + firstExtant[rank];
414:   AOCreateDebug(comm, totRecvExtants, recvIndices, extantIndices, ordering);

416:   switch(mode)
417:   {
418:   case SCATTER_FORWARD:
419:     /* Get extant variables */
420:     if (addv == INSERT_VALUES) {
421:       for(extant = 0; extant < totRecvExtants; extant++)
422:       {
423:         locIndex = recvIndices[extant] - firstVar[rank];
424: #ifdef PETSC_USE_BOPT_g
425:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
426: #endif
427:         for(byte = 0; byte < typeSize; byte++)
428:           tempVars[extant*typeSize+byte] = locBytes[locIndex*typeSize+byte];
429:       }
430:     } else {
431:       for(extant = 0; extant < totRecvExtants; extant++)
432:       {
433:         locIndex = recvIndices[extant] - firstVar[rank];
434: #ifdef PETSC_USE_BOPT_g
435:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
436: #endif
437:         for(byte = 0; byte < typeSize; byte++)
438:           tempVars[extant*typeSize+byte] += locBytes[locIndex*typeSize+byte];
439:       }
440:     }

442:     /* Communicate local variables to extant storage */
443:     PetscDataTypeToMPIDataType(dataType, &MPIType);
444:     MPI_Alltoallv(tempVars,  numRecvExtants, sumRecvExtants, MPIType,
445:                          ghostVars, numSendExtants, sumSendExtants, MPIType, comm);
446: 
447:     break;
448:   case SCATTER_REVERSE:
449:     /* Communicate extant variables to local storage */
450:     PetscDataTypeToMPIDataType(dataType, &MPIType);
451:     MPI_Alltoallv(ghostVars, numSendExtants, sumSendExtants, MPIType,
452:                          tempVars,  numRecvExtants, sumRecvExtants, MPIType, comm);
453: 

455:     /* Get extant variables */
456:     if (addv == INSERT_VALUES) {
457:       for(extant = 0; extant < totRecvExtants; extant++)
458:       {
459:         locIndex = recvIndices[extant] - firstVar[rank];
460: #ifdef PETSC_USE_BOPT_g
461:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
462: #endif
463:         for(byte = 0; byte < typeSize; byte++)
464:           locBytes[locIndex*typeSize+byte] = tempVars[extant*typeSize+byte];
465:       }
466:     } else {
467:       for(extant = 0; extant < totRecvExtants; extant++)
468:       {
469:         locIndex = recvIndices[extant] - firstVar[rank];
470: #ifdef PETSC_USE_BOPT_g
471:         if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
472: #endif
473:         for(byte = 0; byte < typeSize; byte++)
474:           locBytes[locIndex*typeSize+byte] += tempVars[extant*typeSize+byte];
475:       }
476:     }
477:     break;
478:   default:
479:     SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid scatter mode");
480:   }

482:   /* Cleanup */
483:   PetscFree(numSendExtants);
484:   PetscFree(numRecvExtants);
485:   PetscFree(sumSendExtants);
486:   PetscFree(sumRecvExtants);
487:   PetscFree(offsets);
488:   if (totSendExtants) {
489:     PetscFree(sendIndices);
490:   }
491:   if (totRecvExtants) {
492:     PetscFree(recvIndices);
493:     PetscFree(tempVars);
494:   }
495:   return(0);
496: }
497: #endif
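
/* The disabled routine above relies on the standard counts-to-displacements pattern for
   MPI_Alltoallv: exchange per-destination counts with MPI_Alltoall, then prefix-sum them
   into displacements.  A minimal self-contained sketch (all names are illustrative; the
   arrays are caller-allocated with one entry per rank): */
#if 0
static int ExchangeCounts_Sketch(MPI_Comm comm, int *sendCounts, int *recvCounts, int *sendDispls, int *recvDispls)
{
  int numProcs, proc, ierr;

  ierr = MPI_Comm_size(comm, &numProcs);CHKERRQ(ierr);
  /* How many items does every other rank plan to send here? */
  ierr = MPI_Alltoall(sendCounts, 1, MPI_INT, recvCounts, 1, MPI_INT, comm);CHKERRQ(ierr);
  /* Prefix sums give the offsets MPI_Alltoallv expects */
  sendDispls[0] = recvDispls[0] = 0;
  for(proc = 1; proc < numProcs; proc++) {
    sendDispls[proc] = sendDispls[proc-1] + sendCounts[proc-1];
    recvDispls[proc] = recvDispls[proc-1] + recvCounts[proc-1];
  }
  return(0);
}
#endif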

499: #undef  __FUNCT__
500: #define __FUNCT__ "GridSetUp_Triangular_2D"
501: int GridSetUp_Triangular_2D(Grid grid) {
502:   FieldClassMap newCM;
503: #ifdef NEW_REDUCTION
504:   int           numReduceFields;
505:   int          *reduceFields;
506:   int           bc;
507: #endif
508:   int           elemSize;
509:   int           f, field;
510:   int           ierr;

513:   if (grid->numActiveFields <= 0) PetscFunctionReturn(1);

515:   /* Create default class map */
516:   if (grid->cm != PETSC_NULL) {
517:     FieldClassMapDestroy(grid->cm);
518:   }
519:   FieldClassMapCreateTriangular2D(grid, grid->numActiveFields, grid->defaultFields, &grid->cm);
520:   /* Implement system constraints */
521:   if (grid->reduceSystem == PETSC_TRUE) {
522:     /* Constrain the default class structure */
523:     FieldClassMapConstrain(grid->cm, grid, PETSC_TRUE, PETSC_FALSE, &newCM);
524:     FieldClassMapDestroy(grid->cm);
525:     grid->cm = newCM;
526:     /* Create reduction class map */
527:     if (grid->reductionCM != PETSC_NULL) {
528:       FieldClassMapDestroy(grid->reductionCM);
529:     }
530: #ifdef NEW_REDUCTION
531:     PetscMalloc((grid->numBC+grid->numPointBC) * sizeof(int), &reduceFields);
532:     for(bc = 0, numReduceFields = 0; bc < grid->numBC; bc++) {
533:       if (grid->bcReduce[bc] != PETSC_TRUE) continue;
534:       for(f = 0; f < numReduceFields; f++) {
535:         if (reduceFields[f] == grid->bcField[bc]) break;
536:       }
537:       if (f == numReduceFields) reduceFields[numReduceFields++] = grid->bcField[bc];
538:     }
539:     for(bc = 0; bc < grid->numPointBC; bc++) {
540:       if (grid->pointBCReduce[bc] != PETSC_TRUE) continue;
541:       for(f = 0; f < numReduceFields; f++) {
542:         if (reduceFields[f] == grid->pointBCField[bc]) break;
543:       }
544:       if (f == numReduceFields) reduceFields[numReduceFields++] = grid->pointBCField[bc];
545:     }
546:     FieldClassMapCreateTriangular2D(grid, numReduceFields, reduceFields, &newCM);
547:     FieldClassMapReduce(newCM, grid, &grid->reductionCM);
548:     FieldClassMapDestroy(newCM);
549:     PetscFree(reduceFields);
550: #else
551:     FieldClassMapReduce(grid->cm, grid, &grid->reductionCM);
552: #endif
553:   }
554:   /* Calculate boundary sizes after reduction */
555:   GridSetupBoundarySizes_Triangular_2D(grid);

557:   /* Setup default global and local variable orderings */
558:   if (grid->order) {
559:     VarOrderingDestroy(grid->order);
560:   }
561:   if (grid->locOrder) {
562:     LocalVarOrderingDestroy(grid->locOrder);
563:   }
564:   VarOrderingCreate(grid, &grid->order);
565:   LocalVarOrderingCreate(grid, grid->cm->numFields, grid->cm->fields, &grid->locOrder);

567:   /* Setup global and local variable orderings for BC reduction */
568:   if (grid->reduceOrder) {
569:     VarOrderingDestroy(grid->reduceOrder);
570:   }
571:   if (grid->locReduceOrder) {
572:     LocalVarOrderingDestroy(grid->locReduceOrder);
573:   }
574:   if (grid->reduceSystem) {
575:     VarOrderingCreateReduce(grid, &grid->reduceOrder);
576:     LocalVarOrderingCreate(grid, grid->reductionCM->numFields, grid->reductionCM->fields, &grid->locReduceOrder);
577: 
578:   }

580:   /* Setup element vector and matrix */
581:   if (grid->vec != PETSC_NULL) {
582:     ElementVecDestroy(grid->vec);
583:   }
584:   if (grid->ghostElementVec != PETSC_NULL) {
585:     ElementVecDestroy(grid->ghostElementVec);
586:   }
587:   if (grid->mat != PETSC_NULL) {
588:     ElementMatDestroy(grid->mat);
589:   }
590:   elemSize = grid->locOrder->elemSize;
591:   if (grid->explicitConstraints == PETSC_TRUE) {
592:     for(f = 0; f < grid->cm->numFields; f++) {
593:       field = grid->cm->fields[f];
594:       if (grid->fields[field].isConstrained == PETSC_TRUE)
595:         elemSize += grid->fields[field].disc->funcs*grid->fields[field].constraintCompDiff;
596:     }
597:   }
598:   ElementVecCreate(grid->comm, elemSize, &grid->vec);
599:   ElementVecCreate(grid->comm, elemSize, &grid->ghostElementVec);
600:   ElementMatCreate(grid->comm, elemSize, elemSize, &grid->mat);
601:   grid->vec->reduceSize             = grid->locOrder->elemSize;
602:   grid->ghostElementVec->reduceSize = grid->locOrder->elemSize;
603:   grid->mat->reduceRowSize          = grid->locOrder->elemSize;
604:   grid->mat->reduceColSize          = grid->locOrder->elemSize;

606:   return(0);
607: }
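
/* Worked example with hypothetical sizes: if locOrder->elemSize is 12 and one explicitly
   constrained field has disc->funcs = 3 and constraintCompDiff = 1, the loop above grows
   the element workspace to elemSize = 12 + 3*1 = 15, while vec->reduceSize and the
   element matrix reduce sizes stay at the unconstrained value 12. */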

609: #undef  __FUNCT__
610: #define __FUNCT__ "GridSetupConstraints_Triangular_2D"
611: int GridSetupConstraints_Triangular_2D(Grid grid, PetscConstraintObject ctx) {
612:   Mesh             mesh;
613:   Field           *fields             = grid->fields;
614:   FieldClassMap    cm                 = grid->cm;
615:   int              numFields          = grid->cm->numFields;
616:   int              numNodes           = grid->cm->numNodes;
617:   int            **fieldClasses       = grid->cm->fieldClasses;
618:   int             *classes            = grid->cm->classes;
619:   int             *classSizes         = grid->cm->classSizes;
620:   int              numVars            = grid->order->numVars;
621:   int              numLocVars         = grid->order->numLocVars;
622:   int             *firstVar           = grid->order->firstVar;
623:   int             *offsets            = grid->order->offsets;
624:   int              numTotalFields     = grid->order->numTotalFields;
625:   int            **localStart         = grid->order->localStart;
626:   int              constField         = -1; /* The field which is constrained */
627:   int             *ordering;                /* Gives a mapping between the two variable numberings */
628:   int             *diagRows;                /* Allocation for the projector P */
629:   int             *offdiagRows;             /* Allocation for the projector P */
630:   int              numConstrainedFields;
631:   int              rowStartVar, colStartVar, locColStart, locColEnd, numLocConstraintVars;
632:   int              rank;
633:   int              f, field, node, marker, nclass, comp, nodeVars, var, count;
634:   PetscTruth       opt;
635:   int              ierr;

638:   MPI_Comm_rank(grid->comm, &rank);
639:   GridGetMesh(grid, &mesh);
640:   /* Check constrained fields */
641:   for(field = 0, numConstrainedFields = 0; field < numTotalFields; field++)
642:     if (fields[field].isConstrained == PETSC_TRUE) {
643:       constField = field;
644:       numConstrainedFields++;
645:     }
646:   if (numConstrainedFields == 0) return(0);
647:   if (numConstrainedFields > 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only one field may be constrained");

649:   /* Create constrained class map */
650:   if (grid->constraintCM != PETSC_NULL) {
651:     FieldClassMapDestroy(grid->constraintCM);
652:   }
653:   FieldClassMapConstrain(grid->cm, grid, PETSC_FALSE, PETSC_TRUE, &grid->constraintCM);

655:   /* Create variable ordering for constrained and new fields */
656:   if (grid->constraintOrder != PETSC_NULL) {
657:     VarOrderingDestroy(grid->constraintOrder);
658:   }
659:   VarOrderingConstrain(grid, grid->order, &grid->constraintOrder);

661:   /* Calculate mapping between variable numberings */
662:   if (grid->constraintOrdering != PETSC_NULL) {
663:     ISDestroy(grid->constraintOrdering);
664:   }
665:   PetscMalloc(numLocVars * sizeof(int), &ordering);
666:   numLocConstraintVars = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
667:   for(node = 0, count = 0; node < numNodes; node++) {
668:     nclass      = classes[node];
669:     rowStartVar = offsets[node];
670:     nodeVars    = classSizes[nclass];
671:     colStartVar = grid->constraintOrder->offsets[node];

673:     MeshGetNodeBoundary(mesh, node, &marker);
674:     if ((marker < 0) && (localStart[constField][nclass] >= 0)) {
675:       /* The preceding fields on the node */
676:       for(var = 0; var < localStart[constField][nclass]; var++, count++)
677:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
678:       /* Nonzeroes in C */
679:       rowStartVar += localStart[constField][nclass];
680:       colStartVar += localStart[constField][nclass];
681:       for(var = 0; var < fields[constField].numComp; var++, count++)
682:         ordering[rowStartVar-firstVar[rank]+var] = numLocConstraintVars++;
683:       /* The remaining fields on the node */
684:       for(var = fields[constField].numComp; var < nodeVars - localStart[constField][nclass]; var++, count++)
685:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var-fields[constField].numComp;
686:     } else {
687:       /* Nonzeroes in I */
688:       for(var = 0; var < nodeVars; var++, count++)
689:         ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
690:     }
691:   }
692:   if (numLocConstraintVars != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
693:   if (count != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
694:   ISCreateGeneral(PETSC_COMM_SELF, numLocVars, ordering, &grid->constraintOrdering);
695:   PetscFree(ordering);


698:   /* Calculate allocation for constraint matrix which transforms unconstrained fields to constrained and new fields:

700:          / I 0 \ / v_Int \ = / v_Int \
701:          \ 0 C / \ v_Bd  /   \ v_New /
702:   */
703:   PetscMalloc(numLocVars * sizeof(int), &diagRows);
704:   PetscMalloc(numLocVars * sizeof(int), &offdiagRows);
705:   PetscMemzero(diagRows,    numLocVars * sizeof(int));
706:   PetscMemzero(offdiagRows, numLocVars * sizeof(int));
707:   locColStart = grid->constraintOrder->firstVar[rank];
708:   locColEnd   = grid->constraintOrder->firstVar[rank+1];
709:   for(node = 0; node < numNodes; node++) {
710:     nclass           = classes[node];
711:     rowStartVar      = offsets[node] - firstVar[rank];
712:     nodeVars         = classSizes[nclass];

714:     /* All constrained nodes have negative markers */
715:     MeshGetNodeBoundary(mesh, node, &marker);
716:     if (marker < 0) {
717:       for(f = 0; f < numFields; f++) {
718:         field = cm->fields[f];
719:         if (fields[field].isConstrained == PETSC_TRUE) {
720:           comp = fields[field].numComp + fields[field].constraintCompDiff;
721:           (*ctx->ops->getindices)(ctx, grid->mesh, grid->constraintOrder, node, CONSTRAINT_COL_INDEX, &colStartVar);
722: 
723:           /* Check to see whether the variables fall within the diagonal block --
724:                Notice we are overestimating as if every constrained variable
725:                depends on all the new variables
726:           */
727:           if ((colStartVar + comp <= locColStart) || (colStartVar >= locColEnd)) {
728:             for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
729:               offdiagRows[rowStartVar] += comp;
730:           } else if ((colStartVar >= locColStart) && (colStartVar + comp <= locColEnd)) {
731:             for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
732:               diagRows[rowStartVar]    += comp;
733: #if 0
734:           /* Allow cuts on a single node for rectangular matrices */
735:           } else if (rectangular) {
736:             if (colStartVar < locColStart) {
737:               /* Cut is from below */
738:               for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
739:               {
740:                 diagRows[rowStartVar]    += (colStartVar + comp) - locColStart;
741:                 offdiagRows[rowStartVar] += locColStart - colStartVar;
742:               }
743:             } else {
744:               /* Cut is from above */
745:               for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
746:               {
747:                 diagRows[rowStartVar]    += locColEnd - colStartVar;
748:                 offdiagRows[rowStartVar] += (colStartVar + comp) - locColEnd;
749:               }
750:             }
751: #endif
752:           } else {
753:             /* Row blocking cuts variables on a single node. This is bad partitioning. */
754:             SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
755:           }
756:         } else if (fieldClasses[f][nclass]) {
757:           /* Remember localStart[][] is -1 if the field is not on the node */
758:           for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
759:             diagRows[rowStartVar] = 1;
760:         }
761:       }
762:     } else {
763:       /* Unconstrained nodes */
764:       for(var = 0; var < nodeVars; var++)
765:         diagRows[rowStartVar+var] = 1;
766:     }
767:   }

769:   /* Create the constraint matrix */
770:   if (grid->constraintMatrix != PETSC_NULL) {
771:     MatDestroy(grid->constraintMatrix);
772:   }
773:   MatCreateMPIAIJ(grid->comm, numLocVars, grid->constraintOrder->numLocVars, numVars,
774:                          grid->constraintOrder->numVars, 0, diagRows, 0, offdiagRows, &grid->constraintMatrix);
775: 
776:   MatSetOption(grid->constraintMatrix, MAT_NEW_NONZERO_ALLOCATION_ERR);

778:   /* Create the pseudo-inverse of the constraint matrix */
779:   PetscOptionsHasName(PETSC_NULL, "-grid_const_inv", &opt);
780:   if (opt == PETSC_TRUE) {
781:     if (grid->constraintInverse != PETSC_NULL) {
782:       MatDestroy(grid->constraintInverse);
783:     }
784:     MatCreateMPIAIJ(grid->comm, grid->constraintOrder->numLocVars, grid->constraintOrder->numLocVars,
785:                            grid->constraintOrder->numVars, grid->constraintOrder->numVars, 3, PETSC_NULL, 0, PETSC_NULL,
786:                            &grid->constraintInverse);
787: 
788:     MatSetOption(grid->constraintInverse, MAT_NEW_NONZERO_ALLOCATION_ERR);
789:   }

791:   /* Cleanup */
792:   PetscFree(diagRows);
793:   PetscFree(offdiagRows);

795:   return(0);
796: }
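
/* Dimension sketch: the constraint matrix assembled above has grid->order->numLocVars
   local rows and grid->constraintOrder->numLocVars local columns, so MatMult consumes a
   vector laid out by the constrained ordering and produces one in the unconstrained
   ordering (MatMultTranspose maps the other way).  The helper and vector names are
   hypothetical. */
#if 0
static int ApplyConstraintProjector_Sketch(Grid grid, Vec vConstrained, Vec vUnconstrained)
{
  int ierr;

  ierr = MatMult(grid->constraintMatrix, vConstrained, vUnconstrained);CHKERRQ(ierr);
  return(0);
}
#endif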

798: #undef  __FUNCT__
799: #define __FUNCT__ "GridSetupBoundary_Triangular_2D"
800: int GridSetupBoundary_Triangular_2D(Grid grid) {
801:   Mesh                     mesh;
802:   Partition                part;
803:   FieldClassMap            map             = grid->cm;
804:   PetscConstraintObject    constCtx        = grid->constraintCtx;
805:   int                      numBC           = grid->numBC;
806:   GridBC                  *gridBC          = grid->bc;
807:   int                      numFields       = map->numFields;
808:   int                     *fields          = map->fields;
809:   int                      numNodes        = map->numNodes;
810:   int                      numOverlapNodes = map->numOverlapNodes;
811:   int                      numGhostNodes   = map->numGhostNodes;
812:   int                      numClasses      = map->numClasses;
813:   int                    **fieldClasses    = map->fieldClasses;
814:   int                     *classes         = map->classes;
815:   int                     *classSizes      = map->classSizes;
816:   int                     *localOffsets;
817:   int                      numNewVars;
818:   VarOrdering              o;
819:   LocalVarOrdering         l;
820:   /* Ghost variable communication */
821:   int                     *ghostSendVars;    /* Number of ghost variables on a given processor interior to this domain */
822:   int                     *sumSendVars;      /* Prefix sums of ghostSendVars */
823:   int                     *ghostRecvVars;    /* Number of ghost variables on a given processor */
824:   int                     *sumRecvVars;      /* Prefix sums of ghostRecvVars */
825:   int                     *displs;           /* Offsets into ghostRecvVars */
826:   int                      numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
827:   int                     *sendGhostBuffer;  /* Recv: Global node numbers Send: Offsets of these nodes */
828:   int                      numProcs, rank;
829:   int                      elemOffset;
830:   int                      proc, f, field, bc, node, locNode, gNode, marker, nclass, var;
831:   int                      ierr;

834:   grid->bdSetupCalled = PETSC_TRUE;
835:   GridGetMesh(grid, &mesh);
836:   MeshGetPartition(mesh, &part);

838:   /* Destroy old orderings */
839:   if (grid->bdOrder) {
840:     VarOrderingDestroy(grid->bdOrder);
841:   }
842:   if (grid->bdLocOrder) {
843:     LocalVarOrderingDestroy(grid->bdLocOrder);
844:   }

846:   /* Setup the boundary ordering */
847:   PetscHeaderCreate(o, _VarOrdering, int, IS_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
848:   PetscLogObjectCreate(o);
849:   PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) map);

851:   /* Allocate memory */
852:   MPI_Comm_size(grid->comm, &numProcs);
853:   MPI_Comm_rank(grid->comm, &rank);
854:   GridGetNumFields(grid, &o->numTotalFields);
855:   PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
856:   PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
857:   PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
858:   PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses) * sizeof(int) + o->numTotalFields*sizeof(int *));
859:   PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));
860:   o->numLocNewVars = 0;
861:   o->numNewVars    = 0;

863:   /* Setup domain variable numbering */
864:   o->offsets[0] = 0;
865:   for(node = 0; node < numNodes-1; node++) {
866:     MeshGetNodeBoundary(mesh, node, &marker);
867:     if (marker == 0) {
868:       o->offsets[node+1] = o->offsets[node];
869:     } else {
870:       for(bc = 0; bc < numBC; bc++) {
871:         if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == marker)) break;
872:       }
873:       if (bc == numBC) {
874:         o->offsets[node+1] = o->offsets[node] + classSizes[classes[node]];
875:       } else {
876:         o->offsets[node+1] = o->offsets[node];
877:       }
878:     }
879:   }
880:   MeshGetNodeBoundary(mesh, numNodes-1, &marker);
881:   for(bc = 0; bc < numBC; bc++) {
882:     if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == marker)) break;
883:   }
884:   if (bc == numBC) {
885:     o->numLocVars = o->offsets[numNodes-1] + classSizes[classes[numNodes-1]];
886:   } else {
887:     o->numLocVars = o->offsets[numNodes-1];
888:   }
889:   if (map->isConstrained == PETSC_TRUE) {
890:     (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
891: 
892:     o->numLocVars += o->numLocNewVars;
893:   }
894:   MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
895:   o->firstVar[0] = 0;
896:   for(proc = 1; proc <= numProcs; proc++)
897:     o->firstVar[proc] += o->firstVar[proc-1];
898:   o->numVars = o->firstVar[numProcs];
899:   if (map->isConstrained == PETSC_TRUE) {
900:     (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
901: 
902:     MPI_Allreduce(&o->numLocNewVars, &numNewVars, 1, MPI_INT, MPI_SUM, o->comm);
903:     if (o->numNewVars != numNewVars) SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
904:   }

906:   /* Initialize the overlap */
907:   o->numOverlapVars    = o->numLocVars;
908:   o->numOverlapNewVars = o->numLocNewVars;

910:   if (numProcs > 1) {
911:     /* Map local to global variable numbers */
912:     for(node = 0; node < numNodes; node++)
913:       o->offsets[node] += o->firstVar[rank];

915:     /* Initialize communication */
916:     PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
917:     PetscMalloc(numProcs * sizeof(int), &sumSendVars);
918:     PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
919:     PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
920:     PetscMalloc(numProcs * sizeof(int), &displs);
921:     PetscMemzero(ghostSendVars, numProcs * sizeof(int));
922:     PetscMemzero(sumSendVars,   numProcs * sizeof(int));
923:     PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
924:     PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
925:     PetscMemzero(displs,        numProcs * sizeof(int));

927:     /* Get number of ghost variables to receive from each processor and size of blocks --
928:          here we assume that classes[] already has ghost node classes in it */
929:     for(node = 0; node < numGhostNodes; node++) {
930:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
931:       nclass = classes[numNodes+node];
932:       ghostRecvVars[proc]++;
933:       o->numOverlapVars += classSizes[nclass];
934:     }

936:     /* Get number of constrained ghost variables to receive from each processor and size of blocks */
937:     if (map->isConstrained == PETSC_TRUE) {
938:       (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
939: 
940:     }
941:     o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;

943:     /* Get sizes of ghost variable blocks to send to each processor */
944:     MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);

946:     /* Calculate offsets into the ghost variable receive array */
947:     for(proc = 1; proc < numProcs; proc++) {
948:       sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
949:       displs[proc]      = sumRecvVars[proc];
950:     }

952:     /* Calculate offsets into the ghost variable send array */
953:     for(proc = 1; proc < numProcs; proc++)
954:       sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

956:     /* Send requests for ghost variable offsets to each processor */
957:     numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
958:     PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
959:     for(node = 0; node < numGhostNodes; node++) {
960:       PartitionGhostToGlobalNodeIndex(part, node, &gNode, &proc);
961:       o->offsets[numNodes+(displs[proc]++)] = gNode;
962:     }
963:     MPI_Alltoallv(&o->offsets[numNodes],  ghostRecvVars, sumRecvVars, MPI_INT,
964:                          sendGhostBuffer,        ghostSendVars, sumSendVars, MPI_INT, o->comm);
965: 

967:     /* Send ghost variables offsets to each processor */
968:     for(node = 0; node < numSendGhostVars; node++) {
969:       PartitionGlobalToLocalNodeIndex(part, sendGhostBuffer[node], &locNode);
970:       sendGhostBuffer[node] = o->offsets[locNode];
971:     }
972:     MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
973:                          &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);
974: 

976:     /* Cleanup */
977:     PetscFree(ghostSendVars);
978:     PetscFree(sumSendVars);
979:     PetscFree(ghostRecvVars);
980:     PetscFree(sumRecvVars);
981:     PetscFree(displs);
982:     PetscFree(sendGhostBuffer);

984:     /* We maintain local offsets for ghost variables, meaning the offsets after the last
985:        interior variable, rather than the offset of the given ghost variable in the global
986:        matrix. */
987:     PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
988:     for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
989:       o->localOffsets[node] = var;
990:       nclass = classes[numNodes+node];
991:       var   += classSizes[nclass];
992:     }
993:   }

995:   /* Allocate memory */
996:   PetscMalloc(numClasses * sizeof(int), &localOffsets);
997:   PetscMemzero(localOffsets, numClasses * sizeof(int));

999:   /* Setup local field offsets */
1000:   for(f = 0; f < numFields; f++) {
1001:     field = fields[f];
1002:     ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);
1003:     for(nclass = 0; nclass < numClasses; nclass++) {
1004:       if (fieldClasses[f][nclass]) {
1005:         o->localStart[field][nclass]  = localOffsets[nclass];
1006:         localOffsets[nclass]         += grid->fields[field].disc->bdDisc->comp;
1007:       } else {
1008:         o->localStart[field][nclass]  = -1;
1009:       }
1010:     }
1011:   }
1012:   grid->bdOrder = o;

1014:   /* Cleanup */
1015:   PetscFree(localOffsets);

1017:   /* Setup the local boundary ordering */
1018:   PetscHeaderCreate(l, _LocalVarOrdering, int, IS_COOKIE, 0, "LocalVarOrdering", grid->comm, LocalVarOrderingDestroy, 0);
1019:   PetscLogObjectCreate(l);

1021:   /* Allocate memory */
1022:   l->numFields = numFields;
1023:   PetscMalloc(numFields       * sizeof(int), &l->fields);
1024:   PetscMalloc(grid->numFields * sizeof(int), &l->elemStart);
1025:   PetscLogObjectMemory(l, (numFields + grid->numFields) * sizeof(int));
1026:   PetscMemcpy(l->fields, fields, numFields * sizeof(int));

1028:   /* Put in sentinel values */
1029:   for(f = 0; f < grid->numFields; f++) {
1030:     l->elemStart[f] = -1;
1031:   }

1033:   /* Setup local and global offsets with lower dimensional discretizations */
1034:   for(f = 0, elemOffset = 0; f < numFields; f++) {
1035:     field               = fields[f];
1036:     l->elemStart[field] = elemOffset;
1037:     elemOffset         += grid->fields[field].disc->bdDisc->size;
1038:   }
1039:   l->elemSize = elemOffset;
1040:   grid->bdLocOrder = l;

1042:   return(0);
1043: }
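
/* Worked example with hypothetical sizes: for two boundary fields whose lower-dimensional
   discretizations have bdDisc->size 3 and 6, the final loop above produces
   l->elemStart[fields[0]] = 0, l->elemStart[fields[1]] = 3, and l->elemSize = 9, with
   every other entry of l->elemStart left at the sentinel value -1. */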

1045: #undef  __FUNCT__
1046: #define __FUNCT__ "GridReformMesh_Triangular_2D"
1047: int GridReformMesh_Triangular_2D(Grid grid) {

1051:   GridSetupBoundarySizes_Triangular_2D(grid);
1052:   return(0);
1053: }

1055: #undef  __FUNCT__
1056: #define __FUNCT__ "GridGetBoundaryNext_Triangular_2D"
1057: int GridGetBoundaryNext_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map, int *node, int *nclass) {

1061:   do {
1062:     MeshGetBoundaryNext(grid->mesh, boundary, ghost, node);
1063:   }
1064:   /* Note: I am using the boolean short circuit to avoid indexing classes[] with node == -1 */
1065:   while((*node != -1) && (map->fieldClasses[fieldIdx][map->classes[*node]] == 0));
1066:   if (*node != -1) {
1067:     *nclass = map->classes[*node];
1068:   } else {
1069:     *nclass = -1;
1070:   }
1071:   return(0);
1072: }

1074: #undef  __FUNCT__
1075: #define __FUNCT__ "GridGetBoundaryStart_Triangular_2D"
1076: int GridGetBoundaryStart_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map, int *node, int *nclass) {
1077:   Mesh mesh;
1078:   Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
1079:   int  b; /* Canonical boundary number */
1080:   int  ierr;

1083:   /* Find canonical boundary number */
1084:   GridGetMesh(grid, &mesh);
1085:   MeshGetBoundaryIndex(mesh, boundary, &b);
1086:   if (mesh->activeBd != -1) SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Already iterating over a boundary");
1087:   /* Find first boundary node of a class in the active field */
1088:   mesh->activeBd     = b;
1089:   mesh->activeBdOld  = b;
1090:   mesh->activeBdNode = tri->bdBegin[b] - 1;
1091:   GridGetBoundaryNext_Triangular_2D(grid, boundary, fieldIdx, ghost, map, node, nclass);
1092:   return(0);
1093: }
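
/* Usage sketch: the Start/Next pair above acts as an iterator over the boundary nodes
   that carry the given field; a typical traversal looks like the hypothetical helper
   below (the loop body is elided). */
#if 0
static int TraverseBoundaryField_Sketch(Grid grid, int boundary, int fieldIdx, FieldClassMap map)
{
  int node, nclass, ierr;

  ierr = GridGetBoundaryStart_Triangular_2D(grid, boundary, fieldIdx, PETSC_FALSE, map, &node, &nclass);CHKERRQ(ierr);
  while (node != -1) {
    /* ... work with (node, nclass) ... */
    ierr = GridGetBoundaryNext_Triangular_2D(grid, boundary, fieldIdx, PETSC_FALSE, map, &node, &nclass);CHKERRQ(ierr);
  }
  return(0);
}
#endif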

1095: #undef  __FUNCT__
1096: #define __FUNCT__ "GridCreateRestriction_Triangular_2D"
1097: int GridCreateRestriction_Triangular_2D(Grid dcf, Grid dcc, GMat *gmat) {
1098:   SETERRQ(PETSC_ERR_SUP, " ");
1099: }

1101: #undef  __FUNCT__
1102: #define __FUNCT__ "GridEvaluateRhs_Triangular_2D"
1103: int GridEvaluateRhs_Triangular_2D(Grid grid, GVec x, GVec f, PetscObject ctx) {
1104:   Mesh                  mesh;
1105:   Partition             part;
1106:   Field                *fields               = grid->fields;
1107:   int                   numNewFields         = grid->numNewFields;         /* The number of new fields added by constraints */
1108:   GridFunc             *rhsFuncs             = grid->rhsFuncs;             /* The Rhs PointFunctions */
1109:   int                   numRhsOps            = grid->numRhsOps;            /* The number of Rhs operators */
1110:   GridOp               *rhsOps               = grid->rhsOps;               /* The operators on the Rhs */
1111:   PetscTruth            reduceSystem         = grid->reduceSystem;
1112:   PetscTruth            reduceElement        = grid->reduceElement;
1113:   PetscTruth            explicitConstraints  = grid->explicitConstraints;
1114:   PetscConstraintObject constCtx             = grid->constraintCtx;        /* The constraint object */
1115:   int                   numFields            = grid->cm->numFields;        /* The number of fields in the calculation */
1116:   LocalVarOrdering      locOrder             = grid->locOrder;             /* The default local variable ordering */
1117:   int                   elemSize             = locOrder->elemSize;         /* The number of shape funcs in the elem mat */
1118:   int                  *elemStart            = locOrder->elemStart;        /* The offset of each field in the elem mat */
1119:   ElementVec            vec                  = grid->vec;                  /* The element vector */
1120:   PetscScalar          *array                = vec->array;                 /* The values in the element vector */
1121:   Vec                   ghostVec             = grid->ghostVec;             /* The local solution vector */
1122:   ElementVec            elemGhostVec         = grid->ghostElementVec;      /* The element vector from ghostVec */
1123:   PetscScalar          *ghostArray           = elemGhostVec->array;        /* The values in elemGhostVec */
1124:   ElementMat            mat                  = grid->mat;                  /* A temporary element matrix */
1125:   PetscScalar          *matArray             = mat->array;                 /* The values in the element matrix */
1126:   MeshMover             mover;
1127:   Grid                  ALEGrid;                                           /* The grid describing the mesh velocity */
1128:   VarOrdering           order;                                             /* The default variable ordering */
1129:   ElementVec            MeshALEVec;                                        /* ALE velocity vector from mesh */
1130:   ElementVec            ALEVec;                                            /* ALE velocity vector */
1131:   PetscScalar          *ALEArray;                                          /* The values in the ALE element vector */
1132:   int                   computeFunc, computeLinear, computeNonlinear;      /* Flags for selective computation */
1133:   PetscScalar          *nonlinearArgs[2];
1134:   int                   newComp = 0;
1135:   int                   numElements;
1136:   int                   sField, tField, op, newField, elem, func, fieldIndex;
1137: #ifdef PETSC_USE_BOPT_g
1138:   int                   var;
1139:   PetscTruth            opt;
1140: #endif
1141:   int                   ierr;

1144:   GridGetMesh(grid, &mesh);
1145:   MeshGetPartition(mesh, &part);
1146:   if (explicitConstraints == PETSC_TRUE) {
1147:     order = grid->constraintOrder;
1148:   } else {
1149:     order = grid->order;
1150:   }
1151:   /* Handle selective computation */
1152:   computeFunc        = 1;
1153:   if (grid->activeOpTypes[0] == PETSC_FALSE) computeFunc      = 0;
1154:   computeLinear      = 1;
1155:   if (grid->activeOpTypes[1] == PETSC_FALSE) computeLinear    = 0;
1156:   computeNonlinear   = 1;
1157:   if (grid->activeOpTypes[2] == PETSC_FALSE) computeNonlinear = 0;

1159:   /* Fill the local solution vectors */
1160:   if (x != PETSC_NULL) {
1161:     GridGlobalToLocal(grid, INSERT_VALUES, x);
1162:   }

1164:   /* Setup ALE variables */
1165:   if (grid->ALEActive == PETSC_TRUE) {
1166:     MeshGetMover(mesh, &mover);
1167:     MeshMoverGetVelocityGrid(mover, &ALEGrid);
1168:     /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1169:     ElementVecDuplicate(grid->vec, &ALEVec);
1170:     ALEArray   = ALEVec->array;
1171:     MeshALEVec = ALEGrid->vec;
1172:   } else {
1173:     ALEArray   = PETSC_NULL;
1174:     MeshALEVec = PETSC_NULL;
1175:   }

1177:   /* Loop over elements */
1178:   PartitionGetNumElements(part, &numElements);
1179:   for(elem = 0; elem < numElements; elem++) {
1180:     /* Initialize element vector */
1181:     ElementVecZero(vec);
1182:     vec->reduceSize          = locOrder->elemSize;
1183:     elemGhostVec->reduceSize = locOrder->elemSize;

1185:     /* Setup local row indices for the ghost vector */
1186:     GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1187:     /* Setup local solution vector */
1188:     GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1189:     /* Must transform to unconstrained variables for element integrals */
1190:     GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);

1192:     /* Setup ALE variables */
1193:     if (grid->ALEActive == PETSC_TRUE) {
1194:       GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1195:       GridLocalToElement(ALEGrid, MeshALEVec);
1196:     }

1198:     if (computeFunc) {
1199:       for(func = 0; func < grid->numRhsFuncs; func++) {
1200:         if (fields[rhsFuncs[func].field].isActive == PETSC_FALSE) continue;
1201:         tField = rhsFuncs[func].field;
1202:         DiscretizationEvaluateFunctionGalerkin(fields[tField].disc, mesh, *rhsFuncs[tField].func, rhsFuncs[tField].alpha, elem,
1203:                                                       &array[elemStart[tField]], ctx);
1204: 
1205:       }
1206: #ifdef PETSC_USE_BOPT_g
1207:       PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1208: #endif
1209:     }

1211:     for(op = 0; op < numRhsOps; op++) {
1212:       if (fields[rhsOps[op].field].isActive == PETSC_FALSE) continue;
1213:       if ((rhsOps[op].nonlinearOp != PETSC_NULL) && (computeNonlinear)) {
1214:         tField = rhsOps[op].field;
1215:         nonlinearArgs[0] = &ghostArray[elemStart[tField]];
1216:         nonlinearArgs[1] = &ghostArray[elemStart[tField]];
1217:         if (rhsOps[op].isALE) {
1218:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, tField, ALEVec);
1219:           DiscretizationEvaluateNonlinearALEOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1220:                                                                     rhsOps[op].alpha, elem, 2, nonlinearArgs,
1221:                                                                     ALEArray, &array[elemStart[tField]], ctx);
1222: 
1223:         } else {
1224:           DiscretizationEvaluateNonlinearOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1225:                                                                  rhsOps[op].alpha, elem, 2, nonlinearArgs,
1226:                                                                  &array[elemStart[tField]], ctx);
1227: 
1228:         }
1229:       } else if (computeLinear) {
1230:         sField = rhsOps[op].field;
1231:         tField = fields[sField].disc->operators[rhsOps[op].op]->test->field;
1232:         ElementMatZero(mat);
1233:         if (rhsOps[op].isALE) {
1234:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1235:           DiscretizationEvaluateALEOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1236:                                                              rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1237:                                                              &ghostArray[elemStart[sField]], ALEArray, array, matArray, ctx);
1238: 
1239:         } else {
1240:           DiscretizationEvaluateOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1241:                                                           rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1242:                                                           &ghostArray[elemStart[sField]], array, matArray, ctx);
1243: 
1244:         }
1245:       }
1246: #ifdef PETSC_USE_BOPT_g
1247:       PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1248: #endif
1249:     }

1251:     /* Setup global row indices, with reduction if necessary */
1252:     GridCalcGeneralElementVecIndices(grid, elem, order, PETSC_NULL, PETSC_FALSE, vec);
1253: #ifdef PETSC_USE_BOPT_g
1254:     PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
1255:     if (opt == PETSC_TRUE) {
1256:       for(var = 0; var < vec->reduceSize; var++)
1257:         PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1258:     }
1259: #endif
1260:     /* Put values in the global vector */
1261:     ElementVecSetValues(vec, f, ADD_VALUES);
1262:   }

1264:   /* Cleanup ALE variables */
1265:   if (grid->ALEActive == PETSC_TRUE) {
1266:     ElementVecDestroy(ALEVec);
1267:   }

1269:   /* Evaluate self-interaction of new fields created by constraints */
1270:   if (explicitConstraints == PETSC_TRUE) {
1271:     /* WARNING: This only accommodates 1 constrained field */
1272:     /* Get constraint information */
1273:     for(fieldIndex = 0; fieldIndex < numFields; fieldIndex++) {
1274:       sField = grid->cm->fields[fieldIndex];
1275:       if (fields[sField].isConstrained == PETSC_TRUE) {
1276:         newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1277:         break;
1278:       }
1279:     }
1280:     /* Calculate self-interaction */
1281:     for(newField = 0; newField < numNewFields; newField++) {
1282:       /* Initialize element vector */
1283:       ElementVecZero(vec);
1284:       vec->reduceSize = newComp;

1286:       /* Calculate the indices and contribution to the element matrix from the new field */
1287:       (*constCtx->ops->newelemvec)(constCtx, order, newField, vec);
1288: #ifdef PETSC_USE_BOPT_g
1289:       PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly_constrained", &opt);
1290:       if (opt == PETSC_TRUE) {
1291:         for(var = 0; var < vec->reduceSize; var++)
1292:           PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1293:       }
1294: #endif
1295:       /* Put values in global matrix */
1296:       ElementVecSetValues(vec, f, ADD_VALUES);
1297: #ifdef PETSC_USE_BOPT_g
1298:       PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1299: #endif
1300:     }
1301:   }

1303:   /* Reset element vectors */
1304:   vec->reduceSize          = locOrder->elemSize;
1305:   elemGhostVec->reduceSize = locOrder->elemSize;

1307:   VecAssemblyBegin(f);
1308:   VecAssemblyEnd(f);
1309:   return(0);
1310: }
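/* The routine above is an instance of the usual PETSc element-assembly pattern for a
   right-hand-side vector: compute each element contribution locally, map it to global
   row indices, add it with ADD_VALUES, and finish with VecAssemblyBegin/End. Below is a
   minimal sketch of that pattern using plain Vec calls only; the three-entries-per-element
   layout, ComputeElementRhs_Sketch, and GridEvaluateRhsSketch are illustrative
   placeholders, not part of this file. */
#include "petscvec.h"

/* Illustrative stand-in for the DiscretizationEvaluate*Galerkin calls above */
static void ComputeElementRhs_Sketch(int elem, int indices[3], PetscScalar values[3])
{
  int i;

  for(i = 0; i < 3; i++) {
    indices[i] = 3*elem + i;   /* placeholder global row numbers     */
    values[i]  = 1.0;          /* placeholder element vector entries */
  }
}

int GridEvaluateRhsSketch(Vec f, int numElements)
{
  PetscScalar values[3];
  int         indices[3];
  int         elem, ierr;

  for(elem = 0; elem < numElements; elem++) {
    ComputeElementRhs_Sketch(elem, indices, values);
    /* Add rather than insert, so contributions from neighboring elements accumulate */
    ierr = VecSetValues(f, 3, indices, values, ADD_VALUES);CHKERRQ(ierr);
  }
  /* Communicate off-process contributions and complete the assembly */
  ierr = VecAssemblyBegin(f);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(f);CHKERRQ(ierr);
  return(0);
}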

1312: #undef  __FUNCT__
1313: #define __FUNCT__ "GridEvaluateSystemMatrix_Triangular_2D"
1314: int GridEvaluateSystemMatrix_Triangular_2D(Grid grid, GVec x, GMat *J, GMat *M, MatStructure *flag, PetscObject ctx) {
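  /* Assembles the global system matrix for the 2d triangular grid: the current solution
     x is scattered to a ghosted local vector, every registered Galerkin operator in
     grid->matOps is evaluated element by element (with an ALE correction when mesh
     motion is active), the element matrices are summed into the global GMat with
     ADD_VALUES, and fields introduced by explicit constraints finally contribute their
     self-interaction blocks before the matrix is assembled. */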
1315:   GMat                  A             = *J;                     /* The working system matrix */
1316:   Field                *fields        = grid->fields;
1317:   int                   numNewFields  = grid->numNewFields;     /* The number of new fields added by constraints */
1318:   int                   numMatOps     = grid->numMatOps;        /* The number of operators in the matrix */
1319:   GridOp               *matOps        = grid->matOps;           /* The operators in the system matrix */
1320:   VarOrdering           constOrder    = grid->constraintOrder;  /* The constrained variable ordering */
1321:   PetscTruth            reduceSystem  = grid->reduceSystem;
1322:   PetscTruth            reduceElement = grid->reduceElement;
1323:   PetscTruth            expConst      = grid->explicitConstraints;
1324:   PetscConstraintObject constCtx      = grid->constraintCtx;    /* The constraint object */
1325:   int                   numFields     = grid->cm->numFields;    /* The number of fields in the calculation */
1326:   LocalVarOrdering      locOrder      = grid->locOrder;         /* The default local variable ordering */
1327:   int                   elemSize      = locOrder->elemSize;     /* The number of shape functions in the element matrix */
1328:   int                  *elemStart     = locOrder->elemStart;    /* The offset of each field in the element matrix */
1329:   ElementMat            mat           = grid->mat;              /* The element matrix */
1330:   PetscScalar          *array         = mat->array;             /* The values in the element matrix */
1331:   Vec                   ghostVec      = grid->ghostVec;         /* The local solution vector */
1332:   ElementVec            elemGhostVec  = grid->ghostElementVec;  /* The element vector from ghostVec */
1333:   PetscScalar          *ghostArray    = elemGhostVec->array;    /* The values in elemGhostVec */
1334:   Mesh                  mesh;
1335:   Partition             part;
1336:   MeshMover             mover;
1337:   Grid                  ALEGrid;                                /* The grid describing the mesh velocity */
1338:   VarOrdering           order;                                  /* The default variable ordering */
1339:   ElementVec            MeshALEVec;                             /* ALE velocity vector with mesh discretization */
1340:   ElementVec            ALEVec;                                 /* ALE velocity vector */
1341:   PetscScalar          *ALEArray;                               /* The values in the ALE element vector */
1342:   int                   newComp = 0;
1343:   int                   numElements;
1344:   int                   elem, f, sField, tField, op, newField;
1345: #ifdef PETSC_USE_BOPT_g
1346:   PetscTruth            opt;
1347: #endif
1348:   int                   ierr;

1351:   GridGetMesh(grid, &mesh);
1352:   MeshGetPartition(mesh, &part);
1353:   if (expConst == PETSC_TRUE) {
1354:     order = grid->constraintOrder;
1355:   } else {
1356:     order = grid->order;
1357:   }
1358:   /* Fill the local solution vectors */
1359:   if (x != PETSC_NULL) {
1360:     GridGlobalToLocal(grid, INSERT_VALUES, x);
1361:   }

1363:   /* Setup ALE variables -- no new variables should be ALE, so ALEVec is not recalculated */
1364:   if (grid->ALEActive == PETSC_TRUE) {
1365:     MeshGetMover(mesh, &mover);
1366:     MeshMoverGetVelocityGrid(mover, &ALEGrid);
1367:     /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1368:     ElementVecDuplicate(grid->vec, &ALEVec);
1369:     ALEArray   = ALEVec->array;
1370:     MeshALEVec = ALEGrid->vec;
1371:   } else {
1372:     ALEArray   = PETSC_NULL;
1373:     MeshALEVec = PETSC_NULL;
1374:   }
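  /* MeshALEVec carries the mesh velocity in the velocity grid's own discretization;
     inside the element loop GridInterpolateElementVec() re-expresses it in the
     discretization of the field being assembled (filling ALEVec/ALEArray) before each
     ALE operator evaluation. */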

1376:   /* Loop over elements */
1377:   PartitionGetNumElements(part, &numElements);
1378:   for(elem = 0; elem < numElements; elem++) {
1379:     /* Initialize element matrix */
1380:     ElementMatZero(mat);
1381:     mat->reduceRowSize       = locOrder->elemSize;
1382:     mat->reduceColSize       = locOrder->elemSize;
1383:     elemGhostVec->reduceSize = locOrder->elemSize;

1385:     /* Setup local row indices for the ghost vector */
1386:     GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1387:     /* Setup local solution vector */
1388:     GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1389:     /* Must transform to unconstrained variables for element integrals */
1390:     GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);

1392:     /* Setup ALE variables */
1393:     if (grid->ALEActive == PETSC_TRUE) {
1394:       GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1395:       GridLocalToElement(ALEGrid, MeshALEVec);
1396:     }

1398:     /* Calculate the contribution to the element matrix from each field */
1399:     for(op = 0; op < numMatOps; op++) {
1400:       sField = matOps[op].field;
1401:       tField = fields[sField].disc->operators[matOps[op].op]->test->field;
1402:       if (fields[sField].isActive) {
1403:         if (matOps[op].isALE) {
1404:           GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1405:           DiscretizationEvaluateALEOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1406:                                                            matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1407:                                                            ALEArray, array, ctx);
1408: 
1409:         } else {
1410:           DiscretizationEvaluateOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1411:                                                         matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1412:                                                         array, ctx);
1413: 
1414:         }
1415: #ifdef PETSC_USE_BOPT_g
1416:         PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1417: #endif
1418:       }
1419:     }

1421:     /* Setup global numbering, with reduction if necessary */
1422:     GridCalcGeneralElementMatIndices(grid, elem, order, order, PETSC_FALSE, mat);
1423: #ifdef PETSC_USE_BOPT_g
1424:     PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
1425:     if (opt == PETSC_TRUE) {
1426:       ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1427:     }
1428: #endif
1429:     /* Put values in the global matrix */
1430:     ElementMatSetValues(mat, A, ADD_VALUES);
1431:   }

1433:   /* Evaluate self-interaction of new fields created by constraints */
1434:   if (expConst == PETSC_TRUE) {
1435:     /* WARNING: This only accommodates 1 constrained field */
1436:     /* Get constraint information */
1437:     for(f = 0; f < numFields; f++) {
1438:       sField = grid->cm->fields[f];
1439:       if (fields[sField].isConstrained == PETSC_TRUE) {
1440:         newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1441:         break;
1442:       }
1443:     }
1444:     /* Calculate self-interaction */
1445:     for(newField = 0; newField < numNewFields; newField++) {
1446:       /* Initialize element matrix */
1447:       ElementMatZero(mat);
1448:       mat->reduceRowSize = newComp;
1449:       mat->reduceColSize = newComp;

1451:       /* Calculate the indices and contribution to the element matrix from the new field */
1452:       (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
1453: #ifdef PETSC_USE_BOPT_g
1454:       PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly_constrained", &opt);
1455:       if (opt == PETSC_TRUE) {
1456:         ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1457:       }
1458: #endif
1459:       /* Put values in global matrix */
1460:       ElementMatSetValues(mat, A, ADD_VALUES);
1461: #ifdef PETSC_USE_BOPT_g
1462:       PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1463: #endif
1464:     }
1465:   }

1467:   /* Assemble matrix */
1468:   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
1469:   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);

1471:   /* Reset element matrix and vector */
1472:   mat->reduceRowSize       = locOrder->elemSize;
1473:   mat->reduceColSize       = locOrder->elemSize;
1474:   elemGhostVec->reduceSize = locOrder->elemSize;

1476:   /* Cleanup */
1477:   if (grid->ALEActive == PETSC_TRUE) {
1478:     ElementVecDestroy(ALEVec);
1479:   }

1481:   GridResetConstrainedMultiply_Private(grid, A);
1482:   return(0);
1483: }
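/* The matrix path above follows the same skeleton: zero an element matrix, evaluate the
   Galerkin operators, map local rows and columns to global indices, add the block with
   ADD_VALUES, and finish with MatAssemblyBegin/End using MAT_FINAL_ASSEMBLY. A minimal
   sketch of that skeleton with plain Mat calls follows; the three-dof element,
   ComputeElementMat_Sketch, and GridEvaluateSystemMatrixSketch are illustrative
   placeholders, not part of this file. */
#include "petscmat.h"

/* Illustrative stand-in for the DiscretizationEvaluate*OperatorGalerkin calls above */
static void ComputeElementMat_Sketch(int elem, int idx[3], PetscScalar K[9])
{
  int i, j;

  for(i = 0; i < 3; i++) idx[i] = 3*elem + i;                 /* placeholder global indices */
  for(i = 0; i < 3; i++) {
    for(j = 0; j < 3; j++) K[i*3+j] = (i == j) ? 2.0 : -1.0;  /* placeholder entries */
  }
}

int GridEvaluateSystemMatrixSketch(Mat A, int numElements)
{
  PetscScalar K[9];
  int         idx[3];
  int         elem, ierr;

  for(elem = 0; elem < numElements; elem++) {
    ComputeElementMat_Sketch(elem, idx, K);
    /* Element blocks are summed into the global matrix (values passed row-major) */
    ierr = MatSetValues(A, 3, idx, 3, idx, K, ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return(0);
}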

1485: static struct _GridOps GOps = {GridSetUp_Triangular_2D,
1486:                                GridSetupBoundary_Triangular_2D,
1487:                                GridSetupConstraints_Triangular_2D,
1488:                                GridSetupGhostScatter_Triangular_2D,
1489:                                PETSC_NULL/* GridSetFromOptions */,
1490:                                PETSC_NULL/* GridDuplicate */,
1491:                                PETSC_NULL/* GridReform */,
1492:                                PETSC_NULL/* GridCopy */,
1493:                                GridDestroy_Triangular_2D,
1494:                                GridView_Triangular_2D,
1495:                                GridGetBoundaryStart_Triangular_2D,
1496:                                GridGetBoundaryNext_Triangular_2D,
1497:                                GridReformMesh_Triangular_2D,
1498:                                GridCreateGMat_Triangular_2D,
1499:                                GridCreateVarOrdering_Triangular_2D,
1500:                                GridCreateLocalVarOrdering_Triangular_2D,
1501:                                GridCreateVarScatter_Triangular_2D,
1502:                                GridVarOrderingConstrain_Triangular_2D,
1503:                                GridCalcElementVecIndices_Triangular_2D,
1504:                                GridCalcElementMatIndices_Triangular_2D,
1505:                                GridCalcBoundaryElementVecIndices_Triangular_2D,
1506:                                GridCalcBoundaryElementMatIndices_Triangular_2D,
1507:                                GridProjectElementVec_Triangular_2D,
1508:                                GVecGetLocalGVec_Triangular_2D,
1509:                                GVecRestoreLocalGVec_Triangular_2D,
1510:                                PETSC_NULL/* GVecGetWorkGVec */,
1511:                                PETSC_NULL/* GVecRestoreWorkGVec */,
1512:                                GVecGlobalToLocal_Triangular_2D,
1513:                                GVecLocalToGlobal_Triangular_2D,
1514:                                GVecView_Triangular_2D,
1515:                                GridCreateRestriction_Triangular_2D,
1516:                                GVecEvaluateFunction_Triangular_2D,
1517:                                GVecEvaluateFunctionBoundary_Triangular_2D,
1518:                                GVecEvaluateFunctionCollective_Triangular_2D,
1519:                                GVecEvaluateFunctionGalerkin_Triangular_2D,
1520:                                GVecEvaluateFunctionGalerkinCollective_Triangular_2D,
1521:                                GVecEvaluateBoundaryFunctionGalerkin_Triangular_2D,
1522:                                GVecEvaluateBoundaryFunctionGalerkinCollective_Triangular_2D,
1523:                                GVecEvaluateOperatorGalerkin_Triangular_2D,
1524:                                GVecEvaluateNonlinearOperatorGalerkin_Triangular_2D,
1525:                                GVecEvaluateSystemMatrix_Triangular_2D,
1526:                                GVecEvaluateSystemMatrixDiagonal_Triangular_2D,
1527:                                GMatView_Triangular_2D,
1528:                                GMatEvaluateOperatorGalerkin_Triangular_2D,
1529:                                GMatEvaluateALEOperatorGalerkin_Triangular_2D,
1530:                                GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D,
1531:                                GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D,
1532:                                GridEvaluateRhs_Triangular_2D,
1533:                                GridEvaluateSystemMatrix_Triangular_2D};
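/* GOps is the function table for this implementation: GridCreate_Triangular_2D() below
   copies it into grid->ops, so generic Grid calls dispatch to the Triangular_2D routines
   defined in this file. Entries left null are operations this implementation does not
   supply. */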

1535: EXTERN_C_BEGIN
1536: #undef  __FUNCT__
1537: #define __FUNCT__ "GridCreate_Triangular_2D"
1538: int GridCreate_Triangular_2D(Grid grid) {

1542:   PetscMemcpy(grid->ops, &GOps, sizeof(struct _GridOps));
1543:   /* General grid description */
1544:   grid->dim  = 2;
1545:   grid->data = PETSC_NULL;
1546:   return(0);
1547: }
1548: EXTERN_C_END
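/* The EXTERN_C_BEGIN/EXTERN_C_END wrapper gives GridCreate_Triangular_2D C linkage, so
   the constructor can be looked up by its unmangled symbol name when the grid type is
   selected at runtime, following the usual PETSc registration pattern. */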