Actual source code: gmat.c
1: #ifdef PETSC_RCS_HEADER
2: static char vcid[] = "$Id: gmat.c,v 1.34 2000/07/16 23:20:03 knepley Exp $";
3: #endif
5: /* This file provides routines for grid matrices */
7: #include "src/gvec/gvecimpl.h"
8: #include "petscts.h"
9: #include "gsolver.h"
11: /* Logging support */
12: int GMAT_CreateRectangular, GMAT_EvaluateOperatorGalerkin, GMAT_EvaluateSystemMatrix, GMAT_SetBoundary;
13: int GMAT_MatMultConstrained, GMAT_MatMultTransposeConstrained;
15: extern int MatMult_MPIAIJ(Mat, Vec, Vec); /* For GMatMatMultConstrained */
17: #undef __FUNCT__
19: /*@C
20: GMatDestroy - Destroys a grid matrix.
21:
22: Input Parameter:
23: . mat - the matrix
25: Level: beginner
27: .keywords: matrix, destroy
28: @*/
29: int GMatDestroy(Mat mat)
30: {
35: if (--mat->refct > 0) return(0);
36: MatDestroy(mat);
37: return(0);
38: }
40: #undef __FUNCT__
42: /*@
43: GMatView - Views a grid matrix.
45: Input Parameters:
46: + mat - the grid matrix
47: - viewer - an optional visualization context
49: Notes:
50: GMatView() supports the same viewers as MatView(). The only difference
51: is that for all multiprocessor cases, the output matrix employs the natural
52: ordering of the grid, so in many cases this corresponds to the ordering
53: that would have been used in the uniprocessor case.
55: The available visualization contexts include
56: $ VIEWER_STDOUT_SELF - standard output (default)
57: $ VIEWER_STDOUT_WORLD - synchronized standard
58: $ output where only the first processor opens
59: $ the file. All other processors send their
60: $ data to the first processor to print.
62: The user can open alternative visualization contexts with
63: $ PetscViewerFileOpenASCII() - output matrix to a specified file
64: $ PetscViewerFileOpenBinary() - output in binary to a
65: $ specified file; corresponding input uses MatLoad()
66: $ PetscViewerDrawOpenX() - output matrix to an X window display
67: $ DrawLGCreate() - output matrix as a line graph to an X window display
68: $ PetscViewerMatlabOpen() - output matrix to Matlab viewer
70: Level: beginner
72: .keywords: view, visualize, output, print, write, draw
73: .seealso: MatView()
74: @*/
75: int GMatView(GMat mat, PetscViewer viewer)
76: {
77: Grid grid;
78: int ierr;
83: if (!viewer) {
84: viewer = PETSC_VIEWER_STDOUT_SELF;
85: } else {
87: }
88: GMatGetGrid(mat, &grid);
89: (*grid->ops->gmatview)(mat, viewer);
90: return(0);
91: }
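/* Usage sketch (added for illustration; not part of the original gmat.c):
   create a grid matrix and view it with the default ASCII viewer.  Assumes
   "grid" has already been set up elsewhere; error checking is elided to
   match the listing style. */
static int GMatViewExample(Grid grid)
{
  GMat mat;

  GMatCreate(grid, &mat);                  /* operator preallocated on grid->order */
  GMatView(mat, PETSC_VIEWER_STDOUT_SELF); /* accepts the same viewers as MatView() */
  GMatDestroy(mat);
  return(0);
}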
93: #undef __FUNCT__
95: /*@
96: GMatSerialize - This function stores or recreates a grid matrix using a viewer for
97: a binary file.
99: Input Parameters:
100: + viewer - The viewer context
101: - store - This flag is PETSC_TRUE if data is being written, otherwise it will be read
103: Output Parameter:
104: . m - The grid matrix
106: Level: beginner
108: .keywords: grid matrix, serialize
109: .seealso: GridSerialize()
110: @*/
111: int GMatSerialize(Grid grid, GMat *m, PetscViewer viewer, PetscTruth store)
112: {
113: int fd;
114: GMat mat;
115: MatInfo info;
116: PetscScalar *vals, *vals2;
117: int *cols, *cols2;
118: int *diag;
119: int *offdiag;
120: int *firstCol;
121: int type, rowVars, rowLocVars, colVars, colLocVars, numNonZeros;
122: int rowStart, rowEnd, colStart, colEnd, offset, size;
123: int numProcs, rank;
124: int proc, row, col;
125: PetscTruth match;
126: int ierr;
133: PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_BINARY, &match);
134: if (match == PETSC_FALSE) SETERRQ(PETSC_ERR_ARG_WRONG, "Must be binary viewer");
135: PetscViewerBinaryGetDescriptor(viewer, &fd);
136: if (store) {
138: MatGetSize(*m, &rowVars, &colVars);
139: MatGetLocalSize(*m, &rowLocVars, &colLocVars);
140: MatGetInfo(*m, MAT_LOCAL, &info);
141: MatGetOwnershipRange(*m, &rowStart, &rowEnd);
142: PetscBinaryWrite(fd, &(*m)->cookie, 1, PETSC_INT, 0);
143: PetscBinaryWrite(fd, &rowVars, 1, PETSC_INT, 0);
144: PetscBinaryWrite(fd, &rowLocVars, 1, PETSC_INT, 0);
145: PetscBinaryWrite(fd, &colVars, 1, PETSC_INT, 0);
146: PetscBinaryWrite(fd, &colLocVars, 1, PETSC_INT, 0);
147: PetscBinaryWrite(fd, &info.nz_used, 1, PETSC_INT, 0);
148: numNonZeros = (int) info.nz_used;
149: PetscMalloc((rowLocVars*2 + numNonZeros) * sizeof(int) + numNonZeros * sizeof(PetscScalar), &diag);
150: offdiag = diag + rowLocVars;
151: cols = offdiag + rowLocVars;
152: vals = (PetscScalar *) (cols + numNonZeros);
153: PetscMemzero(diag, rowLocVars*2 * sizeof(int));
154: MPI_Comm_size(grid->comm, &numProcs);
155: MPI_Comm_rank(grid->comm, &rank);
156: PetscMalloc((numProcs+1) * sizeof(int), &firstCol);
157: MPI_Allgather(&colLocVars, 1, MPI_INT, &firstCol[1], 1, MPI_INT, grid->comm);
158: for(proc = 1, firstCol[0] = 0; proc <= numProcs; proc++)
159: firstCol[proc] += firstCol[proc-1];
160: if (firstCol[numProcs] != colVars) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid column partition");
161: colStart = firstCol[rank];
162: colEnd = firstCol[rank+1];
163: for(row = rowStart, offset = 0; row < rowEnd; row++, offset += size) {
164: MatGetRow(*m, row, &size, &cols2, &vals2);
165: for(col = 0; col < size; col++)
166: if ((cols2[col] >= colStart) && (cols2[col] < colEnd)) {
167: diag[row-rowStart]++;
168: } else {
169: offdiag[row-rowStart]++;
170: }
171: PetscMemcpy(&cols[offset], cols2, size * sizeof(int));
172: PetscMemcpy(&vals[offset], vals2, size * sizeof(PetscScalar));
173: MatRestoreRow(*m, row, &size, &cols2, &vals2);
174: }
175: PetscBinaryWrite(fd, diag, rowLocVars, PETSC_INT, 0);
176: PetscBinaryWrite(fd, offdiag, rowLocVars, PETSC_INT, 0);
177: PetscBinaryWrite(fd, cols, numNonZeros, PETSC_INT, 0);
178: PetscBinaryWrite(fd, vals, numNonZeros, PETSC_SCALAR, 0);
179: PetscFree(diag); /* offdiag, cols, and vals alias this single allocation */
183: } else {
184: PetscBinaryRead(fd, &type, 1, PETSC_INT);
185: if (type != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_ARG_WRONG, "Non-matrix object");
186: PetscBinaryRead(fd, &rowVars, 1, PETSC_INT);
187: PetscBinaryRead(fd, &rowLocVars, 1, PETSC_INT);
188: PetscBinaryRead(fd, &colVars, 1, PETSC_INT);
189: PetscBinaryRead(fd, &colLocVars, 1, PETSC_INT);
190: PetscBinaryRead(fd, &numNonZeros, 1, PETSC_INT);
191: MPI_Reduce(&rowLocVars, &size, 1, MPI_INT, MPI_SUM, 0, grid->comm);
192: if (size != rowVars) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid row partition");
193: MPI_Reduce(&colLocVars, &size, 1, MPI_INT, MPI_SUM, 0, grid->comm);
194: if (size != colVars) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid column partition");
195: PetscMalloc((rowLocVars*2 + numNonZeros) * sizeof(int) + numNonZeros * sizeof(PetscScalar), &diag);
196: offdiag = diag + rowLocVars;
197: cols = offdiag + rowLocVars;
198: vals = (PetscScalar *) (cols + numNonZeros);
199: PetscBinaryRead(fd, diag, rowLocVars, PETSC_INT);
200: PetscBinaryRead(fd, offdiag, rowLocVars, PETSC_INT);
201: MatCreateMPIAIJ(grid->comm, rowLocVars, colLocVars, rowVars, colVars, 0, diag, 0, offdiag, &mat);
202: PetscObjectCompose((PetscObject) mat, "Grid", (PetscObject) grid);
203: MatGetOwnershipRange(mat, &rowStart, &rowEnd);
204: if (rowEnd - rowStart != rowLocVars) SETERRQ(PETSC_ERR_ARG_CORRUPT, "Invalid row partition");
205: PetscBinaryRead(fd, cols, numNonZeros, PETSC_INT);
206: PetscBinaryRead(fd, vals, numNonZeros, PETSC_SCALAR);
207: for(row = rowStart, offset = 0; row < rowEnd; row++, offset += size)
208: {
209: size = diag[row-rowStart] + offdiag[row-rowStart];
210: MatSetValues(mat, 1, &row, size, &cols[offset], &vals[offset], INSERT_VALUES);
211: }
212: PetscFree(diag); /* offdiag, cols, and vals alias this single allocation */
217: MatAssemblyBegin(mat, MAT_FINAL_ASSEMBLY);
218: MatAssemblyEnd(mat, MAT_FINAL_ASSEMBLY);
219: *m = mat;
220: }
221: return(0);
222: }
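/* Usage sketch (added for illustration; not part of the original gmat.c):
   round-trip a grid matrix through a binary viewer with GMatSerialize().
   Assumes "grid", an assembled matrix "mat", and a binary PetscViewer
   "viewer" (opened for writing, then reopened for reading) exist elsewhere. */
static int GMatSerializeExample(Grid grid, GMat mat, PetscViewer viewer)
{
  GMat loaded;

  GMatSerialize(grid, &mat, viewer, PETSC_TRUE);     /* store: writes sizes, nonzero pattern, values */
  GMatSerialize(grid, &loaded, viewer, PETSC_FALSE); /* load: recreates an MPIAIJ matrix on grid->comm */
  GMatDestroy(loaded);
  return(0);
}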
224: #undef __FUNCT__
226: /*@C
227: GMatDuplicate - Duplicates a grid matrix.
229: Input Parameter:
230: . mat - The matrix
232: Level: beginner
234: .keywords: grid matrix, duplicate
235: .seealso: MatDuplicate(), GMatDestroy()
236: @*/
237: int GMatDuplicate(GMat mat, GMat *newmat)
238: {
239: int ierr;
244: MatConvert(mat, MATSAME, newmat);
245: return(0);
246: }
248: #undef __FUNCT__
250: /*@
251: GMatGetSize - Returns the numbers of rows and columns in a matrix.
253: Not Collective
255: Input Parameter:
256: . mat - the matrix
258: Output Parameters:
259: + M - the number of global rows
260: - N - the number of global columns
262: Level: intermediate
264: .keywords: matrix, dimension, size, rows, columns, global, get
265: .seealso: GMatGetLocalSize()
266: @*/
267: int GMatGetSize(GMat mat, int *M, int* N)
268: {
269: Grid grid;
270: PetscTruth isConstrained = PETSC_FALSE;
271: PetscTruth explicitConstraints = PETSC_TRUE;
272: int gM, gN;
273: int ierr;
277: GMatGetGrid(mat, &grid);
278: GridIsConstrained(grid, &isConstrained);
279: GridGetExplicitConstraints(grid, &explicitConstraints);
280: if ((isConstrained == PETSC_FALSE) || (explicitConstraints == PETSC_TRUE)) {
281: if (M) *M = mat->M;
282: if (N) *N = mat->N;
283: } else {
284: GridGetConstraints(grid, PETSC_NULL, PETSC_NULL, &gM, PETSC_NULL);
285: gN = gM;
286: /* KLUDGE - Must catch matrices which arise from nonstandard orderings
287: if (mat->N == x->N) gN = x->N;
288: if (mat->M == y->N) gM = y->N; */
289: if (M) *M = gM;
290: if (N) *N = gN;
291: }
292: return(0);
293: }
295: #undef __FUNCT__
297: /*@
298: GMatGetLocalSize - Returns the number of rows and columns in a matrix
299: stored locally. This information may be implementation dependent, so
300: use with care.
302: Not Collective
304: Input Parameters:
305: . mat - the matrix
307: Output Parameters:
308: + m - the number of local rows
309: - n - the number of local columns
311: Level: intermediate
313: .keywords: matrix, dimension, size, local, rows, columns, get
314: .seealso: GMatGetSize()
315: @*/
316: int GMatGetLocalSize(GMat mat, int *m, int* n)
317: {
318: Grid grid;
319: PetscTruth isConstrained = PETSC_FALSE;
320: PetscTruth explicitConstraints = PETSC_TRUE;
321: int gm, gn;
322: int ierr;
326: GMatGetGrid(mat, &grid);
327: GridIsConstrained(grid, &isConstrained);
328: GridGetExplicitConstraints(grid, &explicitConstraints);
329: if ((isConstrained == PETSC_FALSE) || (explicitConstraints == PETSC_TRUE)) {
330: if (m) *m = mat->m;
331: if (n) *n = mat->n;
332: } else {
333: GridGetConstraints(grid, PETSC_NULL, PETSC_NULL, PETSC_NULL, &gm);
334: gn = gm;
335: /* KLUDGE - Must catch matrices which arise from nonstandard orderings
336: if (mat->n == x->n) gn = x->n;
337: if (mat->m == y->n) gm = y->n; */
338: if (m) *m = gm;
339: if (n) *n = gn;
340: }
341: return(0);
342: }
344: #undef __FUNCT__
346: /*@
347: GMatGetGrid - This function returns the grid from a grid matrix.
349: Not collective
351: Input Parameter:
352: . m - The grid matrix
354: Output Parameter:
355: . grid - The grid
357: Level: intermediate
359: .keywords: grid matrix, grid, get
360: .seealso: GridGetMesh(), GVecGetGrid()
361: @*/
362: int GMatGetGrid(GMat m, Grid *grid)
363: {
370: PetscObjectQuery((PetscObject) m, "Grid", (PetscObject *) grid);
372: return(0);
373: }
375: #undef __FUNCT__
377: /*@
378: GMatGetOrder - This function returns the orderings from a grid matrix.
380: Not collective
382: Input Parameter:
383: . m - The grid matrix
385: Output Parameters:
386: + rowOrder - The row (or test function) ordering
387: - colOrder - The column (or shape function) ordering
389: Level: intermediate
391: .keywords: grid matrix, variable ordering, get
392: .seealso: GMatGetGrid(), GVecGetOrder()
393: @*/
394: int GMatGetOrder(GMat m, VarOrdering *rowOrder, VarOrdering *colOrder)
395: {
401: if (rowOrder != PETSC_NULL) {
403: PetscObjectQuery((PetscObject) m, "RowOrder", (PetscObject *) rowOrder);
405: }
406: if (colOrder != PETSC_NULL) {
408: PetscObjectQuery((PetscObject) m, "ColOrder", (PetscObject *) colOrder);
410: }
411: return(0);
412: }
414: #undef __FUNCT__
416: /*@
417: GMatGetDiagonalConstrained - This function returns the diagonal of a constrained matrix.
419: Input Parameter:
420: . mat - The grid matrix
422: Output Parameter:
423: . diag - A constrained grid vector containing the diagonal elements
425: Level: advanced
427: .keywords: grid matrix, constraint
428: .seealso: MatGetDiagonal()
429: @*/
430: int GMatGetDiagonalConstrained(GMat mat, GVec diag)
431: {
432: Grid grid;
433: PetscTruth isConstrained, explicitConstraints;
434: Vec diagLong;
435: PetscScalar *array;
436: PetscScalar *arrayC;
437: int *ordering;
438: int numInteriorVars;
439: int var, size;
440: int ierr;
445: GMatGetGrid(mat, &grid);
446: GridIsConstrained(grid, &isConstrained);
447: GridGetExplicitConstraints(grid, &explicitConstraints);
448: if ((isConstrained == PETSC_FALSE) || (explicitConstraints == PETSC_TRUE)) {
449: MatGetDiagonal(mat, diag);
450: return(0);
451: }
453: VecGetLocalSize(diag, &size);
454: if (size != grid->constraintOrder->numLocVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Vector wrong size for matrix");
455: ISGetSize(grid->constraintOrdering, &size);
456: if (size != grid->order->numLocVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Constraint mapping wrong size for matrix");
458: /* First copy all the interior variables */
459: GVecCreate(grid, &diagLong);
460: MatGetDiagonal(mat, diagLong);
461: VecGetArray(diagLong, &array);
462: VecGetArray(diag, &arrayC);
463: ISGetIndices(grid->constraintOrdering, &ordering);
464: numInteriorVars = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
465: for(var = 0; var < grid->order->numLocVars; var++) {
466: if (ordering[var] < numInteriorVars) {
467: if (PetscAbsScalar(array[var]) > PETSC_MACHINE_EPSILON)
468: arrayC[ordering[var]] = array[var];
469: else
470: arrayC[ordering[var]] = 1.0;
471: }
472: }
473: ISRestoreIndices(grid->constraintOrdering, &ordering);
474: VecRestoreArray(diagLong, &array);
475: VecRestoreArray(diag, &arrayC);
476: GVecDestroy(diagLong);
478: /* Now ask grid for diagonal of extra variables */
479: if ((isConstrained == PETSC_TRUE) && (grid->constraintCtx->ops->jacgetdiag)) {
480: (*grid->constraintCtx->ops->jacgetdiag)(mat, diag);
481: }
483: return(0);
484: }
486: #undef __FUNCT__
488: /*@
489: GMatGetDiagonalMF - This function returns the diagonal of the matrix.
491: Input Parameter:
492: . mat - The grid matrix
494: Output Parameter:
495: . diag - A grid vector containing the diagonal elements
497: Notes:
498: See comments in GMatGetDiagonalConstrained() about dealing with implicit constraints
500: Level: intermediate
502: .keywords: grid matrix, constraint
503: .seealso: GMatGetDiagonalConstrained()
504: @*/
505: int GMatGetDiagonalMF(GMat mat, GVec diag)
506: {
507: Grid grid;
508: int ierr;
513: GMatGetGrid(mat, &grid);
514: GVecEvaluateJacobianDiagonal(diag, grid->matrixFreeArg, grid->matrixFreeContext);
515: return(0);
516: }
518: #undef __FUNCT__
520: /*@
521: GMatDiagonalScaleConstrained - Scales a constrained matrix on the left and
522: right by diagonal matrices that are stored as vectors. Either of the two scaling
523: matrices can be PETSC_NULL.
525: Input Parameters:
526: . mat - The grid matrix to be scaled
527: . l - The left scaling vector (or PETSC_NULL)
528: . r - The right scaling vector (or PETSC_NULL)
530: Notes:
531: GMatDiagonalScaleConstrained() computes A <- LAR, where
532: $ L = a diagonal matrix
533: $ R = a diagonal matrix
535: Level: advanced
537: .keywords: matrix, diagonal, scale
538: .seealso: MatDiagonalScale()
539: @*/
540: int GMatDiagonalScaleConstrained(GMat mat, GVec l, GVec r)
541: {
542: Grid grid;
543: GVec newL, newR;
544: PetscScalar *arrayL, *arrayR;
545: PetscScalar *arrayNewL, *arrayNewR;
546: PetscScalar one = 1.0;
547: int *ordering;
548: int numInteriorVars;
549: int var, size;
550: int ierr;
554: GMatGetGrid(mat, &grid);
556: if (grid->isConstrained == PETSC_FALSE) {
557: MatDiagonalScale(mat, l, r);
558: return(0);
559: }
560: ISGetSize(grid->constraintOrdering, &size);
561: if (size != grid->order->numLocVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Constraint mapping wrong size for matrix");
563: newL = PETSC_NULL;
564: newR = PETSC_NULL;
565: arrayL = PETSC_NULL;
566: arrayR = PETSC_NULL;
567: numInteriorVars = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
568: ISGetIndices(grid->constraintOrdering, &ordering);
569: if (l != PETSC_NULL)
570: {
572: VecGetLocalSize(l, &size);
573: if (size != grid->constraintOrder->numLocVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Left vector wrong size for matrix");
574: GVecCreate(grid, &newL);
575: VecSet(&one, newL);
576: VecGetArray(l, &arrayL);
577: VecGetArray(newL, &arrayNewL);
578: for(var = 0; var < grid->order->numLocVars; var++) {
579: if (ordering[var] < numInteriorVars)
580: arrayNewL[var] = arrayL[ordering[var]];
581: }
582: VecRestoreArray(l, &arrayL);
583: VecRestoreArray(newL, &arrayNewL);
584: }
585: if (r != PETSC_NULL) {
587: VecGetLocalSize(r, &size);
588: if (size != grid->constraintOrder->numLocVars) SETERRQ(PETSC_ERR_ARG_INCOMP, "Right vector wrong size for matrix");
589: GVecCreate(grid, &newR);
590: VecSet(&one, newR);
591: VecGetArray(r, &arrayR);
592: VecGetArray(newR, &arrayNewR);
593: for(var = 0; var < grid->order->numLocVars; var++) {
594: if (ordering[var] < numInteriorVars)
595: arrayNewR[var] = arrayR[ordering[var]];
596: }
597: VecRestoreArray(r, &arrayR);
598: VecRestoreArray(newR, &arrayNewR);
599: }
600: ISRestoreIndices(grid->constraintOrdering, &ordering);
602: MatDiagonalScale(mat, newL, newR);
604: if (l != PETSC_NULL) {
605: VecDestroy(newL);
606: }
607: if (r != PETSC_NULL) {
608: VecDestroy(newR);
609: }
610: return(0);
611: }
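/* Usage sketch (added for illustration; not part of the original gmat.c):
   Jacobi-style two-sided scaling A <- D^{-1} A D^{-1} using the constrained
   diagonal.  Assumes "diag" is a grid vector of the constrained size
   (matching grid->constraintOrder), created elsewhere. */
static int GMatDiagonalScaleExample(GMat mat, GVec diag)
{
  GMatGetDiagonalConstrained(mat, diag); /* diagonal in the constrained variable space */
  VecReciprocal(diag);                   /* small interior diagonal entries were replaced by 1.0 above */
  GMatDiagonalScaleConstrained(mat, diag, diag);
  return(0);
}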
613: #undef __FUNCT__
615: /*@
616: GMatOrderConstrained - This function creates an ordering which places
617: all interior variables before variables eliminated by constraints.
619: Input Parameters:
620: + mat - The grid matrix
621: - type - The reordering type
623: Output Parameters:
624: + rowIS - The row reordering
625: - colIS - The column reordering
627: Level: advanced
629: .keywords: grid matrix, constraint
630: .seealso: GMatGetDiagonalConstrained()
631: @*/
632: int GMatOrderConstrained(GMat mat, MatOrderingType type, IS *rowIS, IS *colIS)
633: {
634: Grid grid;
635: PetscTruth opt, match;
636: int ierr;
642: PetscStrcmp(type, MATORDERING_CONSTRAINED, &match);
643: if (match == PETSC_FALSE) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid ordering type %s", type);
644: GMatGetGrid(mat, &grid);
645: if (grid->constraintOrdering) {
646: ISDuplicate(grid->constraintOrdering, rowIS);
647: ISDuplicate(grid->constraintOrdering, colIS);
648: PetscOptionsHasName(PETSC_NULL, "-gmat_order_nonzero_diagonal", &opt);
649: if (opt == PETSC_TRUE) {
650: /* Remove the zeros from the diagonal in both blocks */
651: GMatReorderForNonzeroDiagonalConstrained(mat, 1e-10, *rowIS, *colIS);
652: }
653: } else {
654: *rowIS = PETSC_NULL;
655: *colIS = PETSC_NULL;
656: }
657: return(0);
658: }
660: #undef __FUNCT__
662: /*
663: GMatFindPreviousPivot_Private - Finds a pivot element in the lower
664: triangle which does not introduce a zero on the above diagonal.
666: Input Parameters:
667: . mat - The grid matrix
668: . prow - The row with a zero pivot
669: . rowPerm - The row permutation
670: . colPerm - The column permutation
671: . atol - The smallest acceptable pivot
673: Output Parameters:
674: . replCol - The column to interchange with
675: . replVal - The magnitude of the new pivot
677: Level: developer
678: */
679: int GMatFindPreviousPivot_Private(GMat mat, int prow, int *rowPerm, int *colPerm, double atol, int *replCol, double *replVal)
680: {
681: int nz;
682: int *cols;
683: PetscScalar *vals;
684: int newNz;
685: int *newCols;
686: PetscScalar *newVals;
687: int newRow;
688: int repl;
689: int colIndex, newColIndex;
690: int ierr;
693: newRow = rowPerm[prow];
694: MatGetRow(mat, prow, &nz, &cols, &vals);
695: for(colIndex = 0; colIndex < nz; colIndex++)
696: {
697: /* Find an acceptable pivot in the lower triangle */
698: if ((colPerm[cols[colIndex]] < newRow) && (PetscAbsScalar(vals[colIndex]) > atol))
699: {
700: /* Check previous diagonal for zero pivot */
701: repl = cols[colIndex];
702: MatGetRow(mat, repl, &newNz, &newCols, &newVals);
703: for(newColIndex = 0; newColIndex < newNz; newColIndex++)
704: {
705: if ((colPerm[newCols[newColIndex]] == newRow) && (PetscAbsScalar(newVals[newColIndex]) > atol))
706: {
707: *replCol = repl;
708: *replVal = PetscAbsScalar(vals[colIndex]);
709: MatRestoreRow(mat, repl, &newNz, &newCols, &newVals);
710: MatRestoreRow(mat, prow, &nz, &cols, &vals);
711: return(0);
712: }
713: }
714: MatRestoreRow(mat, repl, &newNz, &newCols, &newVals);
715: }
716: }
717: MatRestoreRow(mat, prow, &nz, &cols, &vals);
718: /* Signal error since no acceptable pivot was found */
719: PetscFunctionReturn(1);
720: }
722: #undef __FUNCT__
724: /*
725: GMatReorderForNonzeroDiagonal_Private - This is identical to
726: MatReorderForNonzeroDiagonal(), but allows reordering of a
727: block of the matrix.
729: Input Parameters:
730: . mat - The grid matrix to reorder
731: . rowStart - The first row in the block
732: . rowEnd - The last row in the block
733: . rowIS - The row permutation, usually obtained from GMatOrderConstrained().
734: . colIS - The column permutation
736: Level: developer
738: */
739: int GMatReorderForNonzeroDiagonal_Private(GMat mat, int rowStart, int rowEnd, double atol, IS rowIS, IS colIS)
740: {
741: int *rowPerm;
742: int *colPerm;
743: int m, n, nz;
744: int *cols;
745: PetscScalar *vals;
746: int replCol; /* Replacement column in the original matrix */
747: double replVal; /* Replacement pivot magnitude */
748: int prow; /* Pivot row in the original matrix */
749: int newRow; /* Pivot row in the permuted matrix */
750: int temp;
751: int colIndex;
752: int ierr;
755: ISGetIndices(rowIS, &rowPerm);
756: ISGetIndices(colIS, &colPerm);
757: MatGetSize(mat, &m, &n);
759: for(prow = 0; prow < m; prow++)
760: {
761: newRow = rowPerm[prow]; /* Row in the permuted matrix */
763: /* Act only on a block in the reordered matrix */
764: if ((newRow < rowStart) || (newRow >= rowEnd))
765: continue;
766: MatGetRow(mat, prow, &nz, &cols, &vals);
768: /* Find diagonal element */
769: for(colIndex = 0; colIndex < nz; colIndex++)
770: if (colPerm[cols[colIndex]] == newRow)
771: break;
773: /* Check for zero pivot */
774: if ((colIndex >= nz) || (PetscAbsScalar(vals[colIndex]) <= atol))
775: {
776: /* Find the best candidate in the upper triangular part */
777: replCol = prow;
778: if (colIndex >= nz)
779: replVal = 0.0;
780: else
781: replVal = PetscAbsScalar(vals[colIndex]);
782: for(colIndex = 0; colIndex < nz; colIndex++)
783: {
784: /* Stay within block and upper triangle */
785: if ((colPerm[cols[colIndex]] <= newRow) || (colPerm[cols[colIndex]] >= rowEnd))
786: continue;
788: /* Check for acceptable pivot */
789: if (PetscAbsScalar(vals[colIndex]) > atol)
790: {
791: replCol = cols[colIndex];
792: replVal = PetscAbsScalar(vals[colIndex]);
793: break;
794: }
795: }
797: /* No candidate was found */
798: if (prow == replCol)
799: {
800: /* Now we need to look for an element that allows us
801: to pivot with a previous column. To do this, we need
802: to be sure that we don't introduce a zero in a previous
803: diagonal */
804: if (GMatFindPreviousPivot_Private(mat, prow, rowPerm, colPerm, atol, &replCol, &replVal)) {
805: SETERRQ(PETSC_ERR_ARG_WRONG, "Can not reorder matrix for nonzero diagonal");
806: }
807: }
809: /* Interchange columns */
810: temp = colPerm[prow];
811: colPerm[prow] = colPerm[replCol];
812: colPerm[replCol] = temp;
813: }
814: MatRestoreRow(mat, prow, &nz, &cols, &vals);
815: }
817: ISRestoreIndices(rowIS, &rowPerm);
818: ISRestoreIndices(colIS, &colPerm);
819: return(0);
820: }
822: #undef __FUNCT__
824: /*@
825: GMatReorderForNonzeroDiagonalConstrained - Changes matrix ordering
826: to remove zeros from diagonal. This may help in the LU factorization
827: to prevent a zero pivot.
829: Input Parameters:
830: . mat - The grid matrix to reorder
831: . atol - The smallest acceptable pivot
832: . rowIS - The row permutation, usually obtained from GMatOrderConstrained().
833: . colIS - The column permutation
835: Notes:
836: This is not intended as a replacement for pivoting for matrices that
837: have ``bad'' structure. It is only a stop-gap measure. It should be called
838: after a call to MatGetOrdering(); this routine changes the column
839: ordering defined in colIS.
841: Options Database Keys: (When using SLES)
842: . -gmat_order_nonzero_diagonal
844: Algorithm:
845: Column pivoting is used. Choice of column is made by looking at the
846: non-zero elements in the row. This algorithm is simple and fast but
847: does NOT guarantee that a non-singular or well conditioned
848: principal submatrix will be produced.
850: Level: developer
851: @*/
852: int GMatReorderForNonzeroDiagonalConstrained(GMat mat, double atol, IS rowIS, IS colIS)
853: {
854: Grid grid;
855: int m;
856: int mInt;
857: int ierr;
864: GMatGetGrid(mat, &grid);
865: if (grid->isConstrained == PETSC_FALSE)
866: {
867: MatReorderForNonzeroDiagonal(mat, atol, rowIS, colIS);
868: return(0);
869: }
871: /* Get size of matrix blocks */
872: mInt = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
873: m = grid->order->numVars;
874: /* Reorder interior block */
875: GMatReorderForNonzeroDiagonal_Private(mat, 0, mInt, atol, rowIS, colIS);
876: /* Reorder constrained block */
877: GMatReorderForNonzeroDiagonal_Private(mat, mInt, m, atol, rowIS, colIS);
878: return(0);
879: }
881: #undef __FUNCT__
883: /*@
884: GMatReorder - Reorders the matrix based on the ordering type provided.
886: Collective on GMat
888: Input Parameters:
889: . mat - The grid matrix to reorder
890: . rowIS - The row permutation, usually obtained from MatGetOrdering()
891: . colIS - The column permutation
892: . sparse - The flag for sparsification
893: . bw - [Optional] The sparsified bandwidth, PETSC_DECIDE gives the default
894: . frac - [Optional] The sparsified fractional bandwidth, 0.0 gives the default
895: . tol - [Optional] The sparsification drop tolerance, 0.0 is the default
897: Output Parameter:
898: . newmat - The reordered, and possibly sparsified, matrix
900: Options Database Keys:
901: . -gmat_order_nonzero_diagonal
903: Level: advanced
905: .keywords: grid matrix, ordering
906: .seealso: MatGetOrdering(), MatPermute(), MatPermuteSparsify()
907: @*/
908: int GMatReorder(GMat mat, IS rowIS, IS colIS, PetscTruth sparse, int bw, double frac, double tol, GMat *newmat)
909: {
910: Grid grid;
911: int ierr;
919: if (sparse == PETSC_TRUE) {
920: MatPermuteSparsify(mat, bw, frac, tol, rowIS, colIS, newmat);
921: } else {
922: MatPermute(mat, rowIS, colIS, newmat);
923: }
924: GMatGetGrid(mat, &grid);
925: PetscObjectCompose((PetscObject) *newmat, "Grid", (PetscObject) grid);
926: return(0);
927: }
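/* Usage sketch (added for illustration; not part of the original gmat.c):
   obtain the constrained ordering and use it to permute the matrix so that
   interior variables precede constrained ones, without sparsification. */
static int GMatReorderExample(GMat mat, GMat *newmat)
{
  IS rowIS, colIS;

  GMatOrderConstrained(mat, MATORDERING_CONSTRAINED, &rowIS, &colIS);
  if (rowIS != PETSC_NULL) {
    GMatReorder(mat, rowIS, colIS, PETSC_FALSE, PETSC_DECIDE, 0.0, 0.0, newmat);
    ISDestroy(rowIS);
    ISDestroy(colIS);
  }
  return(0);
}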
929: #undef __FUNCT__
931: /*@
932: GMatEvaluateALEOperatorGalerkin - Evaluates the weak form of an operator over
933: a field on the locations defined by the underlying grid and its discretization.
935: Collective on GMat
937: Input Parameters:
938: + M - The grid matrix
939: . numFields - The number of fields in sFields and tFields
940: . sFields - The shape function fields
941: . sOrder - The global variable ordering for the shape functions
942: . sLocOrder - The local variable ordering for the shape functions
943: . tFields - The test function fields
944: . tOrder - The global variable ordering for the test functions
945: . tLocOrder - The local variable ordering for the test functions
946: . op - The operator
947: . alpha - A scalar multiple for the operator
948: . type - The matrix assembly type
949: - ctx - An optional user provided context for the function
951: Level: intermediate
953: .keywords: grid matrix, evaluate, ALE, operator
954: .seealso: GMatEvaluateSystemMatrix()
955: @*/
956: int GMatEvaluateALEOperatorGalerkin(GMat M, int numFields, int *sFields, VarOrdering sOrder, LocalVarOrdering sLocOrder,
957: int *tFields, VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
958: MatAssemblyType type, void *ctx)
959: {
960: Grid grid;
961: int field;
962: int ierr;
973: GMatGetGrid(M, &grid);
975: for(field = 0; field < numFields; field++)
976: {
977: /* Check for valid fields */
978: if ((sFields[field] < 0) || (sFields[field] >= grid->numFields)) {
979: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
980: }
981: if ((tFields[field] < 0) || (tFields[field] >= grid->numFields)) {
982: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
983: }
984: /* Check for valid operator */
985: if ((op < 0) || (op >= grid->fields[sFields[field]].disc->numOps) || (!grid->fields[sFields[field]].disc->operators[op])) {
986: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
987: }
988: if ((grid->fields[sFields[field]].disc->operators[op]->precompInt == PETSC_NULL) &&
989: (grid->fields[sFields[field]].disc->operators[op]->opFunc == PETSC_NULL) &&
990: (grid->fields[sFields[field]].disc->operators[op]->ALEOpFunc == PETSC_NULL)) {
991: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
992: }
993: if (grid->fields[sFields[field]].disc->numQuadPoints != grid->fields[tFields[field]].disc->numQuadPoints) {
994: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
995: }
996: }
997: /* Check for compatible orderings */
998: if ((tOrder->numVars != M->M) || (tOrder->numLocVars != M->m)) {
999: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible test function ordering");
1000: }
1001: if ((sOrder->numVars != M->N) || (sOrder->numLocVars != M->n)) {
1002: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible shape function ordering");
1003: }
1005: PetscLogEventBegin(GMAT_EvaluateOperatorGalerkin, M, grid, sOrder, tOrder);
1006: (*grid->ops->gmatevaluatealeoperatorgalerkin)(grid, M, numFields, sFields, sOrder, sLocOrder,
1007: tFields, tOrder, tLocOrder, op, alpha, type, ctx);
1008:
1009: PetscLogEventEnd(GMAT_EvaluateOperatorGalerkin, M, grid, sOrder, tOrder);
1010: return(0);
1011: }
1013: #undef __FUNCT__
1015: /*@
1016: GMatEvaluateOperatorGalerkin - Evaluates the weak form of an operator over
1017: a field on the locations defined by the underlying grid and its discretization.
1019: Collective on GMat
1021: Input Parameters:
1022: + M - The grid matrix
1023: . x - The argument vector
1024: . numFields - The number of fields in sFields and tFields
1025: . sFields - The shape function fields
1026: . tFields - The test function fields
1027: . op - The operator
1028: . alpha - The scalar multiple for the operator
1029: . type - The matrix assembly type
1030: - ctx - [Optional] A user provided context for the function
1032: Level: intermediate
1034: .keywords: grid matrix, evaluate, operator, galerkin
1035: .seealso: GMatEvaluateOperatorGalerkin(), GMatEvaluateSystemMatrix()
1036: @*/
1037: int GMatEvaluateOperatorGalerkin(GMat M, GVec x, int numFields, int *sFields, int *tFields, int op, PetscScalar alpha,
1038: MatAssemblyType type, void *ctx)
1039: {
1040: Grid grid;
1041: VarOrdering sOldOrder, tOldOrder;
1042: VarOrdering sOrder, tOrder;
1043: LocalVarOrdering sLocOrder, tLocOrder;
1044: VarOrdering xOrder;
1045: PetscTruth compat;
1046: int numTotalFields;
1047: int f;
1048: int ierr;
1055: GMatGetGrid(M, &grid);
1056: GMatGetOrder(M, &tOldOrder, &sOldOrder);
1057: if (x != PETSC_NULL) {
1059: GVecGetOrder(x, &xOrder);
1060: VarOrderingCompatible(xOrder, tOldOrder, &compat);
1061: if (compat == PETSC_FALSE) SETERRQ(PETSC_ERR_ARG_INCOMP, "Matrix and vector must have compatible maps");
1062: }
1063: GridGetNumFields(grid, &numTotalFields);
1064: for(f = 0; f < numFields; f++) {
1065: /* Check for valid fields */
1066: if ((sFields[f] < 0) || (sFields[f] >= numTotalFields)) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", sFields[f]);
1067: if ((tFields[f] < 0) || (tFields[f] >= numTotalFields)) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", tFields[f]);
1068: /* Check for valid operator */
1069: if ((op < 0) || (op >= grid->fields[sFields[f]].disc->numOps) || (!grid->fields[sFields[f]].disc->operators[op])) {
1070: SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid operator %d", op);
1071: }
1072: if ((grid->fields[sFields[f]].disc->operators[op]->precompInt == PETSC_NULL) &&
1073: (grid->fields[sFields[f]].disc->operators[op]->opFunc == PETSC_NULL) &&
1074: (grid->fields[sFields[f]].disc->operators[op]->ALEOpFunc == PETSC_NULL)) {
1075: SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid operator %d", op);
1076: }
1077: if (grid->fields[sFields[f]].disc->numQuadPoints != grid->fields[tFields[f]].disc->numQuadPoints) {
1078: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
1079: }
1080: }
1081: /* Create orderings */
1082: VarOrderingCreateSubset(sOldOrder, numFields, sFields, PETSC_FALSE, &sOrder);
1083: LocalVarOrderingCreate(grid, numFields, sFields, &sLocOrder);
1084: VarOrderingCreateSubset(tOldOrder, numFields, tFields, PETSC_FALSE, &tOrder);
1085: LocalVarOrderingCreate(grid, numFields, tFields, &tLocOrder);
1086: /* Check for compatible orderings */
1087: if ((tOrder->numVars != M->M) || (tOrder->numLocVars != M->m)) {
1088: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible test function ordering");
1089: }
1090: if ((sOrder->numVars != M->N) || (sOrder->numLocVars != M->n)) {
1091: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible shape function ordering");
1092: }
1093: /* Calculate operator */
1094: PetscLogEventBegin(GMAT_EvaluateOperatorGalerkin, M, grid, sOrder, tOrder);
1095: (*grid->ops->gmatevaluateoperatorgalerkin)(grid, M, x, sOrder, sLocOrder, tOrder, tLocOrder, op, alpha, type, ctx);
1096:
1097: PetscLogEventEnd(GMAT_EvaluateOperatorGalerkin, M, grid, sOrder, tOrder);
1098: /* Destroy orderings */
1099: VarOrderingDestroy(sOrder);
1100: LocalVarOrderingDestroy(sLocOrder);
1101: VarOrderingDestroy(tOrder);
1102: LocalVarOrderingDestroy(tLocOrder);
1103: return(0);
1104: }
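/* Usage sketch (added for illustration; not part of the original gmat.c):
   assemble alpha times a single operator with field 0 as both shape and test
   space.  The operator index "op" is supplied by the caller, since operator
   identifiers are registered by the discretization. */
static int GMatEvaluateOperatorExample(GMat M, int op)
{
  int         field = 0;
  PetscScalar alpha = 1.0;

  GMatEvaluateOperatorGalerkin(M, PETSC_NULL, 1, &field, &field, op, alpha, MAT_FINAL_ASSEMBLY, PETSC_NULL);
  return(0);
}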
1106: #undef __FUNCT__
1108: /*@
1109: GMatEvaluateALEConstrainedOperatorGalerkin - Evaluates the weak form of an operator over
1110: a field on the locations defined by the underlying grid and its discretization. The
1111: constrained variable space is used for the shape and test functions.
1113: Collective on GMat
1115: Input Parameters:
1116: + M - The grid matrix
1117: . numFields - The number of fields in sFields and tFields
1118: . sFields - The shape function fields
1119: . sOrder - The global variable ordering for the shape functions
1120: . sLocOrder - The local variable ordering for the shape functions
1121: . tFields - The test function fields
1122: . tOrder - The global variable ordering for the test functions
1123: . tLocOrder - The local variable ordering for the test functions
1124: . op - The operator
1125: . alpha - A scalar multiple for the operator
1126: . type - The matrix assembly type
1127: - ctx - An optional user provided context for the function
1129: Level: intermediate
1131: .keywords: grid matrix, evaluate, ALE, operator, constraint
1132: .seealso: GMatEvaluateALEOperatorGalerkin(), GMatEvaluateSystemMatrix()
1133: @*/
1134: int GMatEvaluateALEConstrainedOperatorGalerkin(GMat M, int numFields, int *sFields, VarOrdering sOrder, LocalVarOrdering sLocOrder,
1135: int *tFields, VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
1136: MatAssemblyType type, void *ctx)
1137: {
1138: Grid grid;
1139: int field;
1140: int ierr;
1151: GMatGetGrid(M, &grid);
1153: if (grid->isConstrained == PETSC_FALSE) {
1154: GMatEvaluateALEOperatorGalerkin(M, numFields, sFields, sOrder, sLocOrder, tFields, tOrder, tLocOrder, op, alpha, type, ctx);
1155:
1156: return(0);
1157: }
1158: for(field = 0; field < numFields; field++) {
1159: /* Check for valid fields */
1160: if ((sFields[field] < 0) || (sFields[field] >= grid->numFields)) {
1161: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
1162: }
1163: if ((tFields[field] < 0) || (tFields[field] >= grid->numFields)) {
1164: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid field number");
1165: }
1166: /* Check for valid operator */
1167: if ((op < 0) || (op >= grid->fields[sFields[field]].disc->numOps) || (!grid->fields[sFields[field]].disc->operators[op])) {
1168: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
1169: }
1170: if ((grid->fields[sFields[field]].disc->operators[op]->precompInt == PETSC_NULL) &&
1171: (grid->fields[sFields[field]].disc->operators[op]->opFunc == PETSC_NULL) &&
1172: (grid->fields[sFields[field]].disc->operators[op]->ALEOpFunc == PETSC_NULL)) {
1173: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid operator");
1174: }
1175: if (grid->fields[sFields[field]].disc->numQuadPoints != grid->fields[tFields[field]].disc->numQuadPoints) {
1176: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
1177: }
1178: }
1179: /* Check for compatible orderings */
1180: if ((tOrder->numVars != M->M) || (tOrder->numLocVars != M->m)) {
1181: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible test function ordering");
1182: }
1183: if ((sOrder->numVars != M->N) || (sOrder->numLocVars != M->n)) {
1184: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible shape function ordering");
1185: }
1187: (*grid->ops->gmatevaluatealeconstrainedoperatorgalerkin)(grid, M, numFields, sFields, sOrder, sLocOrder,
1188: tFields, tOrder, tLocOrder, op, alpha, type, ctx);
1189:
1190: return(0);
1191: }
1193: #undef __FUNCT__
1195: /*@
1196: GMatEvaluateBoundaryOperatorGalerkin - Evaluates the weak form of an operator over
1197: a field on the locations defined by the boundary of the underlying grid and its
1198: discretization. The test functions are defined on the entire grid, but the only nonzeros
1199: come from functions with support on the boundary.
1201: Input Parameters:
1202: + M - The grid matrix
1203: . numFields - The number of fields in sFields and tFields
1204: . sFields - The shape function fields
1205: . sOrder - The global variable ordering for the shape functions
1206: . sLocOrder - The local variable ordering for the shape functions
1207: . tFields - The test function fields
1208: . tOrder - The global variable ordering for the test functions
1209: . tLocOrder - The local variable ordering for the test functions
1210: . op - The operator
1211: . alpha - A scalar multiple for the operator
1212: . type - The matrix assembly type
1213: - ctx - An optional user provided context for the function
1215: Level: intermediate
1217: .seealso: GMatEvaluateOperatorGalerkin, GMatEvaluateSystemMatrix
1218: @*/
1219: int GMatEvaluateBoundaryOperatorGalerkin(GMat M, int numFields, int *sFields, VarOrdering sOrder, LocalVarOrdering sLocOrder,
1220: int *tFields, VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
1221: MatAssemblyType type, void *ctx)
1222: {
1223: Grid grid;
1224: int numTotalFields;
1225: int f;
1226: int ierr;
1237: GMatGetGrid(M, &grid);
1238: GridGetNumFields(grid, &numTotalFields);
1239: for(f = 0; f < numFields; f++) {
1240: /* Check for valid fields */
1241: if ((sFields[f] < 0) || (sFields[f] >= numTotalFields)) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", sFields[f]);
1242: if ((tFields[f] < 0) || (tFields[f] >= numTotalFields)) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid field number %d", tFields[f]);
1243: /* Check for valid operator */
1244: if ((op < 0) || (op >= grid->fields[sFields[f]].disc->numOps) || (!grid->fields[sFields[f]].disc->operators[op])) {
1245: SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid operator %d", op);
1246: }
1247: if ((grid->fields[sFields[f]].disc->operators[op]->precompInt == PETSC_NULL) &&
1248: (grid->fields[sFields[f]].disc->operators[op]->opFunc == PETSC_NULL) &&
1249: (grid->fields[sFields[f]].disc->operators[op]->ALEOpFunc == PETSC_NULL)) {
1250: SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid operator %d", op);
1251: }
1252: if (grid->fields[sFields[f]].disc->numQuadPoints != grid->fields[tFields[f]].disc->numQuadPoints) {
1253: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible quadrature sets");
1254: }
1255: }
1256: /* Check for compatible orderings */
1257: if ((tOrder->numVars != M->M) || (tOrder->numLocVars != M->m)) {
1258: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible test function ordering");
1259: }
1260: if ((sOrder->numVars != M->N) || (sOrder->numLocVars != M->n)) {
1261: SETERRQ(PETSC_ERR_ARG_INCOMP, "Incompatible shape function ordering");
1262: }
1264: (*grid->ops->gmatevaluateboundaryoperatorgalerkin)(grid, M, PETSC_NULL, sOrder, sLocOrder, tOrder, tLocOrder,
1265: op, alpha, type, ctx);
1266:
1267: return(0);
1268: }
1270: #undef __FUNCT__
1272: /*@
1273: GMatEvaluateNewFields - Evaluates the weak form of the system operators over
1274: the new fields introduced by constraints. These fields are generally not
1275: discretized using the computational mesh.
1277: Collective on GMat
1279: Input Parameters:
1280: + M - The grid matrix
1281: - type - The matrix assembly type
1283: Level: intermediate
1285: .keywords: grid matrix, evaluate, new field
1286: .seealso: GMatEvaluateOperatorGalerkin()
1287: @*/
1288: int GMatEvaluateNewFields(GMat M, int numFields, int *sFields, VarOrdering sOrder, LocalVarOrdering sLocOrder,
1289: int *tFields, VarOrdering tOrder, LocalVarOrdering tLocOrder, PetscScalar alpha, MatAssemblyType type)
1290: {
1291: Grid grid;
1292: int ierr;
1296: GMatGetGrid(M, &grid);
1304: if (numFields != 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only one constrained field allowed currently");
1305: if (sFields[0] != tFields[0]) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Must form with constrained field currently");
1306: if (grid->fields[sFields[0]].isConstrained == PETSC_FALSE) {
1307: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only valid with constrained field currently");
1308: }
1309: #if 0
1310: PetscLogEventBegin(GMAT_EvaluateSystemMatrix, M, grid, 0, 0);
1311: GMatEvaluateNewFields_Triangular_2D(grid, M, numFields, sFields, sOrder, sLocOrder, tFields,
1312: tOrder, tLocOrder, alpha, type, grid->constraintCtx);
1313:
1314: PetscLogEventEnd(GMAT_EvaluateSystemMatrix, M, grid, 0, 0);
1315: return(0);
1316: #else
1317: SETERRQ(PETSC_ERR_SUP, "Coming soon");
1318: #endif
1319: }
1321: #undef __FUNCT__
1323: /*@
1324: GMatMatMultConstrained - This function applies P^T A P to a vector,
1325: where P is the constraint matrix for the grid.
1327: Input Parameters:
1328: + A - The grid matrix
1329: - x - The input grid vector
1331: Output Parameter:
1332: . y - The output grid vector P^T A P x
1334: Level: intermediate
1336: .seealso: GMatEvaluateOperatorGalerkin
1337: @*/
1338: int GMatMatMultConstrained(GMat mat, GVec x, GVec y)
1339: {
1340: Grid grid;
1341: Mat P;
1342: PetscConstraintObject ctx;
1343: GTSContext GTSctx;
1344: int ierr;
1350: GMatGetGrid(mat, &grid);
1352: GridGetConstraintMatrix(grid, &P);
1353: GridGetConstraintContext(grid, &ctx);
1354: PetscObjectQuery((PetscObject) ctx, "GTSContext", (PetscObject *) &GTSctx);
1355: PetscLogEventBegin(GMAT_MatMultConstrained, mat, x, y, 0);
1356: /* End the current MatMult() timing */
1357: PetscLogEventEnd(MAT_Mult, mat, x, y, 0);
1358: MatMult(P, x, GTSctx->work[0]);
1359: /* Time the multiply with the unconstrained matrix */
1360: PetscLogEventBegin(MAT_Mult, mat, GTSctx->work[0], GTSctx->work[1], 0);
1361: MatMultConstrained(mat, GTSctx->work[0], GTSctx->work[1]);
1362: PetscLogEventEnd(MAT_Mult, mat, GTSctx->work[0], GTSctx->work[1], 0);
1363: MatMultTranspose(P, GTSctx->work[1], y);
1364: /* Add in new fields */
1365: (*grid->constraintCtx->ops->applyjac)(grid, x, y);
1366: /* Restart the current MatMult() timing */
1367: PetscLogEventBegin(MAT_Mult, mat, x, y, 0);
1368: PetscLogEventEnd(GMAT_MatMultConstrained, mat, x, y, 0);
1369: return(0);
1370: }
1372: #undef __FUNCT__
1374: /*@
1375: GMatMatMultMF - This function applies the operator matrix-free to the vector.
1377: Input Parameters:
1378: + A - The grid matrix
1379: - x - The input grid vector
1381: Output Parameter:
1382: . y - The output grid vector A x
1384: Level: intermediate
1386: .seealso: GMatMatMultMFConstrained
1387: @*/
1388: int GMatMatMultMF(GMat mat, GVec x, GVec y)
1389: {
1390: PetscScalar zero = 0.0;
1391: Grid grid;
1392: int ierr;
1398: GMatGetGrid(mat, &grid);
1400: VecSet(&zero, y);
1401: GVecEvaluateJacobian(y, grid->matrixFreeArg, x, grid->matrixFreeContext);
1402: return(0);
1403: }
1405: #undef __FUNCT__
1407: /*@
1408: GMatMatMultTransposeConstrained - This function applies P^T A^T P to a vector,
1409: where P is the constraint matrix for the grid.
1411: Input Parameters:
1412: + A - The grid matrix
1413: - x - The input grid vector
1415: Output Parameter:
1416: . y - The output grid vector P^T A^T P x
1418: Level: intermediate
1420: .seealso: GMatEvaluateOperatorGalerkin
1421: @*/
1422: int GMatMatMultTransposeConstrained(GMat mat, GVec x, GVec y)
1423: {
1424: Grid grid;
1425: Mat P;
1426: PetscConstraintObject ctx;
1427: GTSContext GTSctx;
1428: int ierr;
1434: GMatGetGrid(mat, &grid);
1436: GridGetConstraintMatrix(grid, &P);
1437: GridGetConstraintContext(grid, &ctx);
1438: PetscObjectQuery((PetscObject) ctx, "GTSContext", (PetscObject *) &GTSctx);
1439: PetscLogEventBegin(GMAT_MatMultTransposeConstrained, mat, x, y, 0);
1440: /* End the current MatMult() timing */
1441: PetscLogEventEnd(MAT_MultTranspose, mat, x, y, 0);
1442: MatMult(P, x, GTSctx->work[0]);
1443: /* Time the multiply with the unconstrained matrix */
1444: PetscLogEventBegin(MAT_MultTranspose, mat, GTSctx->work[0], GTSctx->work[1], 0);
1445: MatMultTransposeConstrained(mat, GTSctx->work[0], GTSctx->work[1]);
1446: PetscLogEventEnd(MAT_MultTranspose, mat, GTSctx->work[0], GTSctx->work[1], 0);
1447: MatMultTranspose(P, GTSctx->work[1], y);
1448: /* Add in new fields */
1449: (*grid->constraintCtx->ops->applyjac)(grid, x, y);
1450: /* Restart the current MatMult() timing */
1451: PetscLogEventBegin(MAT_MultTranspose, mat, x, y, 0);
1452: PetscLogEventEnd(GMAT_MatMultTransposeConstrained, mat, x, y, 0);
1453: return(0);
1454: }
1456: #undef __FUNCT__
1458: /*@
1459: GMatSetBoundary - Applies the specified Dirichlet boundary conditions to this matrix.
1461: Input Parameters:
1462: + M - The grid matrix
1463: . diag - The scalar to place on the diagonal
1464: - ctx - An optional user provided context for the function
1466: Level: intermediate
1468: .seealso: GridSetBC(), GridAddBC()
1469: @*/
1470: int GMatSetBoundary(GMat M, PetscScalar diag, void *ctx)
1471: {
1472: Grid grid;
1473: int bc;
1474: int num;
1475: int *bd;
1476: int *field;
1477: int ierr;
1481: GMatGetGrid(M, &grid);
1483: PetscLogEventBegin(GMAT_SetBoundary, M, grid, 0, 0);
1484: for(bc = 0, num = 0; bc < grid->numBC; bc++) {
1485: if (grid->bc[bc].reduce == PETSC_FALSE) num++;
1486: }
1487: if (num > 0) {
1488: PetscMalloc(num * sizeof(int), &bd);
1489: PetscMalloc(num * sizeof(int), &field);
1490: for(bc = 0, num = 0; bc < grid->numBC; bc++) {
1491: if (grid->bc[bc].reduce == PETSC_FALSE) {
1492: bd[num] = grid->bc[bc].boundary;
1493: field[num++] = grid->bc[bc].field;
1494: }
1495: }
1496: GridSetMatBoundaryRectangular(num, bd, field, diag, grid->order, M, ctx);
1497: PetscFree(bd);
1498: PetscFree(field);
1499: }
1500: for(bc = 0; bc < grid->numPointBC; bc++) {
1501: if (grid->pointBC[bc].reduce == PETSC_FALSE) {
1502: GridSetMatPointBoundary(grid->pointBC[bc].node, grid->pointBC[bc].field, diag, M, ctx);
1503: }
1504: }
1505: PetscLogEventEnd(GMAT_SetBoundary, M, grid, 0, 0);
1506: return(0);
1507: }
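/* Usage sketch (added for illustration; not part of the original gmat.c):
   after assembling the system matrix, impose the grid's registered Dirichlet
   boundary conditions, placing 1.0 on the affected diagonal entries. */
static int GMatSetBoundaryExample(GMat M)
{
  PetscScalar one = 1.0;

  GMatSetBoundary(M, one, PETSC_NULL); /* uses conditions registered with GridSetBC()/GridAddBC() */
  return(0);
}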
1509: #undef __FUNCT__
1511: /*@
1512: GMatCreate - Creates a sparse matrix with the appropriate preallocation
1513: of nonzeros for a discretization of an operator on the grid.
1515: Input Parameter:
1516: . grid - The grid
1518: Output Parameter:
1519: . gmat - The discrete operator
1521: Level: beginner
1523: .seealso: GVecCreate()
1524: @*/
1525: int GMatCreate(Grid grid, GMat *gmat)
1526: {
1532: GMatCreateRectangular(grid, grid->order, grid->order, gmat);
1533: return(0);
1534: }
1536: #undef __FUNCT__
1538: /*@
1539: GMatCreateRectangular - Creates a sparse matrix with the appropriate preallocation
1540: of nonzeros for a discretization of an operator on the grid. A different
1541: set of fields may be specified for the shape and test discretizations.
1543: Input Parameters:
1544: + grid - The grid
1545: . sOrder - The shape function ordering
1546: - tOrder - The test function ordering
1548: Output Parameter:
1549: . gmat - The discrete operator
1551: Level: beginner
1553: .seealso: GMatCreate()
1554: @*/
1555: int GMatCreateRectangular(Grid grid, VarOrdering sOrder, VarOrdering tOrder, GMat *gmat)
1556: {
1557: VarOrdering shapeOrder = sOrder;
1558: VarOrdering testOrder = tOrder;
1559: int ierr;
1564: GridSetUp(grid);
1566: if (shapeOrder == PETSC_NULL) shapeOrder = grid->order;
1567: if (testOrder == PETSC_NULL) testOrder = grid->order;
1571: PetscLogEventBegin(GMAT_CreateRectangular, grid, shapeOrder, testOrder, 0);
1572: (*grid->ops->gridcreategmat)(grid, shapeOrder, testOrder, PETSC_FALSE, gmat);
1573: PetscLogEventEnd(GMAT_CreateRectangular, grid, shapeOrder, testOrder, 0);
1574: PetscObjectCompose((PetscObject) *gmat, "RowOrder", (PetscObject) tOrder);
1575: PetscObjectCompose((PetscObject) *gmat, "ColOrder", (PetscObject) sOrder);
1576: return(0);
1577: }
1579: #undef __FUNCT__
1581: int GMatDummy0(GMat mat) {
1583: return(0);
1584: }
1586: #undef __FUNCT__
1588: /*@
1589: GMatCreateMF - Creates a matrix-free operator. A different
1590: set of fields may be specified for the shape and test discretizations.
1592: Input Parameters:
1593: + grid - The grid
1594: . sOrder - The shape function ordering
1595: - tOrder - The test function ordering
1597: Output Parameter:
1598: . gmat - The discrete operator
1600: Level: beginner
1602: .seealso: GMatCreateRectangular()
1603: @*/
1604: int GMatCreateMF(Grid grid, VarOrdering sOrder, VarOrdering tOrder, GMat *gmat)
1605: {
1606: VarOrdering shapeOrder = sOrder;
1607: VarOrdering testOrder = tOrder;
1608: int ierr;
1611: if (shapeOrder == PETSC_NULL) shapeOrder = grid->order;
1612: if (testOrder == PETSC_NULL) testOrder = grid->order;
1618: GridSetUp(grid);
1619: PetscLogEventBegin(GMAT_CreateRectangular, grid, shapeOrder, testOrder, 0);
1620: MatCreateShell(grid->comm, testOrder->numLocVars, shapeOrder->numLocVars, testOrder->numVars, shapeOrder->numVars, grid, gmat);
1621:
1622: if (grid->isConstrained == PETSC_TRUE) {
1623: if (grid->explicitConstraints == PETSC_FALSE) {
1624: MatShellSetOperation(*gmat, MATOP_MULT, (void (*)(void)) GMatMatMultConstrained);
1625: MatShellSetOperation(*gmat, MATOP_MULT_CONSTRAINED, (void (*)(void)) GMatMatMultMF);
1626: } else {
1627: MatShellSetOperation(*gmat, MATOP_MULT, (void (*)(void)) GMatMatMultMF);
1628: }
1629: } else {
1630: MatShellSetOperation(*gmat, MATOP_MULT, (void (*)(void)) GMatMatMultMF);
1631: }
1632: MatShellSetOperation(*gmat, MATOP_ZERO_ENTRIES, (void (*)(void)) GMatDummy0);
1633: MatShellSetOperation(*gmat, MATOP_GET_DIAGONAL, (void (*)(void)) GMatGetDiagonalMF);
1634: PetscObjectCompose((PetscObject) *gmat, "Grid", (PetscObject) grid);
1635: MatSetOption(*gmat, MAT_NEW_NONZERO_ALLOCATION_ERR);
1636: PetscLogEventEnd(GMAT_CreateRectangular, grid, shapeOrder, testOrder, 0);
1637: PetscObjectCompose((PetscObject) *gmat, "RowOrder", (PetscObject) tOrder);
1638: PetscObjectCompose((PetscObject) *gmat, "ColOrder", (PetscObject) sOrder);
1639: return(0);
1640: }
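/* Usage sketch (added for illustration; not part of the original gmat.c):
   create a matrix-free operator with the default orderings and apply it once.
   MatMult() dispatches to GMatMatMultMF(), or to GMatMatMultConstrained() on
   implicitly constrained grids.  Assumes "grid" is set up elsewhere and that
   PETSC_NULL orderings default to grid->order as in GMatCreateRectangular(). */
static int GMatCreateMFExample(Grid grid)
{
  GMat        A;
  GVec        x, y;
  PetscScalar zero = 0.0;

  GVecCreate(grid, &x);
  GVecCreate(grid, &y);
  VecSet(&zero, x);
  GMatCreateMF(grid, PETSC_NULL, PETSC_NULL, &A);
  MatMult(A, x, y);
  GMatDestroy(A);
  GVecDestroy(x);
  GVecDestroy(y);
  return(0);
}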
1642: #undef __FUNCT__
1644: /*@
1645: GMatCreateBoundaryRestriction - Creates a sparse matrix with the
1646: appropriate preallocation of nonzeros for a discretization of an
1647: operator on the boundary of the grid.
1649: Collective on Grid
1651: Input Parameter:
1652: . grid - The grid
1654: Output Parameter:
1655: . gmat - The grid matrix
1657: Level: beginner
1659: .keywords: grid matrix, boundary, create
1660: .seealso: GMatCreate(), GVecCreate()
1661: @*/
1662: int GMatCreateBoundaryRestriction(Grid grid, GMat *gmat)
1663: {
1669: GridSetUp(grid);
1670: GridSetupBoundary(grid);
1671: (*grid->ops->gridcreategmat)(grid, grid->bdOrder, grid->order, PETSC_TRUE, gmat);
1672: PetscObjectCompose((PetscObject) *gmat, "RowOrder", (PetscObject) grid->order);
1673: PetscObjectCompose((PetscObject) *gmat, "ColOrder", (PetscObject) grid->bdOrder);
1674: return(0);
1675: }