Actual source code: ex13.c
/*$Id: ex13.c,v 1.29 2001/08/07 21:30:54 bsmith Exp $*/

static char help[] = "Solves a variable Poisson problem with SLES.\n\n";
/*T
   Concepts: SLES^basic sequential example
   Concepts: SLES^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: 1
T*/
/*
  Include "petscsles.h" so that we can use SLES solvers.  Note that this file
  automatically includes:
     petsc.h       - base PETSc routines    petscvec.h - vectors
     petscsys.h    - system routines        petscmat.h - matrices
     petscis.h     - index sets             petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                petscpc.h  - preconditioners
*/
#include "petscsles.h"

/*
  User-defined context that contains all the data structures used
  in the linear solution process.
*/
typedef struct {
  Vec         x,b;     /* solution vector, right-hand-side vector */
  Mat         A;       /* sparse matrix */
  SLES        sles;    /* linear solver context */
  int         m,n;     /* grid dimensions */
  PetscScalar hx2,hy2; /* 1/(hx*hx) and 1/(hy*hy), i.e. (m+1)^2 and (n+1)^2 */
} UserCtx;

extern int UserInitializeLinearSolver(int,int,UserCtx *);
extern int UserFinalizeLinearSolver(UserCtx *);
extern int UserDoLinearSolver(PetscScalar *,UserCtx *userctx,PetscScalar *b,PetscScalar *x);
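
/*
  These three routines form a simple setup/solve/cleanup interface: the solver
  objects are created once, reused for each solve with (possibly) new
  coefficients, and then destroyed.  main() below drives them in that order.
*/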

#undef __FUNCT__
#define __FUNCT__ "main"
int main(int argc,char **args)
{
  UserCtx     userctx;
  int         ierr,m = 6,n = 7,t,tmax = 2,i,I,j,N;
  PetscScalar *userx,*rho,*solution,*userb,hx,hy,x,y;
  PetscReal   enorm;

  /*
     Initialize the PETSc libraries
  */
  PetscInitialize(&argc,&args,(char *)0,help);

  /*
     The next two lines are for testing only; these allow the user to
     decide the grid size at runtime.
  */
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);
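
  /*
     For example (assuming the executable is named ex13), a 20 x 30 grid can
     be requested at runtime with:

         ./ex13 -m 20 -n 30
  */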

  /*
     Create the empty sparse matrix and linear solver data structures
  */
  UserInitializeLinearSolver(m,n,&userctx);
  N = m*n;

  /*
     Allocate arrays to hold the solution to the linear system.  This is not
     normally done in PETSc programs, but here we are pretending to call
     these routines from a non-PETSc program and would like to reuse its data
     structures; in the context of a larger application these arrays would be
     provided by other (non-PETSc) parts of the application code.
  */
  PetscMalloc(N*sizeof(PetscScalar),&userx);
  PetscMalloc(N*sizeof(PetscScalar),&userb);
  PetscMalloc(N*sizeof(PetscScalar),&solution);

  /*
     Allocate an array to hold the coefficients in the elliptic operator
  */
  PetscMalloc(N*sizeof(PetscScalar),&rho);

  /*
     Fill up the array rho[] with the function rho(x,y) = x; fill the
     right-hand-side b[] and the solution with a known problem for testing.
  */
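  /*
     The exact solution used for testing is psi(x,y) = sin(2*pi*x)*sin(2*pi*y).
     With rho(x,y) = x the corresponding forcing term is

        -div(rho grad psi) = -d/dx(x dpsi/dx) - d/dy(x dpsi/dy)
                           = -2*pi*cos(2*pi*x)*sin(2*pi*y)
                             + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y),

     which is exactly the expression loaded into userb[] below.
  */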
  hx = 1.0/(m+1);
  hy = 1.0/(n+1);
  y  = hy;
  I  = 0;
  for (j=0; j<n; j++) {
    x = hx;
    for (i=0; i<m; i++) {
      rho[I]      = x;
      solution[I] = PetscSinScalar(2.*PETSC_PI*x)*PetscSinScalar(2.*PETSC_PI*y);
      userb[I]    = -2*PETSC_PI*PetscCosScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y) +
                    8*PETSC_PI*PETSC_PI*x*PetscSinScalar(2*PETSC_PI*x)*PetscSinScalar(2*PETSC_PI*y);
      x += hx;
      I++;
    }
    y += hy;
  }

  /*
     Loop over a number of timesteps, setting up and solving the linear
     system for each time step.

     Note that this is somewhat artificial.  It is intended to demonstrate
     how one may reuse the linear solver objects in each time step.
  */
  for (t=0; t<tmax; t++) {
    UserDoLinearSolver(rho,&userctx,userb,userx);

    /*
        Compute the error.  Note that this could (and usually should) all be
        done using the PETSc vector operations.  Here we demonstrate using
        more standard programming practices to show how they may be mixed
        with PETSc.  The quantity accumulated below is the square of the
        discrete L2 norm of the error.
    */
    enorm = 0.0;
    for (i=0; i<N; i++) {
      enorm += PetscRealPart(PetscConj(solution[i]-userx[i])*(solution[i]-userx[i]));
    }
    enorm *= PetscRealPart(hx*hy);
    printf("m %d n %d error norm %g\n",m,n,enorm);
  }

  /*
     We are all finished solving linear systems, so we clean up the
     data structures.
  */
  PetscFree(rho);
  PetscFree(solution);
  PetscFree(userx);
  PetscFree(userb);
  UserFinalizeLinearSolver(&userctx);
  PetscFinalize();

  return 0;
}

/* ------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "UserInitializeLinearSolver"
int UserInitializeLinearSolver(int m,int n,UserCtx *userctx)
{
  int N,ierr;

  /*
     Here we assume use of a grid of size m x n, with all points on the
     interior of the domain, i.e., we do not include the points corresponding
     to homogeneous Dirichlet boundary conditions.  We assume that the domain
     is [0,1] x [0,1].
  */
  userctx->m   = m;
  userctx->n   = n;
  userctx->hx2 = (m+1)*(m+1);
  userctx->hy2 = (n+1)*(n+1);
  N            = m*n;

  /*
     Create the sparse matrix.  Preallocate 5 nonzeros per row.
  */
  MatCreateSeqAIJ(PETSC_COMM_SELF,N,N,5,0,&userctx->A);
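
  /*
     Five nonzeros per row is exactly what the 5-point finite-difference
     stencil built in UserDoLinearSolver() needs (a diagonal entry plus at
     most four off-diagonal neighbors), so no additional memory allocation
     occurs while the matrix entries are being set.
  */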

  /*
     Create vectors.  Here we create vectors with no memory allocated; this
     way we can later reuse the arrays already in the program by calling
     VecPlaceArray().
  */
  VecCreateSeqWithArray(PETSC_COMM_SELF,N,PETSC_NULL,&userctx->b);
  VecDuplicate(userctx->b,&userctx->x);

  /*
     Create the linear solver context.  This will be used repeatedly for all
     the linear solves needed.
  */
  SLESCreate(PETSC_COMM_SELF,&userctx->sles);

  return 0;
}

#undef __FUNCT__
#define __FUNCT__ "UserDoLinearSolver"
/*
   Solves -div (rho grad psi) = F using finite differences.
   rho is a 2-dimensional array of size m by n, stored in Fortran
   style by columns.  userb is a standard one-dimensional array.
*/
/* ------------------------------------------------------------------------*/
int UserDoLinearSolver(PetscScalar *rho,UserCtx *userctx,PetscScalar *userb,PetscScalar *userx)
{
  int         ierr,i,j,I,J,m = userctx->m,n = userctx->n,its;
  Mat         A = userctx->A;
  PC          pc;
  PetscScalar v,hx2 = userctx->hx2,hy2 = userctx->hy2;

  /*
     This is not the most efficient way of generating the matrix, but let's
     not worry about it.  We should have separate code for the four corners,
     each edge, and the interior; then we would not have the slow if-tests
     inside the loop.

     Computes the operator
             -div rho grad
     on an m by n grid with zero Dirichlet boundary conditions.  rho is
     assumed to be given on the same grid on which the finite difference
     stencil is applied.  For a staggered grid, one would have to change
     things slightly.
  */
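  /*
     The unknowns are numbered in the natural ordering, x-index fastest, so
     grid point (i,j) corresponds to matrix row I = i + j*m.  Each
     off-diagonal coefficient, e.g. -0.5*(rho[I]+rho[J])*hy2, approximates
     rho at the midpoint of the edge joining the two grid points by the
     average of its nodal values, scaled by 1/(hy*hy) (recall that
     hy2 = 1/(hy*hy)); the diagonal entry is 2*rho[I]*(hx2+hy2).
  */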
  I = 0;
  for (j=0; j<n; j++) {
    for (i=0; i<m; i++) {
      if (j>0) {
        J = I - m;
        v = -.5*(rho[I] + rho[J])*hy2;
        MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);
      }
      if (j<n-1) {
        J = I + m;
        v = -.5*(rho[I] + rho[J])*hy2;
        MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);
      }
      if (i>0) {
        J = I - 1;
        v = -.5*(rho[I] + rho[J])*hx2;
        MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);
      }
      if (i<m-1) {
        J = I + 1;
        v = -.5*(rho[I] + rho[J])*hx2;
        MatSetValues(A,1,&I,1,&J,&v,INSERT_VALUES);
      }
      v = 2.0*rho[I]*(hx2+hy2);
      MatSetValues(A,1,&I,1,&I,&v,INSERT_VALUES);
      I++;
    }
  }

  /*
     Assemble matrix
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
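
  /*
     The calls to MatSetValues() above only set the entries; the matrix
     cannot be used by the solver until both assembly routines have completed.
  */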

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.  Since all the matrices
     will have the same nonzero pattern here, we indicate this so the
     linear solvers can take advantage of it.
  */
  SLESSetOperators(userctx->sles,A,A,SAME_NONZERO_PATTERN);
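
  /*
     If the nonzero structure of the matrix changed from one solve to the
     next, DIFFERENT_NONZERO_PATTERN would be passed instead, so that the
     preconditioner data structures are rebuilt rather than reused.
  */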

  /*
     Set linear solver defaults for this problem (optional).
     - Here we set it to use direct LU factorization for the solution.
  */
  SLESGetPC(userctx->sles,&pc);
  PCSetType(pc,PCLU);
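
  /*
     Purely illustrative (not used by this example): if an iterative method
     were preferred over the LU default chosen above, one could grab the KSP
     context and select, say, GMRES with ILU and a tighter relative
     tolerance.  Enable the block below to try it.
  */
#if 0
  {
    KSP ksp;
    SLESGetKSP(userctx->sles,&ksp);
    KSPSetType(ksp,KSPGMRES);
    PCSetType(pc,PCILU);
    KSPSetTolerances(ksp,1.e-7,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);
  }
#endif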

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     SLESSetFromOptions() is called _after_ any other customization
     routines.

     Run the program with the option -help to see all the possible
     linear solver options.
  */
  SLESSetFromOptions(userctx->sles);
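
  /*
     For example (again assuming the executable is named ex13), the LU
     default chosen above can be replaced at runtime by a monitored
     iterative solve:

         ./ex13 -ksp_type gmres -pc_type ilu -ksp_monitor -ksp_rtol 1.e-7
  */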

  /*
     This allows the PETSc linear solvers to compute the solution
     directly in the user's array rather than in the PETSc vector.

     This is essentially a hack and is not highly recommended unless you
     are quite comfortable with using PETSc.  In general, users should
     write their entire application using PETSc vectors rather than
     arrays.
  */
  VecPlaceArray(userctx->x,userx);
  VecPlaceArray(userctx->b,userb);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  SLESSolve(userctx->sles,userctx->b,userctx->x,&its);
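
  /*
     The final argument returns the number of iterations used by the
     underlying Krylov method; it is not otherwise used in this example.
  */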

  /*
     Put back the PETSc array that belongs in the vector userctx->x
  */

  return 0;
}

/* ------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "UserFinalizeLinearSolver"
int UserFinalizeLinearSolver(UserCtx *userctx)
{

  /*
     We are all done and don't need to solve any more linear systems, so
     we free the work space.  All PETSc objects should be destroyed when
     they are no longer needed.
  */
  SLESDestroy(userctx->sles);
  VecDestroy(userctx->x);
  VecDestroy(userctx->b);
  MatDestroy(userctx->A);
  return 0;
}