Actual source code: ex13f90.F
!
!  "$Id: ex13f90.F,v 1.24 2001/08/07 03:04:00 balay Exp $";
!
!
!/*T
!   Concepts: SLES^basic sequential example
!   Concepts: SLES^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                          Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  The following include statements are required for SLES Fortran programs:
!     petsc.h      - base PETSc routines
!     petscvec.h   - vectors
!     petscmat.h   - matrices
!     petscksp.h   - Krylov subspace methods
!     petscpc.h    - preconditioners
!     petscsles.h  - SLES interface
!
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"

!  User-defined context that contains all the data structures used
!  in the linear solution process.

!   Vec    x,b      /* solution and right-hand-side vectors */
!   Mat    A        /* sparse matrix */
!   SLES   sles     /* linear solver context */
!   int    m,n      /* grid dimensions */
!
!  Since we cannot store Scalars and integers in the same context,
!  we store the integers/pointers in the user-defined context, and
!  the scalar values are carried in the common block.
!  The scalar values in this simplistic example could easily
!  be recalculated in each routine where they are needed.
!
!   Scalar hx2,hy2  /* 1/(hx*hx) and 1/(hy*hy), i.e. (m+1)*(m+1) and (n+1)*(n+1) */

!  Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                        Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar      hx,hy,x,y
      PetscFortranAddr userctx(6)
      integer          ierr,m,n,t,tmax,flg,i,j,Ntot
      integer          size,rank
      double precision enorm
      PetscScalar, ALLOCATABLE :: userx(:,:),userb(:,:)
      PetscScalar, ALLOCATABLE :: solution(:,:),rho(:,:)

      double precision hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                        Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRQ(1,' ',ierr)
      endif

!  The next two lines are here for testing only; they allow the user to
!  set the grid size at runtime.
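!  For example (assuming the executable is named ex13f90), running
!     ./ex13f90 -m 20 -n 20
!  solves the problem on a 20 x 20 interior grid instead of the default 6 x 7.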

      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr)
      Ntot = m*n

!  Allocate arrays to hold the solution to the linear system.  This
!  is not normally done in PETSc programs, but in this case, since we
!  are calling these routines from a non-PETSc program, we would like
!  to reuse the data structures from another code.  So in the context
!  of a larger application these would be provided by other (non-PETSc)
!  parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill the array rho() with the function rho(x,y) = x; fill the
!  right-hand side b() and the solution with a known problem for testing.
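!  For reference: with rho(x,y) = x and the manufactured solution
!     psi(x,y) = sin(2*pi*x)*sin(2*pi*y)
!  the right-hand side assigned to userb below is
!     F = -div(rho grad psi)
!       = -2*pi*cos(2*pi*x)*sin(2*pi*y) + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y)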

      hx = 1.0/(m+1)
      hy = 1.0/(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)* &
     &                      sin(2.*PETSC_PI*y) + &
     &                      8*PETSC_PI*PETSC_PI*x* &
     &                      sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a bunch of timesteps, setting up and solving the linear
!  system for each timestep.
!  Note that this loop is somewhat artificial.  It is intended to
!  demonstrate how one may reuse the linear solvers in each timestep.

      do 100 t=1,tmax
         call UserDoLinearSolver(rho,userctx,userb,userx,ierr)

!  Compute error: Note that this could (and usually should) all be done
!  using the PETSc vector operations.  Here we demonstrate using more
!  standard programming practices to show how they may be mixed with
!  PETSc.
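!  (As computed below, enorm is hx*hy times the sum of squared pointwise
!  errors, i.e. an approximation to the square of the L2 norm of the error.)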
         enorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
               enorm = enorm + &
     &              (solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = enorm * PetscRealPart(hx*hy)
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE10.4)
 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"

      integer          m,n,ierr
      PetscFortranAddr userctx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

!  Local variable declarations
      Mat     A
      Vec     b,x
      SLES    sles
      integer Ntot

!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1] x [0,1].
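!  (hx2 and hy2 below are 1/(hx*hx) and 1/(hy*hy), where hx = 1/(m+1) and
!  hy = 1/(n+1) are the mesh spacings in x and y.)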

      hx2  = (m+1)*(m+1)
      hy2  = (n+1)*(n+1)
      Ntot = m*n

!  Create the sparse matrix.  Preallocate 5 nonzeros per row.
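!  (The 5 corresponds to the 5-point stencil assembled in UserDoLinearSolver:
!  the diagonal entry plus the four nearest neighbours.)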

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,5, &
     &                     PETSC_NULL_INTEGER,A,ierr)
!
!  Create vectors.  Here we create vectors with no memory allocated.
!  This way, we can use the data structures already in the program
!  by using the VecPlaceArray() routine at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,Ntot, &
     &                           PETSC_NULL_SCALAR,b,ierr)
      call VecDuplicate(b,x,ierr)

!  Create linear solver context.  This will be used repeatedly for all
!  the linear solves needed.

      call SLESCreate(PETSC_COMM_SELF,sles,ierr)

      userctx(1) = x
      userctx(2) = b
      userctx(3) = A
      userctx(4) = sles
      userctx(5) = m
      userctx(6) = n

      return
      end
! -----------------------------------------------------------------------

!  Solves -div (rho grad psi) = F using finite differences.
!  rho is a 2-dimensional array of size m by n, stored in Fortran
!  style by columns.  userb is a standard one-dimensional array.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"

      integer          ierr
      PetscFortranAddr userctx(*)
      PetscScalar      rho(*),userb(*),userx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

      PC      pc
      SLES    sles
      Vec     b,x
      Mat     A
      integer m,n

      integer i,j,II,JJ,its
      PetscScalar v

!      PetscScalar tmpx(*),tmpb(*)

      x    = userctx(1)
      b    = userctx(2)
      A    = userctx(3)
      sles = userctx(4)
      m    = userctx(5)
      n    = userctx(6)

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it.  We should have separate code for
!  the four corners, each edge, and the interior.  Then we won't
!  have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions.  rho is
!  assumed to be given on the same grid on which the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.
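!  The loop below assembles, row by row, the 5-point stencil
!     diagonal (i,j):              2*rho_c*(hx2+hy2)
!     neighbours (i,j-1),(i,j+1): -0.5*(rho_c+rho_nb)*hy2
!     neighbours (i-1,j),(i+1,j): -0.5*(rho_c+rho_nb)*hx2
!  where rho_c is rho at the node, rho_nb is rho at the neighbour, and
!  hx2 = 1/(hx*hx), hy2 = 1/(hy*hy).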

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v  = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,1,II,1,II,v,INSERT_VALUES,ierr)
            II = II+1
 100     continue
 110  continue
!
!  Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)

!
!  Set operators.  Here the matrix that defines the linear system
!  also serves as the preconditioning matrix.  Since all the matrices
!  will have the same nonzero pattern here, we indicate this so the
!  linear solvers can take advantage of it.
!
      call SLESSetOperators(sles,A,A,SAME_NONZERO_PATTERN,ierr)

!
!  Set linear solver defaults for this problem (optional).
!  - Here we set it to use direct LU factorization for the solution.
!
      call SLESGetPC(sles,pc,ierr)
      call PCSetType(pc,PCLU,ierr)

!
!  Set runtime options, e.g.,
!      -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!  These options will override those specified above as long as
!  SLESSetFromOptions() is called _after_ any other customization
!  routines.
!
!  Run the program with the option -help to see all the possible
!  linear solver options.
!
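!  For instance, running with
!      -pc_type ilu -ksp_type gmres
!  should override the LU factorization chosen above, since
!  SLESSetFromOptions() is called after PCSetType().
!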
      call SLESSetFromOptions(sles,ierr)

!
!  This allows the PETSc linear solvers to compute the solution
!  directly in the user's array rather than in the PETSc vector.
!
!  This is essentially a hack and not highly recommended unless you
!  are quite comfortable with using PETSc.  In general, users should
!  write their entire application using PETSc vectors rather than
!  arrays.
!
      call VecPlaceArray(x,userx,ierr)
      call VecPlaceArray(b,userb,ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call SLESSolve(sles,b,x,its,ierr)

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"

      integer          ierr
      PetscFortranAddr userctx(*)

!  Local variable declarations

      Vec  x,b
      Mat  A
      SLES sles
!
!  We are all done and don't need to solve any more linear systems, so
!  we free the work space.  All PETSc objects should be destroyed when
!  they are no longer needed.
!
      x    = userctx(1)
      b    = userctx(2)
      A    = userctx(3)
      sles = userctx(4)

      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
      call SLESDestroy(sles,ierr)

      return
      end