Actual source code: ex2f.F
!
!  "$Id: ex2f.F,v 1.65 2001/08/07 03:04:00 balay Exp $";
!
!  Description: Solves a linear system in parallel with SLES (Fortran code).
!               Also shows how to set a user-defined monitoring routine.
!
!  Program usage: mpirun -np <procs> ex2f [-help] [all PETSc options]
!
!/*T
!  Concepts: SLES^basic parallel example
!  Concepts: SLES^setting a user-defined monitoring routine
!  Processors: n
!T*/
!
! -----------------------------------------------------------------------
      program main
      implicit none
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  This program uses CPP for preprocessing, as indicated by the use of
!  PETSc include files in the directory petsc/include/finclude.  This
!  convention enables use of the CPP preprocessor, which allows the use
!  of the #include statements that define PETSc objects and variables.
!
!  Use of the conventional Fortran include statements is also supported.
!  In this case, the PETSc include files are located in the directory
!  petsc/include/foldinclude.
!
!  Since one must be very careful to include each file no more than once
!  in a Fortran routine, application programmers must explicitly list
!  each file needed for the various PETSc components within their
!  program (unlike the C/C++ interface).
!
!  See the Fortran section of the PETSc users manual for details.
!
!  The following include statements are required for SLES Fortran programs:
!     petsc.h       - base PETSc routines
!     petscvec.h    - vectors
!     petscmat.h    - matrices
!     petscpc.h     - preconditioners
!     petscksp.h    - Krylov subspace methods
!     petscsles.h   - SLES interface
!  Include the following to use PETSc random numbers:
!     petscsys.h    - system routines
!  Additional include statements may be needed if using additional
!  PETSc routines in a Fortran program, e.g.,
!     petscviewer.h - viewers
!     petscis.h     - index sets
!
#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscksp.h"
#include "include/finclude/petscsles.h"
#include "include/finclude/petscsys.h"
!
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  Variables:
!     sles     - linear solver context
!     ksp      - Krylov subspace method context
!     pc       - preconditioner context
!     x, b, u  - approx solution, right-hand-side, exact solution vectors
!     A        - matrix that defines linear system
!     its      - iterations for convergence
!     norm     - norm of error in solution
!     rctx     - random number generator context
!
!  Note that vectors are declared as PETSc "Vec" objects.  These vectors
!  are mathematical objects that contain more than just an array of
!  double precision numbers; i.e., vectors in PETSc are not simply
!  double precision x(*).  However, local vector data can be easily
!  accessed via VecGetArray(); see the Fortran section of the PETSc
!  users manual for details and the sketch below.
!
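!  As an illustrative sketch only (not used in this example), local data
!  of a vector x can be reached through the Fortran array/offset idiom of
!  this PETSc generation; the names xx_v and xx_i are hypothetical:
!
!      PetscScalar xx_v(1)
!      PetscOffset xx_i
!      ...
!      call VecGetArray(x,xx_v,xx_i,ierr)
!      ... entry i of the local array is xx_v(xx_i+i) ...
!      call VecRestoreArray(x,xx_v,xx_i,ierr)
!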
      double precision norm
      integer          i,j,II,JJ,ierr,m,n
      integer          rank,size,its,Istart,Iend
      PetscTruth       flg
      PetscScalar      v,one,neg_one
      Vec              x,b,u
      Mat              A
      SLES             sles
      KSP              ksp
      PetscRandom      rctx

!  These variables are not currently used.
!      PC               pc
!      PCType           ptype
!      double precision tol
!  Note: Any user-defined Fortran routines (such as MyKSPMonitor)
!  MUST be declared as external.

      external MyKSPMonitor,MyKSPConverged
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      m = 3
      n = 3
      one = 1.0
      neg_one = -1.0
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!      Compute the matrix and right-hand-side vector that define
!      the linear system, Ax = b.
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!  Create parallel matrix, specifying only its global dimensions.
!  When using MatCreate(), the matrix format can be specified at
!  runtime.  Also, the parallel partitioning of the matrix is
!  determined by PETSc at runtime.  (A sketch of an alternative
!  creation routine follows the calls below.)
      call MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,m*n,  &
     &               m*n,A,ierr)
      call MatSetFromOptions(A,ierr)
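!  As a sketch only (not used here), the matrix could instead be created
!  with an explicit format and preallocation; assuming the
!  MatCreateMPIAIJ() interface of this PETSc generation, with at most 5
!  nonzeros per row for the five-point stencil set up below:
!
!      call MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,            &
!     &     PETSC_DECIDE,m*n,m*n,5,PETSC_NULL_INTEGER,5,              &
!     &     PETSC_NULL_INTEGER,A,ierr)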
!  Currently, all PETSc parallel matrix formats are partitioned by
!  contiguous chunks of rows across the processors.  Determine which
!  rows of the matrix are locally owned.

      call MatGetOwnershipRange(A,Istart,Iend,ierr)
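!  For example, with the default m = n = 3 (9 global rows) on 3
!  processes, the contiguous partitioning gives process 0 rows 0-2
!  (Istart=0, Iend=3), process 1 rows 3-5, and process 2 rows 6-8.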
!  Set matrix elements for the 2-D, five-point stencil in parallel.
!   - Each processor needs to insert only elements that it owns
!     locally (but any non-local elements will be sent to the
!     appropriate processor during matrix assembly).
!   - Always specify global rows and columns of matrix entries.
!   - Note that MatSetValues() uses 0-based row and column numbers
!     in Fortran as well as in C.

!  Note: this example uses the less common natural ordering that orders
!  first all the unknowns for x = h, then for x = 2h, etc.; hence you
!  see JJ = II +- n instead of JJ = II +- m as you might expect.  The
!  more standard ordering would first do all variables for y = h, then
!  y = 2h, etc.
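!  For example, with m = n = 3 the interior point II = 4 has i = 1 and
!  j = 1, and its four stencil neighbors are JJ = 1 (II-n), 3 (II-1),
!  5 (II+1), and 7 (II+n).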
      do 10, II=Istart,Iend-1
        v = -1.0
        i = II/n
        j = II - i*n
        if (i.gt.0) then
          JJ = II - n
          call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
        endif
        if (i.lt.m-1) then
          JJ = II + n
          call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
        endif
        if (j.gt.0) then
          JJ = II - 1
          call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
        endif
        if (j.lt.n-1) then
          JJ = II + 1
          call MatSetValues(A,1,II,1,JJ,v,INSERT_VALUES,ierr)
        endif
        v = 4.0
        call MatSetValues(A,1,II,1,II,v,INSERT_VALUES,ierr)
 10   continue
!  Assemble matrix, using the 2-step process:
!       MatAssemblyBegin(), MatAssemblyEnd()
!  Computations can be done while messages are in transit
!  by placing code between these two statements.

      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)
!  Create parallel vectors.
!   - Here, the parallel partitioning of the vector is determined by
!     PETSc at runtime.  We could also specify the local dimensions
!     if desired (see the sketch after these calls) -- or use the more
!     general routine VecCreate().
!   - When solving a linear system, the vectors and matrices MUST
!     be partitioned accordingly.  PETSc automatically generates
!     appropriately partitioned matrices and vectors when MatCreate()
!     and VecCreate() are used with the same communicator.
!   - Note: We form 1 vector from scratch and then duplicate as needed.

      call VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,m*n,u,ierr)
      call VecSetFromOptions(u,ierr)
      call VecDuplicate(u,b,ierr)
      call VecDuplicate(b,x,ierr)
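!  A sketch of the variant with an explicit local size, assuming a
!  hypothetical variable nlocal that matches the matrix partitioning
!  on each process:
!
!      call VecCreateMPI(PETSC_COMM_WORLD,nlocal,m*n,u,ierr)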
!  Set exact solution; then compute right-hand-side vector.
!  By default we use an exact solution vector with all elements
!  equal to 1.0; alternatively, the runtime option -random_exact_sol
!  forms a solution vector with random components.

      call PetscOptionsHasName(PETSC_NULL_CHARACTER,                  &
     &             "-random_exact_sol",flg,ierr)
      if (flg .eq. 1) then
         call PetscRandomCreate(PETSC_COMM_WORLD,RANDOM_DEFAULT,      &
     &             rctx,ierr)
         call VecSetRandom(rctx,u,ierr)
         call PetscRandomDestroy(rctx,ierr)
      else
         call VecSet(one,u,ierr)
      endif
      call MatMult(A,u,b,ierr)
!  View the exact solution vector if desired

      call PetscOptionsHasName(PETSC_NULL_CHARACTER,                  &
     &             "-view_exact_sol",flg,ierr)
      if (flg .eq. 1) then
         call VecView(u,PETSC_VIEWER_STDOUT_WORLD,ierr)
      endif
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!          Create the linear solver and set various options
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!  Create linear solver context

      call SLESCreate(PETSC_COMM_WORLD,sles,ierr)

!  Set operators.  Here the matrix that defines the linear system
!  also serves as the preconditioning matrix.

      call SLESSetOperators(sles,A,A,DIFFERENT_NONZERO_PATTERN,       &
     &                      ierr)
!  Set linear solver defaults for this problem (optional).
!   - By extracting the KSP and PC contexts from the SLES context,
!     we can then directly call any KSP and PC routines
!     to set various options.
!   - The following four statements are optional; all of these
!     parameters could alternatively be specified at runtime via
!     SLESSetFromOptions().  All of these defaults can be
!     overridden at runtime, as indicated below.

!     We comment out this section of code since the Jacobi
!     preconditioner is not a good general default.

!      call SLESGetPC(sles,pc,ierr)
!      ptype = PCJACOBI
!      call PCSetType(pc,ptype,ierr)
!      call SLESGetKSP(sles,ksp,ierr)
!      tol = 1.e-7
!      call KSPSetTolerances(ksp,tol,PETSC_DEFAULT_DOUBLE_PRECISION,  &
!     &      PETSC_DEFAULT_DOUBLE_PRECISION,PETSC_DEFAULT_INTEGER,ierr)
!  Set user-defined monitoring routine if desired

      call PetscOptionsHasName(PETSC_NULL_CHARACTER,                  &
     &                    '-my_ksp_monitor',flg,ierr)
      if (flg .eq. 1) then
         call SLESGetKSP(sles,ksp,ierr)
         call KSPSetMonitor(ksp,MyKSPMonitor,PETSC_NULL_OBJECT,       &
     &                      PETSC_NULL_FUNCTION,ierr)
      endif
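!  For example, the user-defined monitor above is activated at runtime
!  with an invocation such as:
!      mpirun -np 2 ex2f -my_ksp_monitor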
!  Set runtime options, e.g.,
!      -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!  These options will override those specified above as long as
!  SLESSetFromOptions() is called _after_ any other customization
!  routines.

      call SLESSetFromOptions(sles,ierr)
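!  For example, an invocation such as the following (using the options
!  listed above) selects Bi-CGStab with a Jacobi preconditioner and a
!  relative tolerance of 1.e-6:
!
!      mpirun -np 4 ex2f -ksp_type bcgs -pc_type jacobi -ksp_rtol 1.e-6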
!  Set convergence test routine if desired

      call PetscOptionsHasName(PETSC_NULL_CHARACTER,                  &
     &                    '-my_ksp_convergence',flg,ierr)
      if (flg .eq. 1) then
         call SLESGetKSP(sles,ksp,ierr)
         call KSPSetConvergenceTest(ksp,MyKSPConverged,               &
     &                    PETSC_NULL_OBJECT,ierr)
      endif
!
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call SLESSolve(sles,b,x,its,ierr)
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                     Check solution and clean up
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!  Check the error

      call VecAXPY(neg_one,u,x,ierr)
      call VecNorm(x,NORM_2,norm,ierr)
      if (rank .eq. 0) then
         if (norm .gt. 1.e-12) then
            write(6,100) norm,its
         else
            write(6,110) its
         endif
      endif
 100  format('Norm of error ',e10.4,' iterations ',i5)
 110  format('Norm of error < 1.e-12, iterations ',i5)
!  Free work space.  All PETSc objects should be destroyed when they
!  are no longer needed.

      call SLESDestroy(sles,ierr)
      call VecDestroy(u,ierr)
      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
!  Always call PetscFinalize() before exiting a program.  This routine
!    - finalizes the PETSc libraries as well as MPI
!    - provides summary and diagnostic information if certain runtime
!      options are chosen (e.g., -log_summary).  See the PetscFinalize()
!      manpage for more information.

      call PetscFinalize(ierr)
      end
! --------------------------------------------------------------
!
!  MyKSPMonitor - This is a user-defined routine for monitoring
!  the SLES iterative solvers.
!
!  Input Parameters:
!    ksp   - iterative context
!    n     - iteration number
!    rnorm - 2-norm (preconditioned) residual value (may be estimated)
!    dummy - optional user-defined monitor context (unused here)
!
      subroutine MyKSPMonitor(ksp,n,rnorm,dummy,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscksp.h"
      KSP              ksp
      Vec              x
      integer          ierr,n,dummy,rank
      double precision rnorm
!  Build the solution vector

      call KSPBuildSolution(ksp,PETSC_NULL_OBJECT,x,ierr)

!  Write the solution vector and residual norm to stdout
!   - Note that the parallel viewer PETSC_VIEWER_STDOUT_WORLD
!     handles data from multiple processors so that the
!     output is not jumbled.

      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
      if (rank .eq. 0) write(6,100) n
      call VecView(x,PETSC_VIEWER_STDOUT_WORLD,ierr)
      if (rank .eq. 0) write(6,200) n,rnorm

 100  format('iteration ',i5,' solution vector:')
 200  format('iteration ',i5,' residual norm ',e10.4)

      ierr = 0
      end
! --------------------------------------------------------------
!
!  MyKSPConverged - This is a user-defined routine for testing
!  convergence of the SLES iterative solvers.
!
!  Input Parameters:
!    ksp   - iterative context
!    n     - iteration number
!    rnorm - 2-norm (preconditioned) residual value (may be estimated)
!    dummy - optional user-defined convergence context (unused here)
!
      subroutine MyKSPConverged(ksp,n,rnorm,flag,dummy,ierr)

      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscksp.h"
      KSP              ksp
      integer          ierr,n,dummy,flag
      double precision rnorm
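!  A simple test: declare convergence once the residual norm drops below
!  0.05.  Setting flag positive signals convergence to KSP, while zero
!  means continue iterating (the flag convention here is assumed from
!  this PETSc generation).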
      if (rnorm .le. .05) then
         flag = 1
      else
         flag = 0
      endif
      ierr = 0

      end