/* ex2.c  (petsc-3.7.5, 2017-01-01) */
static char help[] = "Solves a linear system in parallel with KSP.\n\
Input parameters include:\n\
  -random_exact_sol : use a random exact solution vector\n\
  -view_exact_sol   : write exact solution vector to stdout\n\
  -m <mesh_x>       : number of mesh points in x-direction\n\
  -n <mesh_y>       : number of mesh points in y-direction\n\n";

/*T
   Concepts: KSP^basic parallel example;
   Concepts: KSP^Laplacian, 2d
   Concepts: Laplacian, 2d
   Processors: n
T*/

/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include <petscksp.h>

int main(int argc,char **args)
{
  Vec            x,b,u;    /* approx solution, RHS, exact solution */
  Mat            A;        /* linear system matrix */
  KSP            ksp;      /* linear solver context */
  PetscRandom    rctx;     /* random number generator context */
  PetscReal      norm;     /* norm of solution error */
  PetscInt       i,j,Ii,J,Istart,Iend,m = 8,n = 7,its;
  PetscBool      flg = PETSC_FALSE;
  PetscScalar    v;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stage;
#endif

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.

     Performance tuning note: For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good
     performance.  See the matrix chapter of the users manual for details.
  */
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(A);
  MatMPIAIJSetPreallocation(A,5,NULL,5,NULL);
  MatSeqAIJSetPreallocation(A,5,NULL);
  MatSeqSBAIJSetPreallocation(A,1,5,NULL);
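
  /*
     The preallocation of 5 nonzeros per row matches the 5-point stencil
     assembled below: each row stores the diagonal entry plus at most four
     neighbors, and rows on the domain boundary simply use fewer of the
     reserved slots.
  */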

  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(A,&Istart,&Iend);
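
  /*
     For example, with the default m = 8, n = 7 the matrix has 56 rows;
     on 2 processes rank 0 typically owns rows [0,28) and rank 1 owns
     rows [28,56), since PETSC_DECIDE splits the rows roughly evenly.
  */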

  /*
     Set matrix elements for the 2-D, five-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.

     Note: this uses the less common natural ordering that orders first
     all the unknowns for x = h, then for x = 2h, etc.; hence you see
     J = Ii +- n instead of J = Ii +- m as you might expect.  The more
     standard ordering would first do all variables for y = h, then
     y = 2h, etc.
  */
  PetscLogStageRegister("Assembly", &stage);
  PetscLogStagePush(stage);
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }
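
  /*
     Each interior row now holds the standard 5-point Laplacian stencil
     (4 on the diagonal, -1 for the north/south/east/west neighbors);
     boundary rows simply drop the neighbors that fall outside the grid,
     which corresponds to homogeneous Dirichlet boundary conditions.
  */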

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  PetscLogStagePop();

  /* A is symmetric. Set symmetric flag to enable ICC/Cholesky preconditioner */
  MatSetOption(A,MAT_SYMMETRIC,PETSC_TRUE);

  /*
     Create parallel vectors.
      - We form 1 vector from scratch and then duplicate as needed.
      - When using VecCreate(), VecSetSizes() and VecSetFromOptions()
        in this example, we specify only the vector's global dimension;
        the parallel partitioning is determined at runtime.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned accordingly.  PETSc automatically generates
        appropriately partitioned matrices and vectors when MatCreate()
        and VecCreate() are used with the same communicator.
      - The user can alternatively specify the local vector and matrix
        dimensions when more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
        below).
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);

  /*
     Set exact solution; then compute right-hand-side vector.
     By default the exact solution is a vector with all elements
     equal to 1.0; alternatively, the runtime option
     -random_exact_sol forms a solution vector with random components.
  */
  PetscOptionsGetBool(NULL,NULL,"-random_exact_sol",&flg,NULL);
  if (flg) {
    PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
    PetscRandomSetFromOptions(rctx);
    VecSetRandom(u,rctx);
    PetscRandomDestroy(&rctx);
  } else {
    VecSet(u,1.0);
  }
  MatMult(A,u,b);
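
  /*
     Because b = A*u, the vector u is by construction the exact solution
     of the system Ax = b solved below, so the error of the computed
     solution can be measured directly against u.
  */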

  /*
     View the exact solution vector if desired
  */
  flg = PETSC_FALSE;
  PetscOptionsGetBool(NULL,NULL,"-view_exact_sol",&flg,NULL);
  if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                Create the linear solver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,A,A);

  /*
     Set linear solver defaults for this problem (optional).
      - By extracting the KSP and PC contexts from the KSP context,
        we can then directly call any KSP and PC routines to set
        various options.
      - The following statement is optional; all of these
        parameters could alternatively be specified at runtime via
        KSPSetFromOptions().  All of these defaults can be
        overridden at runtime, as indicated below.
  */
  KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,PETSC_DEFAULT);
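
  /*
     The relative tolerance 1.e-2/((m+1)*(n+1)) tightens as the grid is
     refined: it equals 1.e-2*hx*hy for mesh spacings hx = 1/(m+1) and
     hy = 1/(n+1), a common way of keeping the algebraic error small
     relative to the discretization error on finer meshes.
  */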

  /*
     Set runtime options, e.g.,
         -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
     These options will override those specified above as long as
     KSPSetFromOptions() is called _after_ any other customization
     routines.
  */
  KSPSetFromOptions(ksp);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                          Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  KSPSolve(ksp,b,x);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                      Check solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Check the error
  */
  VecAXPY(x,-1.0,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
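
  /*
     Note that VecAXPY() above overwrote the computed solution:
     x <- x - u, so the norm printed below is ||x - u||_2, the 2-norm
     of the error with respect to the exact solution u.
  */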

  /*
     Print convergence information.  PetscPrintf() produces a single
     print statement from all processes that share a communicator.
     An alternative is PetscFPrintf(), which prints to a file.
  */
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g iterations %D\n",(double)norm,its);

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);  VecDestroy(&x);
  VecDestroy(&b);  MatDestroy(&A);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}
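
/*
   Example usage (the executable name ./ex2 and the launcher mpiexec are
   assumptions; adjust them to your build and MPI installation):

     mpiexec -n 4 ./ex2 -m 40 -n 40 -ksp_monitor
     mpiexec -n 4 ./ex2 -random_exact_sol -ksp_type cg -pc_type bjacobi
     mpiexec -n 2 ./ex2 -view_exact_sol -ksp_rtol 1.e-7

   -ksp_type, -pc_type, -ksp_monitor and -ksp_rtol are the standard KSP
   runtime options mentioned above; -m, -n, -random_exact_sol and
   -view_exact_sol are the options defined by this example's help string.
*/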