/* ex5.c  (PETSc 3.7.5, 2017-01-01) */
static char help[] = "Solves two linear systems in parallel with KSP.  The code\n\
illustrates repeated solution of linear systems with the same preconditioner\n\
method but different matrices (having the same nonzero structure).  The code\n\
also uses multiple profiling stages.  Input arguments are\n\
  -m <size> : problem size\n\
  -mat_nonsym : use nonsymmetric matrix (default is symmetric)\n\n";
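
/*
   Example runs (the exact MPI launcher depends on the installation), e.g.:
     mpiexec -n 2 ./ex5 -m 5
     mpiexec -n 4 ./ex5 -mat_nonsym -ksp_monitor -log_summary
*/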

/*T
   Concepts: KSP^repeatedly solving linear systems;
   Concepts: PetscLog^profiling multiple stages of code;
   Processors: n
T*/

/*
   Include "petscksp.h" so that we can use KSP solvers.  Note that this file
   automatically includes:
     petscsys.h    - base PETSc routines    petscvec.h - vectors
     petscmat.h    - matrices
     petscis.h     - index sets             petscksp.h - Krylov subspace methods
     petscviewer.h - viewers                petscpc.h  - preconditioners
*/
#include <petscksp.h>

int main(int argc,char **args)
{
  KSP            ksp;               /* linear solver context */
  Mat            C;                 /* matrix */
  Vec            x,u,b;             /* approx solution, RHS, exact solution */
  PetscReal      norm;              /* norm of solution error */
  PetscScalar    v,none = -1.0;
  PetscInt       Ii,J,ldim,low,high,iglobal,Istart,Iend;
  PetscInt       i,j,m = 3,n = 2,its;
  PetscMPIInt    size,rank;
  PetscBool      mat_nonsymmetric = PETSC_FALSE;
  PetscBool      testnewC         = PETSC_FALSE;
#if defined(PETSC_USE_LOG)
  PetscLogStage  stages[2];
#endif

  PetscInitialize(&argc,&args,(char*)0,help);
  PetscOptionsGetInt(NULL,NULL,"-m",&m,NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  n = 2*size;
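  /* the global grid is m x n; since n = 2*size, the linear system has m*n = 2*m*size unknowns */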

  /*
     Set flag if we are doing a nonsymmetric problem; the default is symmetric.
  */
  PetscOptionsGetBool(NULL,NULL,"-mat_nonsym",&mat_nonsymmetric,NULL);

  /*
     Register two stages for separate profiling of the two linear solves.
     Use the runtime option -log_summary for a printout of performance
     statistics at the program's conclusion.
  */
  PetscLogStageRegister("Original Solve",&stages[0]);
  PetscLogStageRegister("Second Solve",&stages[1]);

  /* -------------- Stage 0: Solve Original System ---------------------- */
  /*
     Indicate to PETSc profiling that we're beginning the first stage
  */
  PetscLogStagePush(stages[0]);

  /*
     Create parallel matrix, specifying only its global dimensions.
     When using MatCreate(), the matrix format can be specified at
     runtime.  Also, the parallel partitioning of the matrix is
     determined by PETSc at runtime.
  */
  MatCreate(PETSC_COMM_WORLD,&C);
  MatSetSizes(C,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C);
  MatSetUp(C);
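
  /*
     For large problems one would normally preallocate storage at this point
     (e.g., with MatMPIAIJSetPreallocation()) instead of relying on
     MatSetUp(), since dynamic allocation during MatSetValues() is slow.
  */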

  /*
     Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned.
  */
  MatGetOwnershipRange(C,&Istart,&Iend);

  /*
     Set matrix entries in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Always specify global rows and columns of matrix entries.
  */
  for (Ii=Istart; Ii<Iend; Ii++) {
    v = -1.0; i = Ii/n; j = Ii - i*n;
    if (i>0)   {J = Ii - n; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (i<m-1) {J = Ii + n; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j>0)   {J = Ii - 1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    if (j<n-1) {J = Ii + 1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    v = 4.0; MatSetValues(C,1,&Ii,1,&Ii,&v,ADD_VALUES);
  }
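
  /*
     The loop above builds the classic 5-point finite-difference stencil on
     the m x n grid: each row couples a grid point to its four neighbors,

               -1
           -1   4  -1
               -1
  */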

  /*
     Make the matrix nonsymmetric if desired
  */
  if (mat_nonsymmetric) {
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.5; i = Ii/n;
      if (i>1) {J = Ii-n-1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
  } else {
    MatSetOption(C,MAT_SYMMETRIC,PETSC_TRUE);
    MatSetOption(C,MAT_SYMMETRY_ETERNAL,PETSC_TRUE);
  }
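
  /*
     MAT_SYMMETRIC declares the current values symmetric; MAT_SYMMETRY_ETERNAL
     additionally promises that the symmetry survives later changes to the
     values (as in stage 1 below), so PETSc need not recheck it.
  */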

  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transit
     by placing code between these two statements.
  */
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);
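
  /*
     The assembled matrix can be inspected with, e.g.,
       MatView(C,PETSC_VIEWER_STDOUT_WORLD);
     or at runtime with the option -mat_view.
  */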

  /*
     Create parallel vectors.
      - When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b);
  VecDuplicate(b,&x);

  /*
     Currently, all parallel PETSc vectors are partitioned by
     contiguous chunks across the processors.  Determine which
     range of entries is locally owned.
  */
  VecGetOwnershipRange(x,&low,&high);

  /*
     Set elements within the exact solution vector in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local entries will be sent to the
        appropriate processor during vector assembly).
      - Always specify global locations of vector entries.
  */
  VecGetLocalSize(x,&ldim);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v       = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,INSERT_VALUES);
  }

  /*
     Assemble vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transit,
     by placing code between these two statements.
  */
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);

  /*
     Compute right-hand-side vector
  */
  MatMult(C,u,b);
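
  /*
     Since b is computed as C*u, the vector u is the exact solution of
     C x = b; it is used below to measure the error in the computed x.
  */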

  /*
     Create linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp);

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,C,C);

  /*
     Set runtime options (e.g., -ksp_type <type> -pc_type <type>)
  */
  KSPSetFromOptions(ksp);
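
  /*
     The solver could also be configured programmatically (before the
     KSPSetFromOptions() call above, so that command-line options still
     override it); a minimal sketch:

       PC pc;
       KSPSetType(ksp,KSPGMRES);
       KSPGetPC(ksp,&pc);
       PCSetType(pc,PCJACOBI);
       KSPSetTolerances(ksp,1.e-7,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT);
  */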

  /*
     Solve linear system.  Here we explicitly call KSPSetUp() for more
     detailed performance monitoring of certain preconditioners, such
     as ICC and ILU.  This call is optional, as KSPSetUp() will
     automatically be called within KSPSolve() if it hasn't been
     called already.
  */
  KSPSetUp(ksp);
  KSPSolve(ksp,b,x);

  /*
     Check the error
  */
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g, Iterations %D\n",(double)norm,its);

  /* -------------- Stage 1: Solve Second System ---------------------- */
  /*
     Solve another linear system with the same method.  We reuse the KSP
     context, matrix and vector data structures, and hence save the
     overhead of creating new ones.

     Indicate to PETSc profiling that we're concluding the first
     stage with PetscLogStagePop(), and beginning the second stage with
     PetscLogStagePush().
  */
  PetscLogStagePop();
  PetscLogStagePush(stages[1]);

  /*
     Initialize all matrix entries to zero.  MatZeroEntries() retains the
     nonzero structure of the matrix for sparse formats.
  */
  MatZeroEntries(C);

  /*
     Assemble matrix again.  Note that we retain the same matrix data
     structure and the same nonzero pattern; we just change the values
     of the matrix entries.
  */
  for (i=0; i<m; i++) {
    for (j=2*rank; j<2*rank+2; j++) {
      v = -1.0; Ii = j + n*i;
      if (i>0)   {J = Ii - n; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (i<m-1) {J = Ii + n; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j>0)   {J = Ii - 1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j<n-1) {J = Ii + 1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
      v = 6.0; MatSetValues(C,1,&Ii,1,&Ii,&v,ADD_VALUES);
    }
  }
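
  /*
     Note that, unlike the stage-0 loop over locally owned rows, this loop
     is organized by grid columns 2*rank and 2*rank+1, so some of the rows
     touched here are owned by other processes; those entries are
     communicated automatically during the assembly below.
  */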
  if (mat_nonsymmetric) {
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.5; i = Ii/n;
      if (i>1) {J = Ii-n-1; MatSetValues(C,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
  }
  MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);

  PetscOptionsGetBool(NULL,NULL,"-test_newMat",&testnewC,NULL);
  if (testnewC) {
    /*
       The user may supply a new matrix C with the same nonzero pattern, e.g.
        ./ex5 -ksp_monitor -mat_type sbaij -pc_type cholesky -pc_factor_mat_solver_package mumps -test_newMat
    */
    Mat Ctmp;
    MatDuplicate(C,MAT_COPY_VALUES,&Ctmp);
    MatDestroy(&C);
    MatDuplicate(Ctmp,MAT_COPY_VALUES,&C);
    MatDestroy(&Ctmp);
  }
  /*
     Compute another right-hand-side vector
  */
  MatMult(C,u,b);

  /*
     Set operators.  Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  KSPSetOperators(ksp,C,C);

  /*
     Solve linear system
  */
  KSPSetUp(ksp);
  KSPSolve(ksp,b,x);

  /*
     Check the error
  */
  VecAXPY(x,none,u);
  VecNorm(x,NORM_2,&norm);
  KSPGetIterationNumber(ksp,&its);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %g, Iterations %D\n",(double)norm,its);

  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(&ksp);
  VecDestroy(&u);
  VecDestroy(&x);
  VecDestroy(&b);
  MatDestroy(&C);

  /*
     Indicate to PETSc profiling that we're concluding the second stage
  */
  PetscLogStagePop();

  PetscFinalize();
  return 0;
}