Actual source code: ex5s.c
1: /*$Id: ex5s.c,v 1.29 2001/08/07 21:31:17 bsmith Exp $*/
3: static char help[] = "2d Bratu problem in shared memory parallel with SNES.\n\
4: We solve the Bratu (SFI - solid fuel ignition) problem in a 2D rectangular\n\
5: domain, uses SHARED MEMORY to evaluate the user function.\n\
6: The command line options include:\n\
7: -par <parameter>, where <parameter> indicates the problem's nonlinearity\n\
8: problem SFI: <parameter> = Bratu parameter (0 <= par <= 6.81)\n\
9: -mx <xg>, where <xg> = number of grid points in the x-direction\n\
10: -my <yg>, where <yg> = number of grid points in the y-direction\n\
11: -use_fortran_function: use Fortran coded function, rather than C\n";
13: /*
14: This code compiles ONLY on SGI systems
15: ========================================
16: */
17: /*T
18: Concepts: SNES^parallel Bratu example
19: Concepts: shared memory
20: Processors: n
21: T*/
23: /*
25: Programming model: Combination of
26: 1) MPI message passing for PETSc routines
27: 2) automatic loop parallelism (using shared memory) for the user-
28: provided function.
30: While the user function is being evaluated, all MPI processes except process
31: 0 block. Process zero spawns nt threads to evaluate the user function. Once
32: the user function is complete, the worker threads are suspended and all the MPI processes
33: continue.
35: Other useful options:
37: -snes_mf : use matrix free operator and no preconditioner
38: -snes_mf_operator : use matrix free operator but compute Jacobian via
39: finite differences to form preconditioner
41: Environment variable:
43: setenv MPC_NUM_THREADS nt <- set the number of threads process 0 should
44: use to evaluate the user-provided function (an example run is sketched below)
46: Note: The number of MPI processes (set with the mpirun option -np) can
47: be set completely independently from the number of threads process 0
48: uses to evaluate the function (though usually one would make them the same).
49: */
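/*
   Example run (a sketch only; the exact build and launch commands depend on the
   local SGI and MPI installation, and the thread count and grid sizes here are
   purely illustrative):

       setenv MPC_NUM_THREADS 4
       mpirun -np 2 ex5s -mx 32 -my 32 -par 6.0 -snes_monitor

   Here two MPI processes run the PETSc solver, while process 0 uses 4 threads
   to evaluate the user-provided function.
*/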
50:
51: /* ------------------------------------------------------------------------
53: Solid Fuel Ignition (SFI) problem. This problem is modeled by
54: the partial differential equation
55:
56: -Laplacian u - lambda*exp(u) = 0, 0 < x,y < 1,
57:
58: with boundary conditions
59:
60: u = 0 for x = 0, x = 1, y = 0, y = 1.
61:
62: A finite difference approximation with the usual 5-point stencil
63: is used to discretize the boundary value problem to obtain a nonlinear
64: system of equations.
66: The uniprocessor version of this code is snes/examples/tutorials/ex4.c
67: Parallel distributed-memory versions are snes/examples/tutorials/ex5.c and ex5f.F
69: ------------------------------------------------------------------------- */
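/*
   For reference, the discrete equations that FormFunction() below assembles at an
   interior node (i,j), with hx = 1/(mx-1) and hy = 1/(my-1), are

      (hy/hx)*(2*u(i,j) - u(i-1,j) - u(i+1,j))
    + (hx/hy)*(2*u(i,j) - u(i,j-1) - u(i,j+1)) - lambda*hx*hy*exp(u(i,j)) = 0,

   i.e. the usual 5-point stencil scaled by hx*hy; boundary nodes simply get
   f(i,j) = u(i,j), which enforces u = 0 there.
*/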
71: /*
72: Include "petscsnes.h" so that we can use SNES solvers. Note that this
73: file automatically includes:
74: petsc.h       - base PETSc routines   petscvec.h - vectors
75: petscsys.h    - system routines       petscmat.h - matrices
76: petscis.h     - index sets            petscksp.h - Krylov subspace methods
77: petscviewer.h - viewers               petscpc.h  - preconditioners
78: petscsles.h   - linear solvers
79: */
80: #include "petscsnes.h"
82: /*
83: User-defined application context - contains data needed by the
84: application-provided call-back routines FormFunction().
85: */
86: typedef struct {
87: PetscReal param; /* test problem parameter */
88: int mx,my; /* discretization in x, y directions */
89: int rank; /* processor rank */
90: } AppCtx;
92: /*
93: User-defined routines
94: */
95: extern int FormFunction(SNES,Vec,Vec,void*),FormInitialGuess(AppCtx*,Vec);
96: extern int FormFunctionFortran(SNES,Vec,Vec,void*);
98: #undef __FUNCT__
99: #define __FUNCT__ "main"
100: /*
101: The main program is written in C while the user provided function
102: is given in both Fortran and C. The main program could also be written
103: in Fortran; the ONE PROBLEM is that VecGetArray() cannot be called from
104: Fortran on the SGI machines; thus the routine FormFunctionFortran() must
105: be written in C.
106: */
107: int main(int argc,char **argv)
108: {
109: SNES snes; /* nonlinear solver */
110: Vec x,r; /* solution, residual vectors */
111: AppCtx user; /* user-defined work context */
112: int its; /* iterations for convergence */
113: int N,ierr,rstart,rend,*colors,i,ii,ri,rj;
114: int (*fnc)(SNES,Vec,Vec,void*);
115: PetscReal bratu_lambda_max = 6.81,bratu_lambda_min = 0.;
116: MatFDColoring fdcoloring;
117: ISColoring iscoloring;
118: Mat J;
119: PetscScalar zero = 0.0;
120: PetscTruth flg;
122: PetscInitialize(&argc,&argv,(char *)0,help);
123: MPI_Comm_rank(PETSC_COMM_WORLD,&user.rank);
125: /*
126: Initialize problem parameters
127: */
128: user.mx = 4; user.my = 4; user.param = 6.0;
129: PetscOptionsGetInt(PETSC_NULL,"-mx",&user.mx,PETSC_NULL);
130: PetscOptionsGetInt(PETSC_NULL,"-my",&user.my,PETSC_NULL);
131: PetscOptionsGetReal(PETSC_NULL,"-par",&user.param,PETSC_NULL);
132: if (user.param >= bratu_lambda_max || user.param <= bratu_lambda_min) {
133: SETERRQ(1,"Lambda is out of range");
134: }
135: N = user.mx*user.my;
137: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
138: Create nonlinear solver context
139: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
141: SNESCreate(PETSC_COMM_WORLD,&snes);
143: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
144: Create vector data structures; set function evaluation routine
145: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
147: /*
148: The routine VecCreateShared() creates a parallel vector with each processor
149: assigned its own segment, BUT, in addition, the first processor has access to the
150: entire array. This is to allow the user's function to be based on loop level
151: parallelism rather than MPI.
152: */
153: VecCreateShared(PETSC_COMM_WORLD,PETSC_DECIDE,N,&x);
154: VecDuplicate(x,&r);
156: PetscOptionsHasName(PETSC_NULL,"-use_fortran_function",&flg);
157: if (flg) {
158: fnc = FormFunctionFortran;
159: } else {
160: fnc = FormFunction;
161: }
163: /*
164: Set function evaluation routine and vector
165: */
166: SNESSetFunction(snes,r,fnc,&user);
168: /*
169: Currently, when using VecCreateShared() and loop-level parallelism
170: to automatically parallelise the user function, it makes no sense to compute the
171: Jacobian via loop-level parallelism as well, because all the threads
172: would be simultaneously calling MatSetValues(), causing a bottleneck.
174: Thus this example uses the PETSc Jacobian calculations via finite differencing
175: to approximate the Jacobian.
176: */
178: /*
179: Create a coloring of the unknowns so that the Jacobian may be computed efficiently via finite differences
180: */
181: VecGetOwnershipRange(r,&rstart,&rend);
182: PetscMalloc((rend-rstart)*sizeof(int),&colors);
183: for (i=rstart; i<rend; i++) {
184: colors[i - rstart] = 3*((i/user.mx) % 3) + (i % 3);
185: }
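/*
   Why at most 9 colors suffice (an observation added for clarity, not part of the
   original code): two unknowns receive the same color only if their grid rows agree
   mod 3 and their global indices agree mod 3. Any two of the (at most) five unknowns
   coupled by one row of the 5-point stencil set up below differ in one of those two
   quantities, so columns sharing a color never share a matrix row, which is exactly
   what finite-difference coloring requires.
*/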
186: ierr = ISColoringCreate(PETSC_COMM_WORLD,rend-rstart,colors,&iscoloring);
187: PetscFree(colors);
189: /*
190: Create and set the nonzero pattern for the Jacobian: this is not done
191: particularly efficiently. One should process the boundary nodes separately and
192: then use a simple loop for the interior nodes.
193: Note that for this code we use the "natural" numbering of the nodes on the
194: grid (since that is what is good for the user-provided function). In the
195: DA examples we must use the DA numbering, where each processor is assigned a
196: chunk of data.
197: */
198: MatCreateMPIAIJ(PETSC_COMM_WORLD,rend-rstart,rend-rstart,N,
199: N,5,0,0,0,&J);
200: for (i=rstart; i<rend; i++) {
201: rj = i % user.mx; /* column in grid */
202: ri = i / user.mx; /* row in grid */
203: if (ri != 0) { /* first row does not have neighbor below */
204: ii = i - user.mx;
205: MatSetValues(J,1,&i,1,&ii,&zero,INSERT_VALUES);
206: }
207: if (ri != user.my - 1) { /* last row does not have neighbor above */
208: ii = i + user.mx;
209: MatSetValues(J,1,&i,1,&ii,&zero,INSERT_VALUES);
210: }
211: if (rj != 0) { /* first column does not have neighbor to left */
212: ii = i - 1;
213: MatSetValues(J,1,&i,1,&ii,&zero,INSERT_VALUES);
214: }
215: if (rj != user.mx - 1) { /* last column does not have neighbor to right */
216: ii = i + 1;
217: MatSetValues(J,1,&i,1,&ii,&zero,INSERT_VALUES);
218: }
219: MatSetValues(J,1,&i,1,&i,&zero,INSERT_VALUES);
220: }
221: MatAssemblyBegin(J,MAT_FINAL_ASSEMBLY);
222: MatAssemblyEnd(J,MAT_FINAL_ASSEMBLY);
224: /*
225: Create the data structure that SNESDefaultComputeJacobianColor() uses
226: to compute the actual Jacobians via finite differences.
227: */
228: MatFDColoringCreate(J,iscoloring,&fdcoloring);
229: MatFDColoringSetFunction(fdcoloring,(int (*)(void))fnc,&user);
230: MatFDColoringSetFromOptions(fdcoloring);
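/*
   Roughly how the coloring is used (a sketch of the standard technique, not a
   description of PETSc internals): all unknowns of one color are perturbed at once,
   so one extra function evaluation per color (at most 9 here) recovers the whole
   Jacobian,

      J(:,k) ~ (F(x + h*sum_{j of color c} e_j) - F(x))/h   for every column k of color c,

   where each row of column k can be read off safely because no other column of
   color c has a nonzero in that row.
*/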
231: /*
232: Tell SNES to use the routine SNESDefaultComputeJacobianColor()
233: to compute Jacobians.
234: */
235: SNESSetJacobian(snes,J,J,SNESDefaultComputeJacobianColor,fdcoloring);
236: ISColoringDestroy(iscoloring);
239: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
240: Customize nonlinear solver; set runtime options
241: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
243: /*
244: Set runtime options (e.g., -snes_monitor -snes_rtol <rtol> -ksp_type <type>)
245: */
246: SNESSetFromOptions(snes);
248: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
249: Evaluate initial guess; then solve nonlinear system
250: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
251: /*
252: Note: The user should initialize the vector, x, with the initial guess
253: for the nonlinear solver prior to calling SNESSolve(). In particular,
254: to employ an initial guess of zero, the user should explicitly set
255: this vector to zero by calling VecSet().
256: */
257: FormInitialGuess(&user,x);
258: SNESSolve(snes,x,&its);
259: PetscPrintf(PETSC_COMM_WORLD,"Number of Newton iterations = %d\n",its);
261: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
262: Free work space. All PETSc objects should be destroyed when they
263: are no longer needed.
264: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
265: VecDestroy(x);
266: VecDestroy(r);
267: SNESDestroy(snes);
268: PetscFinalize();
270: return 0;
271: }
272: /* ------------------------------------------------------------------- */
274: #undef __FUNCT__
275: #define __FUNCT__ "FormInitialGuess"
276: /*
277: FormInitialGuess - Forms initial approximation.
279: Input Parameters:
280: user - user-defined application context
281: X - vector
283: Output Parameter:
284: X - vector
285: */
286: int FormInitialGuess(AppCtx *user,Vec X)
287: {
288: int i,j,row,mx,my,ierr;
289: PetscReal one = 1.0,lambda,temp1,temp,hx,hy,hxdhy,hydhx,sc;
290: PetscScalar *x;
292: /*
293: Process 0 has to wait for all other processes to get here
294: before proceeding to write in the shared vector
295: */
296: PetscBarrier((PetscObject)X);
297: if (user->rank) {
298: /*
299: All the non-busy processors have to wait here for process 0 to finish
300: filling in the initial guess; otherwise they could start using the vector values
301: before they have been computed
302: */
303: PetscBarrier((PetscObject)X);
304: return 0;
305: }
307: mx = user->mx; my = user->my; lambda = user->param;
308: hx = one/(PetscReal)(mx-1); hy = one/(PetscReal)(my-1);
309: sc = hx*hy*lambda; hxdhy = hx/hy; hydhx = hy/hx;
310: temp1 = lambda/(lambda + one);
312: /*
313: Get a pointer to vector data.
314: - For default PETSc vectors, VecGetArray() returns a pointer to
315: the data array. Otherwise, the routine is implementation dependent.
316: - You MUST call VecRestoreArray() when you no longer need access to
317: the array.
318: */
319: VecGetArray(X,&x);
321: /*
322: Compute initial guess over the locally owned part of the grid
323: */
324: #pragma arl(4)
325: #pragma distinct (*x,*f)
326: #pragma no side effects (sqrt)
327: for (j=0; j<my; j++) {
328: temp = (PetscReal)(PetscMin(j,my-j-1))*hy;
329: for (i=0; i<mx; i++) {
330: row = i + j*mx;
331: if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
332: x[row] = 0.0;
333: continue;
334: }
335: x[row] = temp1*sqrt(PetscMin((PetscReal)(PetscMin(i,mx-i-1))*hx,temp));
336: }
337: }
339: /*
340: Restore vector
341: */
342: VecRestoreArray(X,&x);
344: PetscBarrier((PetscObject)X);
345: return 0;
346: }
347: /* ------------------------------------------------------------------- */
348: #undef __FUNCT__
349: #define __FUNCT__ "FormFunction"
350: /*
351: FormFunction - Evaluates nonlinear function, F(x).
353: Input Parameters:
354: . snes - the SNES context
355: . X - input vector
356: . ptr - optional user-defined context, as set by SNESSetFunction()
358: Output Parameter:
359: . F - function vector
360: */
361: int FormFunction(SNES snes,Vec X,Vec F,void *ptr)
362: {
363: AppCtx *user = (AppCtx*)ptr;
364: int ierr,i,j,row,mx,my;
365: PetscReal two = 2.0,one = 1.0,lambda,hx,hy,hxdhy,hydhx,sc;
366: PetscScalar u,uxx,uyy,*x,*f;
368: /*
369: Process 0 has to wait for all other processes to get here
370: before proceeding to write in the shared vector
371: */
372: PetscBarrier((PetscObject)X);
374: if (user->rank) {
375: /*
376: All the non-busy processors have to wait here for process 0 to finish
377: evaluating the function; otherwise they will start using the vector values
378: before they have been computed
379: */
380: PetscBarrier((PetscObject)X);
381: return 0;
382: }
384: mx = user->mx; my = user->my; lambda = user->param;
385: hx = one/(PetscReal)(mx-1); hy = one/(PetscReal)(my-1);
386: sc = hx*hy*lambda; hxdhy = hx/hy; hydhx = hy/hx;
388: /*
389: Get pointers to vector data
390: */
391: VecGetArray(X,&x);
392: VecGetArray(F,&f);
394: /*
395: The following pragmas tell the SGI compiler that x and f contain no overlapping
396: regions and thus it can use additional optimizations.
397: */
398: #pragma arl(4)
399: #pragma distinct (*x,*f)
400: #pragma no side effects (exp)
402: /*
403: Compute function over the entire grid
404: */
405: for (j=0; j<my; j++) {
406: for (i=0; i<mx; i++) {
407: row = i + j*mx;
408: if (i == 0 || j == 0 || i == mx-1 || j == my-1) {
409: f[row] = x[row];
410: continue;
411: }
412: u = x[row];
413: uxx = (two*u - x[row-1] - x[row+1])*hydhx;
414: uyy = (two*u - x[row-mx] - x[row+mx])*hxdhy;
415: f[row] = uxx + uyy - sc*exp(u);
416: }
417: }
419: /*
420: Restore vectors
421: */
422: VecRestoreArray(X,&x);
423: VecRestoreArray(F,&f);
425: PetscLogFlops(11*(mx-2)*(my-2));
426: PetscBarrier((PetscObject)X);
427: return 0;
428: }
430: #if defined(PETSC_HAVE_FORTRAN_CAPS)
431: #define applicationfunctionfortran_ APPLICATIONFUNCTIONFORTRAN
432: #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
433: #define applicationfunctionfortran_ applicationfunctionfortran
434: #endif
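/*
   A sketch of the C prototype implied by the call in FormFunctionFortran() below;
   the Fortran kernel itself lives in an accompanying Fortran source file that is
   not shown here, and its actual declaration may differ.
*/
extern void applicationfunctionfortran_(PetscReal*,int*,int*,PetscScalar*,PetscScalar*,int*);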
436: /* ------------------------------------------------------------------- */
437: #undef __FUNCT__
438: #define __FUNCT__ "FormFunctionFortran"
439: /*
440: FormFunctionFortran - Evaluates nonlinear function, F(x) in Fortran.
442: */
443: int FormFunctionFortran(SNES snes,Vec X,Vec F,void *ptr)
444: {
445: AppCtx *user = (AppCtx*)ptr;
446: int ierr;
447: PetscScalar *x,*f;
449: /*
450: Process 0 has to wait for all other processes to get here
451: before proceeding to write in the shared vector
452: */
453: PetscBarrier((PetscObject)snes);
454: if (!user->rank) {
455: VecGetArray(X,&x);
456: VecGetArray(F,&f);
457: applicationfunctionfortran_(&user->param,&user->mx,&user->my,x,f,&ierr);
458: VecRestoreArray(X,&x);
459: VecRestoreArray(F,&f);
460: PetscLogFlops(11*(user->mx-2)*(user->my-2));
461: }
462: /*
463: All the non-busy processors have to wait here for process 0 to finish
464: evaluating the function; otherwise they will start using the vector values
465: before they have been computed
466: */
467: PetscBarrier((PetscObject)snes);
468: return 0;
469: }