Actual source code: ex14f.F

!
!    "$Id: ex14f.F,v 1.24 2001/08/07 03:04:00 balay Exp $";
!
!  Solves a nonlinear system in parallel with a user-defined
!  Newton method that uses SLES to solve the linearized Newton systems.
!  This solver is a very simplistic inexact Newton method.  The intent of
!  this code is to demonstrate the repeated solution of linear systems
!  with the same nonzero pattern.
!
!  This is NOT the recommended approach for solving nonlinear problems with PETSc!
!  We urge users to employ the SNES component for solving nonlinear problems whenever
!  possible, as it offers many advantages over coding nonlinear solvers independently.
!
!  We solve the Bratu (SFI - solid fuel ignition) problem in a 2D rectangular
!  domain, using distributed arrays (DAs) to partition the parallel grid.
!
!  The command line options include:
!  -par <parameter>, where <parameter> indicates the problem's nonlinearity
!     problem SFI:  <parameter> = Bratu parameter (0 <= par <= 6.81)
!  -mx <xg>, where <xg> = number of grid points in the x-direction
!  -my <yg>, where <yg> = number of grid points in the y-direction
!  -Nx <npx>, where <npx> = number of processors in the x-direction
!  -Ny <npy>, where <npy> = number of processors in the y-direction
!  -mf, use a matrix-free approach for the matrix-vector product
!
!/*T
!   Concepts: SLES^writing a user-defined nonlinear solver
!   Concepts: DA^using distributed arrays
!   Processors: n
!T*/
!  ------------------------------------------------------------------------
!
!    Solid Fuel Ignition (SFI) problem.  This problem is modeled by
!    the partial differential equation
!
!            -Laplacian u - lambda*exp(u) = 0,  0 < x,y < 1,
!
!    with boundary conditions
!
!             u = 0  for  x = 0, x = 1, y = 0, y = 1.
!
!    A finite difference approximation with the usual 5-point stencil
!    is used to discretize the boundary value problem to obtain a nonlinear
!    system of equations.
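!
!    As a concrete sketch (using the same scalings as in ComputeFunction()
!    below, with hx = 1/(mx-1) and hy = 1/(my-1)), the discrete equation
!    at an interior node (i,j) reads
!
!       (hy/hx)*(2*u(i,j) - u(i-1,j) - u(i+1,j))
!     + (hx/hy)*(2*u(i,j) - u(i,j-1) - u(i,j+1))
!     - hx*hy*lambda*exp(u(i,j)) = 0
!
!    with u = 0 imposed at boundary nodes.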
!
!    The SNES version of this problem is:  snes/examples/tutorials/ex5f.F
!
!  -------------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"
#include "include/finclude/petscda.h"

      MPI_Comm comm
      SLES     sles
      Vec      X,Y,F,localX,localF
      Mat      J,B
      DA       da
      integer  Nx,Ny,flg,N,ierr,mx,my
      integer  usemf,nooutput
      common   /mycommon/ B,mx,my,localX,localF,da
!
!
!      This is the routine to use for the matrix-free approach
!
      external mymult

!     --------------- Data to define nonlinear solver --------------
      double precision   rtol,xtol,ttol
      double precision   fnorm,ynorm,xnorm
      integer            max_nonlin_its
      integer            lin_its
      integer            i,m
      PetscScalar        mone

      mone           = -1.d0
      rtol           = 1.d-8
      xtol           = 1.d-8
      max_nonlin_its = 10

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      comm = PETSC_COMM_WORLD

!  Initialize problem parameters

!
      mx = 4
      my = 4
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-mx',mx,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-my',my,flg,ierr)
      N = mx*my

      nooutput = 0
      call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-no_output',       &
     &     nooutput,ierr)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create linear solver context
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call SLESCreate(comm,sles,ierr)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create vector data structures
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!
!  Create distributed array (DA) to manage parallel grid and vectors
!
      Nx = PETSC_DECIDE
      Ny = PETSC_DECIDE
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-Nx',Nx,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-Ny',Ny,flg,ierr)
      call DACreate2d(comm,DA_NONPERIODIC,DA_STENCIL_STAR,mx,           &
     &     my,Nx,Ny,1,1,PETSC_NULL_INTEGER,PETSC_NULL_INTEGER,          &
     &     da,ierr)
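!
!  (In the DACreate2d() call above, the two arguments equal to 1 are the
!  number of degrees of freedom per node and the stencil width, matching
!  the star stencil used by the 5-point discretization.)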

!
!  Extract global and local vectors from DA then duplicate for remaining
!  vectors that are the same types
!
       call DACreateGlobalVector(da,X,ierr)
       call DACreateLocalVector(da,localX,ierr)
       call VecDuplicate(X,F,ierr)
       call VecDuplicate(X,Y,ierr)
       call VecDuplicate(localX,localF,ierr)


!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Create matrix data structure for Jacobian
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     Note:  For the parallel case, vectors and matrices MUST be partitioned
!     accordingly.  When using distributed arrays (DAs) to create vectors,
!     the DAs determine the problem partitioning.  We must explicitly
!     specify the local matrix dimensions upon its creation for compatibility
!     with the vector distribution.  Thus, the generic MatCreate() routine
!     is NOT sufficient when working with distributed arrays.
!
!     Note: Here we only approximately preallocate storage space for the
!     Jacobian.  See the users manual for a discussion of better techniques
!     for preallocating matrix memory.
!
      call VecGetLocalSize(X,m,ierr)
      call MatCreateMPIAIJ(comm,m,m,N,N,5,PETSC_NULL_INTEGER,3,         &
     &     PETSC_NULL_INTEGER,B,ierr)
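!
!     (The preallocation figures reflect the 5-point stencil: at most 5
!     nonzeros per row in the diagonal block, and a rough bound of 3 per
!     row in the off-diagonal block.)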

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     If usemf is on, the matrix-vector product is done via a matrix-free
!     approach.  Note this is just an example, and not realistic, because
!     we still use the actual assembled matrix; in reality one would
!     provide a subroutine that applies the matrix-vector product
!     directly instead of calling MatMult().
!     Note: we put B into a common block so it will be visible to the
!     mymult() routine
      usemf = 0
      call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-mf',usemf,ierr)
      if (usemf .eq. 1) then
         call MatCreateShell(comm,m,m,N,N,PETSC_NULL_INTEGER,J,ierr)
         call MatShellSetOperation(J,MATOP_MULT,mymult,ierr)
      else
!        If not doing matrix-free, then the matrix operator, J, and the
!        matrix used to construct the preconditioner, B, are the same
        J = B
      endif
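!
!     (MatCreateShell() creates a matrix whose operations are supplied
!     by the user; registering mymult() for MATOP_MULT means a MatMult()
!     on J dispatches to mymult().)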

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Customize linear solver; set runtime options
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!     Set runtime options (e.g., -ksp_monitor -ksp_rtol <rtol> -ksp_type <type>)
!
       call SLESSetFromOptions(sles,ierr)
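!
!     For example, one might run the executable with (a hypothetical
!     invocation; the option names are standard PETSc options):
!        mpirun -np 4 ex14f -mx 16 -my 16 -ksp_monitor -ksp_type gmres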

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Evaluate initial guess
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

       call FormInitialGuess(X,ierr)
       call ComputeFunction(X,F,ierr)
       call VecNorm(F,NORM_2,fnorm,ierr)
       ttol = fnorm*rtol
       if (nooutput .eq. 0) then
         print*, 'Initial function norm ',fnorm
       endif

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Solve nonlinear system with a user-defined method
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

!  This solver is a very simplistic inexact Newton method, with no
!  damping strategies or bells and whistles. The intent of this code
!  is merely to demonstrate the repeated solution with SLES of linear
!  systems with the same nonzero structure.
!
!  This is NOT the recommended approach for solving nonlinear problems
!  with PETSc!  We urge users to employ the SNES component for solving
!  nonlinear problems whenever possible with application codes, as it
!  offers many advantages over coding nonlinear solvers independently.

       do 10 i=0,max_nonlin_its

!  Compute the Jacobian matrix.  Since the nonzero structure of the
!  Jacobian does not change between iterations, SAME_NONZERO_PATTERN
!  is passed to SLESSetOperators() below.

         call ComputeJacobian(X,B,ierr)

!  Solve J Y = F, where J is the Jacobian matrix.
!    - First, set the SLES linear operators.  Unless -mf is used, the
!      matrix that defines the linear system also serves as the
!      preconditioning matrix.
!    - Then solve the Newton system.

         call SLESSetOperators(sles,J,B,SAME_NONZERO_PATTERN,ierr)
         call SLESSolve(sles,F,Y,lin_its,ierr)

!  Compute updated iterate
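!  Note: in this version of PETSc, the Fortran call VecAYPX(alpha,x,y)
!  computes y = x + alpha*y.  With alpha = -1 the VecAYPX() call below
!  therefore forms Y = X - Y, i.e. the Newton iterate
!  x_new = x - J(x)^(-1) F(x), and VecCopy() moves the result into X.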

         call VecNorm(Y,NORM_2,ynorm,ierr)
         call VecAYPX(mone,X,Y,ierr)
         call VecCopy(Y,X,ierr)
         call VecNorm(X,NORM_2,xnorm,ierr)
         if (nooutput .eq. 0) then
           print*,'linear solve iterations = ',lin_its,' xnorm = ',     &
     &         xnorm,' ynorm = ',ynorm
         endif

!  Evaluate nonlinear function at new location

         call ComputeFunction(X,F,ierr)
         call VecNorm(F,NORM_2,fnorm,ierr)
         if (nooutput .eq. 0) then
           print*, 'Iteration ',i+1,' function norm ',fnorm
         endif

!  Test for convergence
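!  (Since ttol = rtol * (initial fnorm), this is a relative-decrease
!  test on the residual norm.)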

       if (fnorm .le. ttol) then
         if (nooutput .eq. 0) then
           print*,'Converged: function norm ',fnorm,' tolerance ',ttol
         endif
         goto 20
       endif
 10   continue
 20   continue

      write(6,100) i+1
 100  format('Number of Newton iterations =',I2)

!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!     Free work space.  All PETSc objects should be destroyed when they
!     are no longer needed.
!  - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

       call MatDestroy(B,ierr)
       if (usemf .ne. 0) then
         call MatDestroy(J,ierr)
       endif
       call VecDestroy(localX,ierr)
       call VecDestroy(X,ierr)
       call VecDestroy(Y,ierr)
       call VecDestroy(localF,ierr)
       call VecDestroy(F,ierr)
       call SLESDestroy(sles,ierr)
       call DADestroy(da,ierr)
       call PetscFinalize(ierr)
       end

! -------------------------------------------------------------------
!
!   FormInitialGuess - Forms initial approximation.
!
!   Input Parameter:
!   X - vector
!
!   Output Parameter:
!   X - vector containing the initial approximation
!
      subroutine FormInitialGuess(X,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"
#include "include/finclude/petscda.h"
      integer          ierr
      PetscOffset      idx
      Vec              X,localX,localF
      integer          i,j,row,mx,my, xs,ys,xm
      integer          ym,gxm,gym,gxs,gys
      double precision one,lambda,temp1,temp,hx,hy
      double precision hxdhy,hydhx,sc
      PetscScalar      xx(1)
      DA               da
      Mat              B
      common   /mycommon/ B,mx,my,localX,localF,da

      one    = 1.d0
      lambda = 6.d0
      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy*lambda
      hxdhy  = hx/hy
      hydhx  = hy/hx
      temp1  = lambda/(lambda + one)

!  Get a pointer to vector data.
!    - VecGetArray() returns a pointer to the data array.
!    - You MUST call VecRestoreArray() when you no longer need access to
!      the array.
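!    - In the Fortran interface, VecGetArray() returns an integer offset
!      (here idx) into the user-declared array xx(1); entry k of the
!      local array is then addressed as xx(idx+k), as in the loop below.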
       call VecGetArray(localX,xx,idx,ierr)

!  Get local grid boundaries (for 2-dimensional DA):
!    xs, ys   - starting grid indices (no ghost points)
!    xm, ym   - widths of local grid (no ghost points)
!    gxs, gys - starting grid indices (including ghost points)
!    gxm, gym - widths of local grid (including ghost points)

       call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,             &
     &      PETSC_NULL_INTEGER,ierr)
       call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,    &
     &      PETSC_NULL_INTEGER,ierr)

!  Compute initial guess over the locally owned part of the grid

      do 30 j=ys,ys+ym-1
        temp = (min(j,my-j-1))*hy
        do 40 i=xs,xs+xm-1
          row = i - gxs + (j - gys)*gxm + 1
          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. mx-1 .or.              &
     &        j .eq. my-1) then
            xx(idx+row) = 0.d0
            goto 40
          endif
          xx(idx+row) = temp1*sqrt(min((min(i,mx-i-1))*hx,temp))
 40     continue
 30   continue

!     Restore vector

       call VecRestoreArray(localX,xx,idx,ierr)

!     Insert values into global vector

       call DALocalToGlobal(da,localX,INSERT_VALUES,X,ierr)
       return
       end

! -------------------------------------------------------------------
!
!   ComputeFunction - Evaluates nonlinear function, F(x).
!
!   Input Parameter:
!.  X - input vector
!
!   Output Parameter:
!.  F - function vector
!
      subroutine  ComputeFunction(X,F,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"
#include "include/finclude/petscda.h"

      Vec              X,F,localX,localF
      integer          gys,gxm,gym
      PetscOffset      idx,idf
      integer          ierr,i,j,row,mx,my,xs,ys,xm,ym,gxs
      double precision two,one,lambda,hx
      double precision hy,hxdhy,hydhx,sc
      PetscScalar      u,uxx,uyy,xx(1),ff(1)
      DA               da
      Mat              B
      common   /mycommon/ B,mx,my,localX,localF,da

      two    = 2.d0
      one    = 1.d0
      lambda = 6.d0

      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy*lambda
      hxdhy  = hx/hy
      hydhx  = hy/hx

!  Scatter ghost points to local vector, using the 2-step process
!     DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.
!
      call DAGlobalToLocalBegin(da,X,INSERT_VALUES,localX,ierr)
      call DAGlobalToLocalEnd(da,X,INSERT_VALUES,localX,ierr)

!  Get pointers to vector data

      call VecGetArray(localX,xx,idx,ierr)
      call VecGetArray(localF,ff,idf,ierr)

!  Get local grid boundaries

      call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,              &
     &     PETSC_NULL_INTEGER,ierr)
      call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,     &
     &     PETSC_NULL_INTEGER,ierr)

!  Compute function over the locally owned part of the grid

      do 50 j=ys,ys+ym-1

        row = (j - gys)*gxm + xs - gxs
        do 60 i=xs,xs+xm-1
          row = row + 1

          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. mx-1 .or.              &
     &        j .eq. my-1) then
            ff(idf+row) = xx(idx+row)
            goto 60
          endif
          u   = xx(idx+row)
          uxx = (two*u - xx(idx+row-1) - xx(idx+row+1))*hydhx
          uyy = (two*u - xx(idx+row-gxm) - xx(idx+row+gxm))*hxdhy
          ff(idf+row) = uxx + uyy - sc*exp(u)
 60     continue
 50   continue

!  Restore vectors

       call VecRestoreArray(localX,xx,idx,ierr)
       call VecRestoreArray(localF,ff,idf,ierr)

!  Insert values into global vector

       call DALocalToGlobal(da,localF,INSERT_VALUES,F,ierr)
       return
       end

! -------------------------------------------------------------------
!
!   ComputeJacobian - Evaluates Jacobian matrix.
!
!   Input Parameter:
!   X - input vector
!
!   Output Parameter:
!   jac - Jacobian matrix
!
!   Notes:
!   Due to grid point reordering with DAs, we must always work
!   with the local grid points, and then transform them to the new
!   global numbering with the 'ltog' mapping (via DAGetGlobalIndices()).
!   We cannot work directly with the global numbers for the original
!   uniprocessor grid!
!
      subroutine ComputeJacobian(X,jac,ierr)
      implicit none

!     petsc.h       - base PETSc routines   petscvec.h - vectors
!     petscsys.h    - system routines       petscmat.h - matrices
!     petscis.h     - index sets            petscksp.h - Krylov subspace methods
!     petscviewer.h - viewers               petscpc.h  - preconditioners

#include "include/finclude/petsc.h"
#include "include/finclude/petscis.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscpc.h"
#include "include/finclude/petscsles.h"
#include "include/finclude/petscda.h"

      Vec         X
      Mat         jac
      Vec         localX,localF
      DA          da
      integer     ltog(1)
      PetscOffset idltog,idx
      integer     ierr,i,j,row,mx,my,col(5)
      integer     nloc,xs,ys,xm,ym,gxs,gys,gxm,gym,grow
      PetscScalar two,one,lambda,v(5),hx,hy,hxdhy
      PetscScalar hydhx,sc,xx(1)
      Mat         B
      common   /mycommon/ B,mx,my,localX,localF,da

      one    = 1.d0
      two    = 2.d0
      hx     = one/(mx-1)
      hy     = one/(my-1)
      sc     = hx*hy
      hxdhy  = hx/hy
      hydhx  = hy/hx
      lambda = 6.d0

!  Scatter ghost points to local vector, using the 2-step process
!     DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.

      call DAGlobalToLocalBegin(da,X,INSERT_VALUES,localX,ierr)
      call DAGlobalToLocalEnd(da,X,INSERT_VALUES,localX,ierr)

!  Get pointer to vector data

      call VecGetArray(localX,xx,idx,ierr)

!  Get local grid boundaries

      call DAGetCorners(da,xs,ys,PETSC_NULL_INTEGER,xm,ym,              &
     &     PETSC_NULL_INTEGER,ierr)
      call DAGetGhostCorners(da,gxs,gys,PETSC_NULL_INTEGER,gxm,gym,     &
     &                        PETSC_NULL_INTEGER,ierr)

!  Get the global node numbers for all local nodes, including ghost points

      call DAGetGlobalIndices(da,nloc,ltog,idltog,ierr)
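!
!  (As with VecGetArray(), the Fortran interface returns an offset
!  idltog; the global number of local node k is ltog(idltog+k).)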

!  Compute entries for the locally owned part of the Jacobian.
!   - Currently, all PETSc parallel matrix formats are partitioned by
!     contiguous chunks of rows across the processors. The 'grow'
!     parameter computed below specifies the global row number
!     corresponding to each local grid point.
!   - Each processor needs to insert only elements that it owns
!     locally (but any non-local elements will be sent to the
!     appropriate processor during matrix assembly).
!   - Always specify global rows and columns of matrix entries.
!   - Here, we set all entries for a particular row at once.

      do 10 j=ys,ys+ym-1
        row = (j - gys)*gxm + xs - gxs
        do 20 i=xs,xs+xm-1
          row = row + 1
          grow = ltog(idltog+row)
          if (i .eq. 0 .or. j .eq. 0 .or. i .eq. (mx-1) .or.            &
     &        j .eq. (my-1)) then
             call MatSetValues(jac,1,grow,1,grow,one,INSERT_VALUES,ierr)
             go to 20
          endif
          v(1)   = -hxdhy
          col(1) = ltog(idltog+row - gxm)
          v(2)   = -hydhx
          col(2) = ltog(idltog+row - 1)
          v(3)   = two*(hydhx + hxdhy) - sc*lambda*exp(xx(idx+row))
          col(3) = grow
          v(4)   = -hydhx
          col(4) = ltog(idltog+row + 1)
          v(5)   = -hxdhy
          col(5) = ltog(idltog+row + gxm)
          call MatSetValues(jac,1,grow,5,col,v,INSERT_VALUES,ierr)
 20     continue
 10   continue

!  Assemble matrix, using the 2-step process:
!    MatAssemblyBegin(), MatAssemblyEnd().
!  By placing code between these two statements, computations can be
!  done while messages are in transition.
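!  (Here the VecRestoreArray() call is placed between the two assembly
!  calls as a small example of such overlap.)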

      call MatAssemblyBegin(jac,MAT_FINAL_ASSEMBLY,ierr)
      call VecRestoreArray(localX,xx,idx,ierr)
      call MatAssemblyEnd(jac,MAT_FINAL_ASSEMBLY,ierr)
      return
      end


! -------------------------------------------------------------------
!
!   MyMult - user-provided matrix-vector multiply routine
!
!   Input Parameters:
!.  J - shell matrix
!.  X - input vector
!
!   Output Parameter:
!.  F - result of the matrix-vector product
!
      subroutine  MyMult(J,X,F,ierr)
      implicit none

#include "include/finclude/petsc.h"
#include "include/finclude/petscvec.h"
#include "include/finclude/petscmat.h"
#include "include/finclude/petscda.h"

      Mat     J,B
      Vec     X,F
      integer ierr,mx,my
      DA      da
      Vec     localX,localF

      common   /mycommon/ B,mx,my,localX,localF,da
!
!       Here we use the actual assembled matrix B; users would
!     instead write their own matrix-vector product routine
!
      call MatMult(B,X,F,ierr)
      return
      end