Actual source code: ex26.c

/*$Id: ex2.c,v 1.94 2001/08/07 21:30:54 bsmith Exp $*/

/* Program usage:  mpirun -np <procs> ex26 [-help] [all PETSc options] */

static char help[] = "Solves a linear system in parallel with ESI.\n\
Input parameters include:\n\
  -n <mesh_n>       : number of mesh points in x-direction\n\n";

/*T
   Concepts: ESI^basic parallel example;
   Concepts: ESI^Laplacian, 1d
   Concepts: Laplacian, 1d
   Processors: n
T*/

/* 
   Note that the usual PETSc objects all work. Those related to vec, mat,
   pc, ksp, and sles are prefixed with esi_.
*/
#include "esi/ESI.h"

#include "petsc.h"

#undef __FUNCT__
#define __FUNCT__ "main"
int main(int argc,char **args)
{
  ::esi::IndexSpace<int>                      *indexspace;
  ::esi::Vector<double,int>                   *x,*b;
  ::esi::Operator<double,int>                 *op;
  ::esi::SolverIterative<double,int>          *solver;
  ::esi::MatrixRowWriteAccess<double,int>     *A;
  int                                         ierr,i,n = 3,Istart,Iend,c[3],N;
  double                                      v[3],*barray;
  ::esi::IndexSpace<int>::Factory             *ifactory;
  ::esi::Vector<double,int>::Factory          *vfactory;
  ::esi::Operator<double,int>::Factory        *ofactory;
  ::esi::SolverIterative<double,int>::Factory *sfactory;     /* linear solver factory */

  PetscInitialize(&argc,&args,(char *)0,help);
  /*
           Load up the factories we will need to create our objects
  */
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::IndexSpace",reinterpret_cast<void *&>(ifactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::Matrix",reinterpret_cast<void *&>(ofactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::Vector",reinterpret_cast<void *&>(vfactory));
  ESILoadFactory("MPI",(void*)&PETSC_COMM_WORLD,"esi::petsc::SolverIterative",reinterpret_cast<void *&>(sfactory));
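
  /*
     Error checking is omitted in this example for brevity: the ierr
     return codes captured below are never tested.  In production PETSc
     code each call would be checked, e.g. (a minimal sketch using
     PETSc's standard error macro):

         ierr = indexspace->getGlobalSize(N);CHKERRQ(ierr);
  */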

  PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);
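
  /*
     For example (hypothetical values), a run on four processes with a
     100-point mesh:

         mpirun -np 4 ex26 -n 100

     Since the solver objects here are built on PETSc, the usual PETSc
     runtime options (see the -help output) can be appended as well.
  */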

  /*
        Define the layout of the vectors and matrices across the processors
  */
  ifactory->create("MPI",(void*)&PETSC_COMM_WORLD,n,PETSC_DECIDE,PETSC_DECIDE,indexspace);
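
  /*
     Passing PETSC_DECIDE for the local sizes lets PETSc choose the
     partitioning.  For example, n = 7 on two processes would typically
     be split into contiguous chunks of 4 and 3 rows, since PETSc's
     default layout gives the extra rows to the lower-ranked processes.
  */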

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
         Compute the matrix and right-hand-side vector that define
         the linear system, Ax = b.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /* 
     Create the parallel matrix, specifying only its global dimensions.

     Performance tuning note:  For problems of substantial size,
     preallocation of matrix memory is crucial for attaining good 
     performance.  Preallocation is not possible via the generic
     matrix creation routine used here; see the sketch below for a
     native PETSc alternative.
  */
  ofactory->create(*indexspace,*indexspace,op);
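
  /*
     For comparison, a sketch of preallocation with the native PETSc API
     (PETSc 2.1-era signature; Anative is a hypothetical Mat): for this
     tridiagonal matrix at most 3 nonzeros per row lie in the diagonal
     block and 1 in the off-diagonal block, so

         Mat Anative;
         ierr = MatCreateMPIAIJ(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,
                                N,N,3,PETSC_NULL,1,PETSC_NULL,&Anative);

     would avoid repeated memory allocations during assembly.
  */
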
  /* 
     ESI parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.  Determine which
     rows of the matrix are locally owned. 
  */
  ierr  = indexspace->getLocalPartitionOffset(Istart);
  ierr  = indexspace->getLocalSize(Iend);
  ierr  = indexspace->getGlobalSize(N);
  Iend += Istart;

  /* 
     Set matrix elements for the 1-D, three-point stencil in parallel.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly). 
      - Always specify global rows and columns of matrix entries.
   */
  op->getInterface("esi::MatrixRowWriteAccess",reinterpret_cast<void *&>(A));
  if (Istart == 0) {
    v[0] = 1.0;
    A->copyIntoRow(Istart,v,&Istart,1);
    Istart++;
  }
  if (Iend == N) {
    Iend--;
    v[0] = 1.0;
    A->copyIntoRow(Iend,v,&Iend,1);
  }
  v[0] = -1.0; v[1] = 2.0; v[2] = -1.0;
  for (i=Istart; i<Iend; i++) {
    c[0] = i-1; c[1] = i; c[2] = i+1;
    A->copyIntoRow(i,v,c,3);
  }
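
  /*
     For example, with n = 5 the assembled matrix is

          1   0   0   0   0
         -1   2  -1   0   0
          0  -1   2  -1   0
          0   0  -1   2  -1
          0   0   0   0   1

     i.e. the three-point 1-D Laplacian stencil with identity rows at the
     two boundary points.
  */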

  /* 
     Assemble the matrix.  A single call to loadComplete() finalizes the
     assembly; see the sketch below for the native PETSc counterpart.
  */
  A->loadComplete();
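
  /*
     In native PETSc this is the familiar two-step assembly (a sketch,
     with Anative as in the hypothetical preallocation example above):

         ierr = MatAssemblyBegin(Anative,MAT_FINAL_ASSEMBLY);
         ierr = MatAssemblyEnd(Anative,MAT_FINAL_ASSEMBLY);

     loadComplete() performs the equivalent communication and
     finalization in one call.
  */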


  /* 
     Create parallel vectors.
      - We form one vector from scratch and then clone it as needed.
      - When solving a linear system, the vectors and matrices MUST
        be partitioned compatibly.  Creating them all from the same
        index space, as is done here, guarantees compatible layouts.
      - The user can alternatively specify the local vector and matrix
        dimensions when a more sophisticated partitioning is needed
        (replacing the PETSC_DECIDE arguments in the index-space
        creation above).
  */
  vfactory->create(*indexspace,x);
  x->clone(b);

  b->getCoefPtrReadWriteLock(barray);
  for (i=Istart; i<Iend; i++) {
    barray[i-Istart] = i;
  }
  b->releaseCoefPtrLock(barray);
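
  /*
     The native PETSc analogue of this lock/release pattern is
     VecGetArray()/VecRestoreArray(), e.g. (a sketch, with bnative a
     hypothetical Vec of the same layout):

         PetscScalar *ba;
         ierr = VecGetArray(bnative,&ba);
         for (i=Istart; i<Iend; i++) ba[i-Istart] = i;
         ierr = VecRestoreArray(bnative,&ba);
  */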

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
                Create the linear solver
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /* 
     Create linear solver context
  */
  sfactory->create("MPI",(void*)&PETSC_COMM_WORLD,solver);

  /* 
     Set the operator. Here the matrix that defines the linear system
     also serves as the preconditioning matrix.
  */
  solver->setOperator(*op);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
                      Solve the linear system
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  solver->solve(*b,*x);
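
  /*
     Convergence is not checked here.  Because the ESI solver is layered
     on PETSc's iterative solvers, runtime monitoring options such as

         mpirun -np 4 ex26 -n 100 -ksp_monitor

     should report the residual norm at each iteration (hypothetical
     invocation; the exact option set depends on the PETSc version).
  */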


  /*
     Free the ESI objects and factories, then call PetscFinalize() before
     exiting the program.  PetscFinalize()
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary). 
  */
  indexspace->deleteReference();
  op->deleteReference();
  x->deleteReference();
  b->deleteReference();
  solver->deleteReference();
  delete ifactory;
  delete vfactory;
  delete ofactory;
  delete sfactory;
  PetscFinalize();
  return 0;
}