Actual source code: pbvec.c
petsc-3.7.5 2017-01-01
/*
   This file contains routines for Parallel vector operations.
 */
#include <petscoptions.h>
#include <../src/vec/vec/impls/mpi/pvecimpl.h>   /*I  "petscvec.h"   I*/
static PetscErrorCode VecDot_MPI(Vec xin,Vec yin,PetscScalar *z)
{
  PetscScalar sum,work;

  VecDot_Seq(xin,yin,&work);
  MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  *z = sum;
  return(0);
}
static PetscErrorCode VecTDot_MPI(Vec xin,Vec yin,PetscScalar *z)
{
  PetscScalar sum,work;

  VecTDot_Seq(xin,yin,&work);
  MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
  *z = sum;
  return(0);
}
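/*
   A minimal usage sketch of the reduction pattern above: VecDot_Seq computes the
   locally owned part on each process and a single MPIU_Allreduce combines the
   partial sums, so every rank receives the same global value.  The vectors x, y
   and the global length 100 are illustrative assumptions; error checking is
   omitted for brevity.

     Vec         x,y;
     PetscScalar dot;

     VecCreateMPI(PETSC_COMM_WORLD,PETSC_DECIDE,100,&x);
     VecDuplicate(x,&y);
     VecSet(x,1.0);
     VecSet(y,2.0);
     VecDot(x,y,&dot);             dot == 200.0 on every rank
     VecDestroy(&y);
     VecDestroy(&x);
*/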
extern PetscErrorCode VecView_MPI_Draw(Vec,PetscViewer);

static PetscErrorCode VecPlaceArray_MPI(Vec vin,const PetscScalar *a)
{
  Vec_MPI *v = (Vec_MPI*)vin->data;

  if (v->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
  v->unplacedarray = v->array;  /* save previous array so reset can bring it back */
  v->array         = (PetscScalar*)a;
  if (v->localrep) {
    VecPlaceArray(v->localrep,a);
  }
  return(0);
}
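/*
   A minimal sketch of the place/reset cycle handled above, assuming a vector
   with 10 locally owned entries and a caller-owned buffer mine[]; error
   checking is omitted.  The previous storage is stashed in unplacedarray and
   restored by VecResetArray().

     Vec         x;
     PetscScalar mine[10];

     VecCreateMPI(PETSC_COMM_WORLD,10,PETSC_DECIDE,&x);
     VecPlaceArray(x,mine);        operations on x now read/write mine[]
     VecResetArray(x);             the original storage is active again
     VecDestroy(&x);
*/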
extern PetscErrorCode VecGetValues_MPI(Vec,PetscInt,const PetscInt[],PetscScalar[]);

static PetscErrorCode VecDuplicate_MPI(Vec win,Vec *v)
{
  Vec_MPI     *vw,*w = (Vec_MPI*)win->data;
  PetscScalar *array;

  VecCreate(PetscObjectComm((PetscObject)win),v);
  PetscLayoutReference(win->map,&(*v)->map);

  VecCreate_MPI_Private(*v,PETSC_TRUE,w->nghost,0);
  vw = (Vec_MPI*)(*v)->data;
  PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));

  /* save local representation of the parallel vector (and scatter) if it exists */
  if (w->localrep) {
    VecGetArray(*v,&array);
    VecCreateSeqWithArray(PETSC_COMM_SELF,PetscAbs(win->map->bs),win->map->n+w->nghost,array,&vw->localrep);
    PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
    VecRestoreArray(*v,&array);
    PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);

    vw->localupdate = w->localupdate;
    if (vw->localupdate) {
      PetscObjectReference((PetscObject)vw->localupdate);
    }
  }

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash   = win->stash.donotstash;
  (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

  PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
  PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);

  (*v)->map->bs   = PetscAbs(win->map->bs);
  (*v)->bstash.bs = win->bstash.bs;
  return(0);
}
static PetscErrorCode VecSetOption_MPI(Vec V,VecOption op,PetscBool flag)
{
  Vec_MPI *v = (Vec_MPI*)V->data;

  switch (op) {
  case VEC_IGNORE_OFF_PROC_ENTRIES: V->stash.donotstash = flag;
    break;
  case VEC_IGNORE_NEGATIVE_INDICES: V->stash.ignorenegidx = flag;
    break;
  case VEC_SUBSET_OFF_PROC_ENTRIES: v->assembly_subset = flag;
    break;
  }
  return(0);
}
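/*
   A minimal sketch of driving this switch from user code, assuming an MPI
   vector x (error checking omitted).  VEC_SUBSET_OFF_PROC_ENTRIES is the
   promise that lets the BTS assembly below reuse its communication pattern on
   later assemblies.

     VecSetOption(x,VEC_IGNORE_NEGATIVE_INDICES,PETSC_TRUE);
     VecSetOption(x,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);
*/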
static PetscErrorCode VecResetArray_MPI(Vec vin)
{
  Vec_MPI *v = (Vec_MPI*)vin->data;

  v->array         = v->unplacedarray;
  v->unplacedarray = 0;
  if (v->localrep) {
    VecResetArray(v->localrep);
  }
  return(0);
}
static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rankid,PetscMPIInt rank,void *sdata,MPI_Request req[],void *ctx)
{
  Vec               X    = (Vec)ctx;
  Vec_MPI           *x   = (Vec_MPI*)X->data;
  VecAssemblyHeader *hdr = (VecAssemblyHeader*)sdata;
  PetscInt          bs   = X->map->bs;

  /* x->recvhdr only exists when we are reusing a communication network. In that case, some messages can be empty, but
   * we have to send them this time if we sent them before because the receiver is expecting them. */
  if (hdr->count || (x->recvhdr && x->sendptrs[rankid].ints)) {
    MPI_Isend(x->sendptrs[rankid].ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
    MPI_Isend(x->sendptrs[rankid].scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
  }
  if (hdr->bcount || (x->recvhdr && x->sendptrs[rankid].intb)) {
    MPI_Isend(x->sendptrs[rankid].intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
    MPI_Isend(x->sendptrs[rankid].scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
  }
  return(0);
}
static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rank,void *rdata,MPI_Request req[],void *ctx)
{
  Vec               X    = (Vec)ctx;
  Vec_MPI           *x   = (Vec_MPI*)X->data;
  VecAssemblyHeader *hdr = (VecAssemblyHeader*)rdata;
  PetscInt          bs   = X->map->bs;
  VecAssemblyFrame  *frame;

  PetscSegBufferGet(x->segrecvframe,1,&frame);

  if (hdr->count) {
    PetscSegBufferGet(x->segrecvint,hdr->count,&frame->ints);
    MPI_Irecv(frame->ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
    PetscSegBufferGet(x->segrecvscalar,hdr->count,&frame->scalars);
    MPI_Irecv(frame->scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
    frame->pendings = 2;
  } else {
    frame->ints     = NULL;
    frame->scalars  = NULL;
    frame->pendings = 0;
  }

  if (hdr->bcount) {
    PetscSegBufferGet(x->segrecvint,hdr->bcount,&frame->intb);
    MPI_Irecv(frame->intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
    PetscSegBufferGet(x->segrecvscalar,hdr->bcount*bs,&frame->scalarb);
    MPI_Irecv(frame->scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
    frame->pendingb = 2;
  } else {
    frame->intb     = NULL;
    frame->scalarb  = NULL;
    frame->pendingb = 0;
  }
  return(0);
}
static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
{
  Vec_MPI  *x = (Vec_MPI*)X->data;
  MPI_Comm comm;
  PetscInt i,j,jb,bs;

  if (X->stash.donotstash) return(0);

  PetscObjectGetComm((PetscObject)X,&comm);
  VecGetBlockSize(X,&bs);
#if defined(PETSC_USE_DEBUG)
  {
    InsertMode addv;
    MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
    if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
  }
#endif
  X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */

  VecStashSortCompress_Private(&X->stash);
  VecStashSortCompress_Private(&X->bstash);

  if (!x->sendranks) {
    PetscMPIInt nowners,bnowners,*owners,*bowners;
    PetscInt    ntmp;
    VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);
    VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);
    PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);
    x->nsendranks = ntmp;
    PetscFree(owners);
    PetscFree(bowners);
    PetscMalloc1(x->nsendranks,&x->sendhdr);
    PetscCalloc1(x->nsendranks,&x->sendptrs);
  }
  for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
    PetscMPIInt rank = x->sendranks[i];
    x->sendhdr[i].insertmode = X->stash.insertmode;
    /* Initialize pointers for non-empty stashes the first time around. Subsequent assemblies with
     * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
     * there is nothing new to send, so that size-zero messages get sent instead. */
    x->sendhdr[i].count = 0;
    if (X->stash.n) {
      x->sendptrs[i].ints    = &X->stash.idx[j];
      x->sendptrs[i].scalars = &X->stash.array[j];
      for ( ; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
    }
    x->sendhdr[i].bcount = 0;
    if (X->bstash.n) {
      x->sendptrs[i].intb    = &X->bstash.idx[jb];
      x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
      for ( ; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
    }
  }

  if (!x->segrecvint) {PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);}
  if (!x->segrecvscalar) {PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);}
  if (!x->segrecvframe) {PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);}
  if (x->recvhdr) { /* VEC_SUBSET_OFF_PROC_ENTRIES and this is not the first assembly */
    PetscMPIInt tag[4];
    if (!x->assembly_subset) SETERRQ(comm,PETSC_ERR_PLIB,"Attempt to reuse rendezvous when not VEC_SUBSET_OFF_PROC_ENTRIES");
    for (i=0; i<4; i++) {PetscCommGetNewTag(comm,&tag[i]);}
    for (i=0; i<x->nsendranks; i++) {
      VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);
    }
    for (i=0; i<x->nrecvranks; i++) {
      VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);
    }
    x->use_status = PETSC_TRUE;
  } else { /* First time */
    PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);
    x->use_status = PETSC_FALSE;
  }

  {
    PetscInt nstash,reallocs;
    VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);
    PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
    VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);
    PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
  }
  return(0);
}
static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
{
  Vec_MPI          *x = (Vec_MPI*)X->data;
  PetscInt         bs = X->map->bs;
  PetscMPIInt      npending,*some_indices,r;
  MPI_Status       *some_statuses;
  PetscScalar      *xarray;
  VecAssemblyFrame *frame;

  if (X->stash.donotstash) {
    X->stash.insertmode  = NOT_SET_VALUES;
    X->bstash.insertmode = NOT_SET_VALUES;
    return(0);
  }

  VecGetArray(X,&xarray);
  PetscSegBufferExtractInPlace(x->segrecvframe,&frame);
  PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);
  for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
  while (npending>0) {
    PetscMPIInt ndone,ii;
    /* Filling MPI_Status fields requires some resources from the MPI library. We skip it on the first assembly, or
     * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
     * rendezvous. When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
     * subsequent assembly can set a proper subset of the values. */
    MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);
    for (ii=0; ii<ndone; ii++) {
      PetscInt    i = some_indices[ii]/4,j,k;
      InsertMode  imode = (InsertMode)x->recvhdr[i].insertmode;
      PetscInt    *recvint;
      PetscScalar *recvscalar;
      PetscBool   intmsg   = (PetscBool)(some_indices[ii]%2 == 0);
      PetscBool   blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1);
      npending--;
      if (!blockmsg) { /* Scalar stash */
        PetscMPIInt count;
        if (--frame[i].pendings > 0) continue;
        if (x->use_status) {
          MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
        } else count = x->recvhdr[i].count;
        for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) {
          PetscInt loc = *recvint - X->map->rstart;
          if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)",*recvint,X->map->rstart,X->map->rend);
          switch (imode) {
          case ADD_VALUES:
            xarray[loc] += *recvscalar++;
            break;
          case INSERT_VALUES:
            xarray[loc] = *recvscalar++;
            break;
          default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
          }
        }
      } else { /* Block stash */
        PetscMPIInt count;
        if (--frame[i].pendingb > 0) continue;
        if (x->use_status) {
          MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
          if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
        } else count = x->recvhdr[i].bcount;
        for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) {
          PetscInt loc = (*recvint)*bs - X->map->rstart;
          switch (imode) {
          case ADD_VALUES:
            for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++;
            break;
          case INSERT_VALUES:
            for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++;
            break;
          default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
          }
        }
      }
    }
  }
  VecRestoreArray(X,&xarray);
  MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);
  PetscFree2(some_indices,some_statuses);
  if (x->assembly_subset) {
    void *dummy; /* reset segbuffers */
    PetscSegBufferExtractInPlace(x->segrecvint,&dummy);
    PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);
  } else {
    VecAssemblyReset_MPI(X);
  }

  X->stash.insertmode  = NOT_SET_VALUES;
  X->bstash.insertmode = NOT_SET_VALUES;
  VecStashScatterEnd_Private(&X->stash);
  VecStashScatterEnd_Private(&X->bstash);
  return(0);
}
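/*
   A minimal sketch of the assembly cycle these routines implement, assuming an
   MPI vector x; the index and value are illustrative and error checking is
   omitted.  Entries set for rows owned by other processes are stashed locally,
   shipped in VecAssemblyBegin(), and applied to the owning ranks here in
   VecAssemblyEnd().

     PetscInt    row = 0;          may be owned by another process
     PetscScalar val = 1.0;

     VecSetValues(x,1,&row,&val,ADD_VALUES);
     VecAssemblyBegin(x);
     VecAssemblyEnd(x);
*/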
PetscErrorCode VecAssemblyReset_MPI(Vec X)
{
  Vec_MPI *x = (Vec_MPI*)X->data;

  PetscFree(x->sendreqs);
  PetscFree(x->recvreqs);
  PetscFree(x->sendranks);
  PetscFree(x->recvranks);
  PetscFree(x->sendhdr);
  PetscFree(x->recvhdr);
  PetscFree(x->sendptrs);
  PetscSegBufferDestroy(&x->segrecvint);
  PetscSegBufferDestroy(&x->segrecvscalar);
  PetscSegBufferDestroy(&x->segrecvframe);
  return(0);
}
static PetscErrorCode VecSetFromOptions_MPI(PetscOptionItems *PetscOptionsObject,Vec X)
{
  PetscBool flg = PETSC_FALSE,set;

  PetscOptionsHead(PetscOptionsObject,"VecMPI Options");
  PetscOptionsBool("-vec_assembly_bts","Use BuildTwoSided version of assembly","",flg,&flg,&set);
  if (set) {
    X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI_BTS : VecAssemblyBegin_MPI;
    X->ops->assemblyend   = flg ? VecAssemblyEnd_MPI_BTS : VecAssemblyEnd_MPI;
  }
  PetscOptionsTail();
  return(0);
}
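/*
   A minimal sketch of enabling the BTS assembly at run time, equivalent to
   passing -vec_assembly_bts on the command line.  The vector x is assumed to
   already have its sizes and an MPI type set; error checking is omitted.

     PetscOptionsSetValue(NULL,"-vec_assembly_bts","true");
     VecSetFromOptions(x);
*/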
static struct _VecOps DvOps = { VecDuplicate_MPI, /* 1 */
                                VecDuplicateVecs_Default,
                                VecDestroyVecs_Default,
                                VecDot_MPI,
                                VecMDot_MPI,
                                VecNorm_MPI,
                                VecTDot_MPI,
                                VecMTDot_MPI,
                                VecScale_Seq,
                                VecCopy_Seq, /* 10 */
                                VecSet_Seq,
                                VecSwap_Seq,
                                VecAXPY_Seq,
                                VecAXPBY_Seq,
                                VecMAXPY_Seq,
                                VecAYPX_Seq,
                                VecWAXPY_Seq,
                                VecAXPBYPCZ_Seq,
                                VecPointwiseMult_Seq,
                                VecPointwiseDivide_Seq,
                                VecSetValues_MPI, /* 20 */
                                VecAssemblyBegin_MPI,
                                VecAssemblyEnd_MPI,
                                0,
                                VecGetSize_MPI,
                                VecGetSize_Seq,
                                0,
                                VecMax_MPI,
                                VecMin_MPI,
                                VecSetRandom_Seq,
                                VecSetOption_MPI,
                                VecSetValuesBlocked_MPI,
                                VecDestroy_MPI,
                                VecView_MPI,
                                VecPlaceArray_MPI,
                                VecReplaceArray_Seq,
                                VecDot_Seq,
                                VecTDot_Seq,
                                VecNorm_Seq,
                                VecMDot_Seq,
                                VecMTDot_Seq,
                                VecLoad_Default,
                                VecReciprocal_Default,
                                VecConjugate_Seq,
                                0,
                                0,
                                VecResetArray_MPI,
                                VecSetFromOptions_MPI, /* set from options */
                                VecMaxPointwiseDivide_Seq,
                                VecPointwiseMax_Seq,
                                VecPointwiseMaxAbs_Seq,
                                VecPointwiseMin_Seq,
                                VecGetValues_MPI,
                                0,
                                0,
                                0,
                                0,
                                0,
                                0,
                                VecStrideGather_Default,
                                VecStrideScatter_Default,
                                0,
                                0,
                                0,
                                0,
                                0,
                                VecStrideSubSetGather_Default,
                                VecStrideSubSetScatter_Default,
                                0,
                                0
};
/*
    VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
    VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
    VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()

    If alloc is true and array is NULL then this routine allocates the space, otherwise
    no space is allocated.
 */
PetscErrorCode VecCreate_MPI_Private(Vec v,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
{
  Vec_MPI *s;

  PetscNewLog(v,&s);
  v->data        = (void*)s;
  PetscMemcpy(v->ops,&DvOps,sizeof(DvOps));
  s->nghost      = nghost;
  v->petscnative = PETSC_TRUE;

  PetscLayoutSetUp(v->map);

  s->array           = (PetscScalar*)array;
  s->array_allocated = 0;
  if (alloc && !array) {
    PetscInt n = v->map->n+nghost;
    PetscMalloc1(n,&s->array);
    PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));
    PetscMemzero(s->array,n*sizeof(PetscScalar));
    s->array_allocated = s->array;
  }

  /* By default parallel vectors do not have local representation */
  s->localrep    = 0;
  s->localupdate = 0;

  v->stash.insertmode  = NOT_SET_VALUES;
  v->bstash.insertmode = NOT_SET_VALUES;
  /* create the stashes. The block-size for bstash is set later when
     VecSetValuesBlocked is called.
  */
  VecStashCreate_Private(PetscObjectComm((PetscObject)v),1,&v->stash);
  VecStashCreate_Private(PetscObjectComm((PetscObject)v),PetscAbs(v->map->bs),&v->bstash);

#if defined(PETSC_HAVE_MATLAB_ENGINE)
  PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEnginePut_C",VecMatlabEnginePut_Default);
  PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEngineGet_C",VecMatlabEngineGet_Default);
#endif
  PetscObjectChangeTypeName((PetscObject)v,VECMPI);
  return(0);
}
/*MC
   VECMPI - VECMPI = "mpi" - The basic parallel vector

   Options Database Keys:
. -vec_type mpi - sets the vector type to VECMPI during a call to VecSetFromOptions()

  Level: beginner

.seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VECMPI, VecType, VecCreateMPI()
M*/
PETSC_EXTERN PetscErrorCode VecCreate_MPI(Vec vv)
{
  VecCreate_MPI_Private(vv,PETSC_TRUE,0,0);
  return(0);
}
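/*
   A minimal sketch of creating a VECMPI explicitly; the global length 100 is
   an illustrative assumption and error checking is omitted.

     Vec x;

     VecCreate(PETSC_COMM_WORLD,&x);
     VecSetSizes(x,PETSC_DECIDE,100);
     VecSetType(x,VECMPI);
     VecDestroy(&x);
*/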
/*MC
   VECSTANDARD = "standard" - A VECSEQ on one process and VECMPI on more than one process

   Options Database Keys:
. -vec_type standard - sets the vector type to standard during a call to VecSetFromOptions()

  Level: beginner

.seealso: VecCreateSeq(), VecCreateMPI()
M*/
PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
{
  PetscMPIInt size;

  MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
  if (size == 1) {
    VecSetType(v,VECSEQ);
  } else {
    VecSetType(v,VECMPI);
  }
  return(0);
}
/*@C
   VecCreateMPIWithArray - Creates a parallel, array-style vector,
   where the user provides the array space to store the vector values.

   Collective on MPI_Comm

   Input Parameters:
+  comm  - the MPI communicator to use
.  bs    - block size, same meaning as VecSetBlockSize()
.  n     - local vector length, cannot be PETSC_DECIDE
.  N     - global vector length (or PETSC_DECIDE to have it calculated)
-  array - the user provided array to store the vector values

   Output Parameter:
.  vv - the vector

   Notes:
   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector.

   If the user-provided array is NULL, then VecPlaceArray() can be used
   at a later stage to SET the array for storing the vector values.

   PETSc does NOT free the array when the vector is destroyed via VecDestroy().
   The user should not free the array until the vector is destroyed.

   Level: intermediate

   Concepts: vectors^creating with array

.seealso: VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
          VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()

@*/
PetscErrorCode VecCreateMPIWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
{
  if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
  PetscSplitOwnership(comm,&n,&N);
  VecCreate(comm,vv);
  VecSetSizes(*vv,n,N);
  VecSetBlockSize(*vv,bs);
  VecCreate_MPI_Private(*vv,PETSC_FALSE,0,array);
  return(0);
}
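/*
   A minimal usage sketch, assuming each process owns 10 entries stored in a
   caller-owned buffer buf[] (error checking omitted).  As noted above,
   VecDestroy() does not free buf; the caller keeps ownership.

     PetscScalar buf[10];
     Vec         x;

     VecCreateMPIWithArray(PETSC_COMM_WORLD,1,10,PETSC_DECIDE,buf,&x);
     VecDestroy(&x);
*/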
/*@C
   VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
   the caller allocates the array space.

   Collective on MPI_Comm

   Input Parameters:
+  comm   - the MPI communicator to use
.  n      - local vector length
.  N      - global vector length (or PETSC_DECIDE to have it calculated if n is given)
.  nghost - number of local ghost points
.  ghosts - global indices of ghost points (or NULL if not needed), these do not need to be in increasing (sorted) order
-  array  - the space to store the vector values (of length at least n + nghost)

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Notes:
   Use VecGhostGetLocalForm() to access the local, ghosted representation
   of the vector.

   This also automatically sets the ISLocalToGlobalMapping() for this vector.

   Level: advanced

   Concepts: vectors^creating with array
   Concepts: vectors^ghosted

.seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
          VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
          VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

@*/
PetscErrorCode VecCreateGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
{
  PetscErrorCode         ierr;
  Vec_MPI                *w;
  PetscScalar            *larray;
  IS                     from,to;
  ISLocalToGlobalMapping ltog;
  PetscInt               rstart,i,*indices;

  *vv = 0;

  if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
  if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
  if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
  PetscSplitOwnership(comm,&n,&N);
  /* Create global representation */
  VecCreate(comm,vv);
  VecSetSizes(*vv,n,N);
  VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost,array);
  w = (Vec_MPI*)(*vv)->data;
  /* Create local representation */
  VecGetArray(*vv,&larray);
  VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
  PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
  VecRestoreArray(*vv,&larray);

  /*
     Create scatter context for scattering (updating) ghost values
  */
  ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
  ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
  VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
  PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
  ISDestroy(&to);
  ISDestroy(&from);

  /* set local to global mapping for ghosted vector */
  PetscMalloc1(n+nghost,&indices);
  VecGetOwnershipRange(*vv,&rstart,NULL);
  for (i=0; i<n; i++) {
    indices[i] = rstart + i;
  }
  for (i=0; i<nghost; i++) {
    indices[n+i] = ghosts[i];
  }
  ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
  VecSetLocalToGlobalMapping(*vv,ltog);
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}
/*@
   VecCreateGhost - Creates a parallel vector with ghost padding on each processor.

   Collective on MPI_Comm

   Input Parameters:
+  comm   - the MPI communicator to use
.  n      - local vector length
.  N      - global vector length (or PETSC_DECIDE to have it calculated if n is given)
.  nghost - number of local ghost points
-  ghosts - global indices of ghost points, these do not need to be in increasing (sorted) order

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Notes:
   Use VecGhostGetLocalForm() to access the local, ghosted representation
   of the vector.

   This also automatically sets the ISLocalToGlobalMapping() for this vector.

   Level: advanced

   Concepts: vectors^ghosted

.seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
          VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
          VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
          VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()

@*/
PetscErrorCode VecCreateGhost(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
{
  VecCreateGhostWithArray(comm,n,N,nghost,ghosts,0,vv);
  return(0);
}
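/*
   A minimal sketch of the usual ghost-update cycle, assuming a local length n
   and two ghost entries whose global indices are owned by other processes
   (error checking omitted).  The scatter built above is what
   VecGhostUpdateBegin()/VecGhostUpdateEnd() use to fill the ghost slots of the
   local form.

     PetscInt ghosts[2];           fill with global indices owned by other ranks
     Vec      x,xlocal;

     VecCreateGhost(PETSC_COMM_WORLD,n,PETSC_DECIDE,2,ghosts,&x);
     VecGhostUpdateBegin(x,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostUpdateEnd(x,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostGetLocalForm(x,&xlocal);      sequential vector of length n+2
     VecGhostRestoreLocalForm(x,&xlocal);
     VecDestroy(&x);
*/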
/*@
   VecMPISetGhost - Sets the ghost points for an MPI ghost vector

   Collective on Vec

   Input Parameters:
+  vv     - the MPI vector
.  nghost - number of local ghost points
-  ghosts - global indices of ghost points, these do not need to be in increasing (sorted) order

   Notes:
   Use VecGhostGetLocalForm() to access the local, ghosted representation
   of the vector.

   This also automatically sets the ISLocalToGlobalMapping() for this vector.

   You must call this AFTER you have set the type of the vector (with VecSetType()) and the size (with VecSetSizes()).

   Level: advanced

   Concepts: vectors^ghosted

.seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
          VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
          VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
          VecCreateGhostBlock(), VecCreateGhostBlockWithArray()

@*/
PetscErrorCode VecMPISetGhost(Vec vv,PetscInt nghost,const PetscInt ghosts[])
{
  PetscBool flg;

  PetscObjectTypeCompare((PetscObject)vv,VECMPI,&flg);
  /* if this is already a fully built VECMPI, destroy it and rebuild with ghosting */
  if (flg) {
    PetscInt               n,N;
    Vec_MPI                *w;
    PetscScalar            *larray;
    IS                     from,to;
    ISLocalToGlobalMapping ltog;
    PetscInt               rstart,i,*indices;
    MPI_Comm               comm;

    PetscObjectGetComm((PetscObject)vv,&comm);
    n = vv->map->n;
    N = vv->map->N;
    (*vv->ops->destroy)(vv);
    VecSetSizes(vv,n,N);
    VecCreate_MPI_Private(vv,PETSC_TRUE,nghost,NULL);
    w = (Vec_MPI*)(vv)->data;
    /* Create local representation */
    VecGetArray(vv,&larray);
    VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
    PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localrep);
    VecRestoreArray(vv,&larray);

    /*
       Create scatter context for scattering (updating) ghost values
    */
    ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
    ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
    VecScatterCreate(vv,from,w->localrep,to,&w->localupdate);
    PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localupdate);
    ISDestroy(&to);
    ISDestroy(&from);

    /* set local to global mapping for ghosted vector */
    PetscMalloc1(n+nghost,&indices);
    VecGetOwnershipRange(vv,&rstart,NULL);

    for (i=0; i<n; i++)      indices[i]   = rstart + i;
    for (i=0; i<nghost; i++) indices[n+i] = ghosts[i];

    ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
    VecSetLocalToGlobalMapping(vv,ltog);
    ISLocalToGlobalMappingDestroy(&ltog);
  } else if (vv->ops->create == VecCreate_MPI) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set local or global size before setting ghosting");
  else if (!((PetscObject)vv)->type_name) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set type to VECMPI before ghosting");
  return(0);
}
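/*
   A minimal sketch of ghosting an existing vector; per the note above, the
   type and sizes must already be set.  The local length n and single ghost
   index are illustrative assumptions; error checking is omitted.

     Vec      x;
     PetscInt ghosts[1];           one global index owned by another rank

     VecCreate(PETSC_COMM_WORLD,&x);
     VecSetSizes(x,n,PETSC_DECIDE);
     VecSetType(x,VECMPI);
     VecMPISetGhost(x,1,ghosts);
*/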
/* ------------------------------------------------------------------------------------------*/

/*@C
   VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
   the caller allocates the array space. Indices in the ghost region are based on blocks.

   Collective on MPI_Comm

   Input Parameters:
+  comm   - the MPI communicator to use
.  bs     - block size
.  n      - local vector length
.  N      - global vector length (or PETSC_DECIDE to have it calculated if n is given)
.  nghost - number of local ghost blocks
.  ghosts - global indices of ghost blocks (or NULL if not needed); counts are by block, not by individual index, and these do not need to be in increasing (sorted) order
-  array  - the space to store the vector values (of length at least n + bs*nghost)

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Notes:
   Use VecGhostGetLocalForm() to access the local, ghosted representation
   of the vector.

   n is the local vector size (the total local size, not the number of blocks) while nghost
   is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
   portion is bs*nghost

   Level: advanced

   Concepts: vectors^creating ghosted
   Concepts: vectors^creating with array

.seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
          VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
          VecCreateGhostWithArray(), VecCreateGhostBlock()

@*/
PetscErrorCode VecCreateGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
{
  PetscErrorCode         ierr;
  Vec_MPI                *w;
  PetscScalar            *larray;
  IS                     from,to;
  ISLocalToGlobalMapping ltog;
  PetscInt               rstart,i,nb,*indices;

  *vv = 0;

  if (n == PETSC_DECIDE)      SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
  if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
  if (nghost < 0)             SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
  if (n % bs)                 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Local size must be a multiple of block size");
  PetscSplitOwnership(comm,&n,&N);
  /* Create global representation */
  VecCreate(comm,vv);
  VecSetSizes(*vv,n,N);
  VecSetBlockSize(*vv,bs);
  VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost*bs,array);
  w = (Vec_MPI*)(*vv)->data;
  /* Create local representation */
  VecGetArray(*vv,&larray);
  VecCreateSeqWithArray(PETSC_COMM_SELF,bs,n+bs*nghost,larray,&w->localrep);
  PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
  VecRestoreArray(*vv,&larray);

  /*
     Create scatter context for scattering (updating) ghost values
  */
  ISCreateBlock(comm,bs,nghost,ghosts,PETSC_COPY_VALUES,&from);
  ISCreateStride(PETSC_COMM_SELF,bs*nghost,n,1,&to);
  VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
  PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
  ISDestroy(&to);
  ISDestroy(&from);

  /* set local to global mapping for ghosted vector; the mapping is by blocks,
     so the owned portion starts at the block index rstart/bs */
  nb = n/bs;
  PetscMalloc1(nb+nghost,&indices);
  VecGetOwnershipRange(*vv,&rstart,NULL);
  rstart = rstart/bs;

  for (i=0; i<nb; i++)     indices[i]    = rstart + i;
  for (i=0; i<nghost; i++) indices[nb+i] = ghosts[i];

  ISLocalToGlobalMappingCreate(comm,bs,nb+nghost,indices,PETSC_OWN_POINTER,&ltog);
  VecSetLocalToGlobalMapping(*vv,ltog);
  ISLocalToGlobalMappingDestroy(&ltog);
  return(0);
}
/*@
   VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
   The indexing of the ghost points is done with blocks.

   Collective on MPI_Comm

   Input Parameters:
+  comm   - the MPI communicator to use
.  bs     - the block size
.  n      - local vector length
.  N      - global vector length (or PETSC_DECIDE to have it calculated if n is given)
.  nghost - number of local ghost blocks
-  ghosts - global indices of ghost blocks; counts are by block, not by individual index, and these do not need to be in increasing (sorted) order

   Output Parameter:
.  vv - the global vector representation (without ghost points as part of vector)

   Notes:
   Use VecGhostGetLocalForm() to access the local, ghosted representation
   of the vector.

   n is the local vector size (the total local size, not the number of blocks) while nghost
   is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
   portion is bs*nghost

   Level: advanced

   Concepts: vectors^ghosted

.seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
          VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
          VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecCreateGhostBlockWithArray()

@*/
PetscErrorCode VecCreateGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
{
  VecCreateGhostBlockWithArray(comm,bs,n,N,nghost,ghosts,0,vv);
  return(0);
}
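/*
   A minimal sketch with block size 2, assuming a local length n that is a
   multiple of 2 and two ghost blocks (error checking omitted).  Because nghost
   and bghosts[] count blocks, the local form returned by
   VecGhostGetLocalForm() has n + 2*2 entries.

     Vec      x;
     PetscInt bghosts[2];          fill with global BLOCK indices owned by other ranks

     VecCreateGhostBlock(PETSC_COMM_WORLD,2,n,PETSC_DECIDE,2,bghosts,&x);
     VecDestroy(&x);
*/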