Actual source code: pbvec.c
petsc-3.9.3 2018-07-02
2: /*
3: This file contains routines for Parallel vector operations.
4: */
5: #include <petscoptions.h>
6: #include <../src/vec/vec/impls/mpi/pvecimpl.h>
8: PetscErrorCode VecDot_MPI(Vec xin,Vec yin,PetscScalar *z)
9: {
10: PetscScalar sum,work;
14: VecDot_Seq(xin,yin,&work);
15: MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
16: *z = sum;
17: return(0);
18: }
20: PetscErrorCode VecTDot_MPI(Vec xin,Vec yin,PetscScalar *z)
21: {
22: PetscScalar sum,work;
26: VecTDot_Seq(xin,yin,&work);
27: MPIU_Allreduce(&work,&sum,1,MPIU_SCALAR,MPIU_SUM,PetscObjectComm((PetscObject)xin));
28: *z = sum;
29: return(0);
30: }
32: extern PetscErrorCode VecView_MPI_Draw(Vec,PetscViewer);
34: static PetscErrorCode VecPlaceArray_MPI(Vec vin,const PetscScalar *a)
35: {
37: Vec_MPI *v = (Vec_MPI*)vin->data;
40: if (v->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"VecPlaceArray() was already called on this vector, without a call to VecResetArray()");
41: v->unplacedarray = v->array; /* save previous array so reset can bring it back */
42: v->array = (PetscScalar*)a;
43: if (v->localrep) {
44: VecPlaceArray(v->localrep,a);
45: }
46: return(0);
47: }
49: static PetscErrorCode VecDuplicate_MPI(Vec win,Vec *v)
50: {
52: Vec_MPI *vw,*w = (Vec_MPI*)win->data;
53: PetscScalar *array;
56: VecCreate(PetscObjectComm((PetscObject)win),v);
57: PetscLayoutReference(win->map,&(*v)->map);
59: VecCreate_MPI_Private(*v,PETSC_TRUE,w->nghost,0);
60: vw = (Vec_MPI*)(*v)->data;
61: PetscMemcpy((*v)->ops,win->ops,sizeof(struct _VecOps));
63: /* save local representation of the parallel vector (and scatter) if it exists */
64: if (w->localrep) {
65: VecGetArray(*v,&array);
66: VecCreateSeqWithArray(PETSC_COMM_SELF,PetscAbs(win->map->bs),win->map->n+w->nghost,array,&vw->localrep);
67: PetscMemcpy(vw->localrep->ops,w->localrep->ops,sizeof(struct _VecOps));
68: VecRestoreArray(*v,&array);
69: PetscLogObjectParent((PetscObject)*v,(PetscObject)vw->localrep);
71: vw->localupdate = w->localupdate;
72: if (vw->localupdate) {
73: PetscObjectReference((PetscObject)vw->localupdate);
74: }
75: }
77: /* New vector should inherit stashing property of parent */
78: (*v)->stash.donotstash = win->stash.donotstash;
79: (*v)->stash.ignorenegidx = win->stash.ignorenegidx;
81: PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*v))->olist);
82: PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*v))->qlist);
84: (*v)->map->bs = PetscAbs(win->map->bs);
85: (*v)->bstash.bs = win->bstash.bs;
86: return(0);
87: }
90: static PetscErrorCode VecSetOption_MPI(Vec V,VecOption op,PetscBool flag)
91: {
92: Vec_MPI *v = (Vec_MPI*)V->data;
94: switch (op) {
95: case VEC_IGNORE_OFF_PROC_ENTRIES: V->stash.donotstash = flag;
96: break;
97: case VEC_IGNORE_NEGATIVE_INDICES: V->stash.ignorenegidx = flag;
98: break;
99: case VEC_SUBSET_OFF_PROC_ENTRIES: v->assembly_subset = flag;
100: break;
101: }
102: return(0);
103: }
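/*
   Illustrative usage sketch (not part of pbvec.c): the options handled by the switch above
   are set by the caller through VecSetOption(). The sizes below are arbitrary example values.

     Vec X;
     VecCreate(PETSC_COMM_WORLD,&X);
     VecSetSizes(X,PETSC_DECIDE,100);
     VecSetType(X,VECMPI);
     VecSetOption(X,VEC_IGNORE_NEGATIVE_INDICES,PETSC_TRUE);   silently drop negative indices in VecSetValues()
     VecSetOption(X,VEC_SUBSET_OFF_PROC_ENTRIES,PETSC_TRUE);   later assemblies communicate with a subset
                                                               of the ranks used in the first assembly
     VecDestroy(&X);
*/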
106: static PetscErrorCode VecResetArray_MPI(Vec vin)
107: {
108: Vec_MPI *v = (Vec_MPI*)vin->data;
112: v->array = v->unplacedarray;
113: v->unplacedarray = 0;
114: if (v->localrep) {
115: VecResetArray(v->localrep);
116: }
117: return(0);
118: }
120: static PetscErrorCode VecAssemblySend_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rankid,PetscMPIInt rank,void *sdata,MPI_Request req[],void *ctx)
121: {
122: Vec X = (Vec)ctx;
123: Vec_MPI *x = (Vec_MPI*)X->data;
124: VecAssemblyHeader *hdr = (VecAssemblyHeader*)sdata;
125: PetscInt bs = X->map->bs;
129: /* x->recvhdr only exists when we are reusing a communication network. In that case, some messages can be empty, but
130: * we have to send them this time if we sent them before because the receiver is expecting them. */
131: if (hdr->count || (x->recvhdr && x->sendptrs[rankid].ints)) {
132: MPI_Isend(x->sendptrs[rankid].ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
133: MPI_Isend(x->sendptrs[rankid].scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
134: }
135: if (hdr->bcount || (x->recvhdr && x->sendptrs[rankid].intb)) {
136: MPI_Isend(x->sendptrs[rankid].intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
137: MPI_Isend(x->sendptrs[rankid].scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
138: }
139: return(0);
140: }
142: static PetscErrorCode VecAssemblyRecv_MPI_Private(MPI_Comm comm,const PetscMPIInt tag[],PetscMPIInt rank,void *rdata,MPI_Request req[],void *ctx)
143: {
144: Vec X = (Vec)ctx;
145: Vec_MPI *x = (Vec_MPI*)X->data;
146: VecAssemblyHeader *hdr = (VecAssemblyHeader*)rdata;
148: PetscInt bs = X->map->bs;
149: VecAssemblyFrame *frame;
152: PetscSegBufferGet(x->segrecvframe,1,&frame);
154: if (hdr->count) {
155: PetscSegBufferGet(x->segrecvint,hdr->count,&frame->ints);
156: MPI_Irecv(frame->ints,hdr->count,MPIU_INT,rank,tag[0],comm,&req[0]);
157: PetscSegBufferGet(x->segrecvscalar,hdr->count,&frame->scalars);
158: MPI_Irecv(frame->scalars,hdr->count,MPIU_SCALAR,rank,tag[1],comm,&req[1]);
159: frame->pendings = 2;
160: } else {
161: frame->ints = NULL;
162: frame->scalars = NULL;
163: frame->pendings = 0;
164: }
166: if (hdr->bcount) {
167: PetscSegBufferGet(x->segrecvint,hdr->bcount,&frame->intb);
168: MPI_Irecv(frame->intb,hdr->bcount,MPIU_INT,rank,tag[2],comm,&req[2]);
169: PetscSegBufferGet(x->segrecvscalar,hdr->bcount*bs,&frame->scalarb);
170: MPI_Irecv(frame->scalarb,hdr->bcount*bs,MPIU_SCALAR,rank,tag[3],comm,&req[3]);
171: frame->pendingb = 2;
172: } else {
173: frame->intb = NULL;
174: frame->scalarb = NULL;
175: frame->pendingb = 0;
176: }
177: return(0);
178: }
180: static PetscErrorCode VecAssemblyBegin_MPI_BTS(Vec X)
181: {
182: Vec_MPI *x = (Vec_MPI*)X->data;
184: MPI_Comm comm;
185: PetscInt i,j,jb,bs;
188: if (X->stash.donotstash) return(0);
190: PetscObjectGetComm((PetscObject)X,&comm);
191: VecGetBlockSize(X,&bs);
192: #if defined(PETSC_USE_DEBUG)
193: {
194: InsertMode addv;
195: MPIU_Allreduce((PetscEnum*)&X->stash.insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,comm);
196: if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(comm,PETSC_ERR_ARG_NOTSAMETYPE,"Some processors inserted values while others added");
197: }
198: #endif
199: X->bstash.insertmode = X->stash.insertmode; /* Block stash implicitly tracks InsertMode of scalar stash */
201: VecStashSortCompress_Private(&X->stash);
202: VecStashSortCompress_Private(&X->bstash);
204: if (!x->sendranks) {
205: PetscMPIInt nowners,bnowners,*owners,*bowners;
206: PetscInt ntmp;
207: VecStashGetOwnerList_Private(&X->stash,X->map,&nowners,&owners);
208: VecStashGetOwnerList_Private(&X->bstash,X->map,&bnowners,&bowners);
209: PetscMergeMPIIntArray(nowners,owners,bnowners,bowners,&ntmp,&x->sendranks);
210: x->nsendranks = ntmp;
211: PetscFree(owners);
212: PetscFree(bowners);
213: PetscMalloc1(x->nsendranks,&x->sendhdr);
214: PetscCalloc1(x->nsendranks,&x->sendptrs);
215: }
216: for (i=0,j=0,jb=0; i<x->nsendranks; i++) {
217: PetscMPIInt rank = x->sendranks[i];
218: x->sendhdr[i].insertmode = X->stash.insertmode;
219: /* Initialize pointers for non-empty stashes the first time around. Subsequent assemblies with
220: * VEC_SUBSET_OFF_PROC_ENTRIES will leave the old pointers (dangling because the stash has been collected) when
221: * there is nothing new to send, so that size-zero messages get sent instead. */
222: x->sendhdr[i].count = 0;
223: if (X->stash.n) {
224: x->sendptrs[i].ints = &X->stash.idx[j];
225: x->sendptrs[i].scalars = &X->stash.array[j];
226: for ( ; j<X->stash.n && X->stash.idx[j] < X->map->range[rank+1]; j++) x->sendhdr[i].count++;
227: }
228: x->sendhdr[i].bcount = 0;
229: if (X->bstash.n) {
230: x->sendptrs[i].intb = &X->bstash.idx[jb];
231: x->sendptrs[i].scalarb = &X->bstash.array[jb*bs];
232: for ( ; jb<X->bstash.n && X->bstash.idx[jb]*bs < X->map->range[rank+1]; jb++) x->sendhdr[i].bcount++;
233: }
234: }
236: if (!x->segrecvint) {PetscSegBufferCreate(sizeof(PetscInt),1000,&x->segrecvint);}
237: if (!x->segrecvscalar) {PetscSegBufferCreate(sizeof(PetscScalar),1000,&x->segrecvscalar);}
238: if (!x->segrecvframe) {PetscSegBufferCreate(sizeof(VecAssemblyFrame),50,&x->segrecvframe);}
239: if (x->recvhdr) { /* VEC_SUBSET_OFF_PROC_ENTRIES and this is not the first assembly */
240: PetscMPIInt tag[4];
241: if (!x->assembly_subset) SETERRQ(comm,PETSC_ERR_PLIB,"Attempt to reuse rendezvous when not VEC_SUBSET_OFF_PROC_ENTRIES");
242: for (i=0; i<4; i++) {PetscCommGetNewTag(comm,&tag[i]);}
243: for (i=0; i<x->nsendranks; i++) {
244: VecAssemblySend_MPI_Private(comm,tag,i,x->sendranks[i],x->sendhdr+i,x->sendreqs+4*i,X);
245: }
246: for (i=0; i<x->nrecvranks; i++) {
247: VecAssemblyRecv_MPI_Private(comm,tag,x->recvranks[i],x->recvhdr+i,x->recvreqs+4*i,X);
248: }
249: x->use_status = PETSC_TRUE;
250: } else { /* First time */
251: PetscCommBuildTwoSidedFReq(comm,3,MPIU_INT,x->nsendranks,x->sendranks,(PetscInt*)x->sendhdr,&x->nrecvranks,&x->recvranks,&x->recvhdr,4,&x->sendreqs,&x->recvreqs,VecAssemblySend_MPI_Private,VecAssemblyRecv_MPI_Private,X);
252: x->use_status = PETSC_FALSE;
253: }
255: {
256: PetscInt nstash,reallocs;
257: VecStashGetInfo_Private(&X->stash,&nstash,&reallocs);
258: PetscInfo2(X,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
259: VecStashGetInfo_Private(&X->bstash,&nstash,&reallocs);
260: PetscInfo2(X,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
261: }
262: return(0);
263: }
265: static PetscErrorCode VecAssemblyEnd_MPI_BTS(Vec X)
266: {
267: Vec_MPI *x = (Vec_MPI*)X->data;
268: PetscInt bs = X->map->bs;
269: PetscMPIInt npending,*some_indices,r;
270: MPI_Status *some_statuses;
271: PetscScalar *xarray;
273: VecAssemblyFrame *frame;
276: if (X->stash.donotstash) {
277: X->stash.insertmode = NOT_SET_VALUES;
278: X->bstash.insertmode = NOT_SET_VALUES;
279: return(0);
280: }
282: VecGetArray(X,&xarray);
283: PetscSegBufferExtractInPlace(x->segrecvframe,&frame);
284: PetscMalloc2(4*x->nrecvranks,&some_indices,x->use_status?4*x->nrecvranks:0,&some_statuses);
285: for (r=0,npending=0; r<x->nrecvranks; r++) npending += frame[r].pendings + frame[r].pendingb;
286: while (npending>0) {
287: PetscMPIInt ndone=0,ii;
288: /* Filling MPI_Status fields requires some resources from the MPI library. We skip it on the first assembly, or
289: * when VEC_SUBSET_OFF_PROC_ENTRIES has not been set, because we could exchange exact sizes in the initial
290: * rendezvous. When the rendezvous is elided, however, we use MPI_Status to get actual message lengths, so that
291: * subsequent assembly can set a proper subset of the values. */
292: MPI_Waitsome(4*x->nrecvranks,x->recvreqs,&ndone,some_indices,x->use_status?some_statuses:MPI_STATUSES_IGNORE);
293: for (ii=0; ii<ndone; ii++) {
294: PetscInt i = some_indices[ii]/4,j,k;
295: InsertMode imode = (InsertMode)x->recvhdr[i].insertmode;
296: PetscInt *recvint;
297: PetscScalar *recvscalar;
298: PetscBool intmsg = (PetscBool)(some_indices[ii]%2 == 0);
299: PetscBool blockmsg = (PetscBool)((some_indices[ii]%4)/2 == 1);
300: npending--;
301: if (!blockmsg) { /* Scalar stash */
302: PetscMPIInt count;
303: if (--frame[i].pendings > 0) continue;
304: if (x->use_status) {
305: MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
306: } else count = x->recvhdr[i].count;
307: for (j=0,recvint=frame[i].ints,recvscalar=frame[i].scalars; j<count; j++,recvint++) {
308: PetscInt loc = *recvint - X->map->rstart;
309: if (*recvint < X->map->rstart || X->map->rend <= *recvint) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Received vector entry %D out of local range [%D,%D)",*recvint,X->map->rstart,X->map->rend);
310: switch (imode) {
311: case ADD_VALUES:
312: xarray[loc] += *recvscalar++;
313: break;
314: case INSERT_VALUES:
315: xarray[loc] = *recvscalar++;
316: break;
317: default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
318: }
319: }
320: } else { /* Block stash */
321: PetscMPIInt count;
322: if (--frame[i].pendingb > 0) continue;
323: if (x->use_status) {
324: MPI_Get_count(&some_statuses[ii],intmsg ? MPIU_INT : MPIU_SCALAR,&count);
325: if (!intmsg) count /= bs; /* Convert from number of scalars to number of blocks */
326: } else count = x->recvhdr[i].bcount;
327: for (j=0,recvint=frame[i].intb,recvscalar=frame[i].scalarb; j<count; j++,recvint++) {
328: PetscInt loc = (*recvint)*bs - X->map->rstart;
329: switch (imode) {
330: case ADD_VALUES:
331: for (k=loc; k<loc+bs; k++) xarray[k] += *recvscalar++;
332: break;
333: case INSERT_VALUES:
334: for (k=loc; k<loc+bs; k++) xarray[k] = *recvscalar++;
335: break;
336: default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Insert mode not supported 0x%x",imode);
337: }
338: }
339: }
340: }
341: }
342: VecRestoreArray(X,&xarray);
343: MPI_Waitall(4*x->nsendranks,x->sendreqs,MPI_STATUSES_IGNORE);
344: PetscFree2(some_indices,some_statuses);
345: if (x->assembly_subset) {
346: void *dummy; /* reset segbuffers */
347: PetscSegBufferExtractInPlace(x->segrecvint,&dummy);
348: PetscSegBufferExtractInPlace(x->segrecvscalar,&dummy);
349: } else {
350: VecAssemblyReset_MPI(X);
351: }
353: X->stash.insertmode = NOT_SET_VALUES;
354: X->bstash.insertmode = NOT_SET_VALUES;
355: VecStashScatterEnd_Private(&X->stash);
356: VecStashScatterEnd_Private(&X->bstash);
357: return(0);
358: }
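/*
   Illustrative sketch (not part of pbvec.c) of the caller-side pattern that drives the
   stash-based assembly above: entries set for rows owned by other ranks are stashed and
   then moved by VecAssemblyBegin_MPI_BTS() and VecAssemblyEnd_MPI_BTS(). The sizes, index,
   and value are arbitrary example values.

     Vec         X;
     PetscInt    row = 0;            global index, possibly owned by another rank
     PetscScalar one = 1.0;
     VecCreate(PETSC_COMM_WORLD,&X);
     VecSetSizes(X,PETSC_DECIDE,100);
     VecSetType(X,VECMPI);
     VecSetValues(X,1,&row,&one,ADD_VALUES);
     VecAssemblyBegin(X);            sends the stashed off-process entries
     VecAssemblyEnd(X);              receives them and adds them into the local array
     VecDestroy(&X);
*/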
360: PetscErrorCode VecAssemblyReset_MPI(Vec X)
361: {
362: Vec_MPI *x = (Vec_MPI*)X->data;
366: PetscFree(x->sendreqs);
367: PetscFree(x->recvreqs);
368: PetscFree(x->sendranks);
369: PetscFree(x->recvranks);
370: PetscFree(x->sendhdr);
371: PetscFree(x->recvhdr);
372: PetscFree(x->sendptrs);
373: PetscSegBufferDestroy(&x->segrecvint);
374: PetscSegBufferDestroy(&x->segrecvscalar);
375: PetscSegBufferDestroy(&x->segrecvframe);
376: return(0);
377: }
380: static PetscErrorCode VecSetFromOptions_MPI(PetscOptionItems *PetscOptionsObject,Vec X)
381: {
382: #if !defined(PETSC_HAVE_MPIUNI)
384: PetscBool flg = PETSC_FALSE,set;
387: PetscOptionsHead(PetscOptionsObject,"VecMPI Options");
388: PetscOptionsBool("-vec_assembly_legacy","Use MPI 1 version of assembly","",flg,&flg,&set);
389: if (set) {
390: X->ops->assemblybegin = flg ? VecAssemblyBegin_MPI : VecAssemblyBegin_MPI_BTS;
391: X->ops->assemblyend = flg ? VecAssemblyEnd_MPI : VecAssemblyEnd_MPI_BTS;
392: }
393: PetscOptionsTail();
394: #else
395: X->ops->assemblybegin = VecAssemblyBegin_MPI;
396: X->ops->assemblyend = VecAssemblyEnd_MPI;
397: #endif
398: return(0);
399: }
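/*
   Illustrative sketch (not part of pbvec.c): -vec_assembly_legacy is a runtime option and
   takes effect for vectors configured through VecSetFromOptions(); "./app" is a placeholder
   executable name.

     mpiexec -n 2 ./app -vec_assembly_legacy

   selects VecAssemblyBegin_MPI() and VecAssemblyEnd_MPI() instead of the _BTS variants.
*/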
402: static struct _VecOps DvOps = { VecDuplicate_MPI, /* 1 */
403: VecDuplicateVecs_Default,
404: VecDestroyVecs_Default,
405: VecDot_MPI,
406: VecMDot_MPI,
407: VecNorm_MPI,
408: VecTDot_MPI,
409: VecMTDot_MPI,
410: VecScale_Seq,
411: VecCopy_Seq, /* 10 */
412: VecSet_Seq,
413: VecSwap_Seq,
414: VecAXPY_Seq,
415: VecAXPBY_Seq,
416: VecMAXPY_Seq,
417: VecAYPX_Seq,
418: VecWAXPY_Seq,
419: VecAXPBYPCZ_Seq,
420: VecPointwiseMult_Seq,
421: VecPointwiseDivide_Seq,
422: VecSetValues_MPI, /* 20 */
423: VecAssemblyBegin_MPI_BTS,
424: VecAssemblyEnd_MPI_BTS,
425: 0,
426: VecGetSize_MPI,
427: VecGetSize_Seq,
428: 0,
429: VecMax_MPI,
430: VecMin_MPI,
431: VecSetRandom_Seq,
432: VecSetOption_MPI,
433: VecSetValuesBlocked_MPI,
434: VecDestroy_MPI,
435: VecView_MPI,
436: VecPlaceArray_MPI,
437: VecReplaceArray_Seq,
438: VecDot_Seq,
439: VecTDot_Seq,
440: VecNorm_Seq,
441: VecMDot_Seq,
442: VecMTDot_Seq,
443: VecLoad_Default,
444: VecReciprocal_Default,
445: VecConjugate_Seq,
446: 0,
447: 0,
448: VecResetArray_MPI,
449: VecSetFromOptions_MPI,/*set from options */
450: VecMaxPointwiseDivide_Seq,
451: VecPointwiseMax_Seq,
452: VecPointwiseMaxAbs_Seq,
453: VecPointwiseMin_Seq,
454: VecGetValues_MPI,
455: 0,
456: 0,
457: 0,
458: 0,
459: 0,
460: 0,
461: VecStrideGather_Default,
462: VecStrideScatter_Default,
463: 0,
464: 0,
465: 0,
466: 0,
467: 0,
468: VecStrideSubSetGather_Default,
469: VecStrideSubSetScatter_Default,
470: 0,
471: 0
472: };
474: /*
475: VecCreate_MPI_Private - Basic create routine called by VecCreate_MPI() (i.e. VecCreateMPI()),
476: VecCreateMPIWithArray(), VecCreate_Shared() (i.e. VecCreateShared()), VecCreateGhost(),
477: VecDuplicate_MPI(), VecCreateGhostWithArray(), and VecDuplicate_Shared()
479: If alloc is true and array is NULL then this routine allocates the space, otherwise
480: no space is allocated.
481: */
482: PetscErrorCode VecCreate_MPI_Private(Vec v,PetscBool alloc,PetscInt nghost,const PetscScalar array[])
483: {
484: Vec_MPI *s;
488: PetscNewLog(v,&s);
489: v->data = (void*)s;
490: PetscMemcpy(v->ops,&DvOps,sizeof(DvOps));
491: s->nghost = nghost;
492: v->petscnative = PETSC_TRUE;
494: PetscLayoutSetUp(v->map);
496: s->array = (PetscScalar*)array;
497: s->array_allocated = 0;
498: if (alloc && !array) {
499: PetscInt n = v->map->n+nghost;
500: PetscMalloc1(n,&s->array);
501: PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));
502: PetscMemzero(s->array,n*sizeof(PetscScalar));
503: s->array_allocated = s->array;
504: }
506: /* By default parallel vectors do not have local representation */
507: s->localrep = 0;
508: s->localupdate = 0;
510: v->stash.insertmode = NOT_SET_VALUES;
511: v->bstash.insertmode = NOT_SET_VALUES;
512: /* create the stashes. The block-size for bstash is set later when
513: VecSetValuesBlocked is called.
514: */
515: VecStashCreate_Private(PetscObjectComm((PetscObject)v),1,&v->stash);
516: VecStashCreate_Private(PetscObjectComm((PetscObject)v),PetscAbs(v->map->bs),&v->bstash);
518: #if defined(PETSC_HAVE_MATLAB_ENGINE)
519: PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEnginePut_C",VecMatlabEnginePut_Default);
520: PetscObjectComposeFunction((PetscObject)v,"PetscMatlabEngineGet_C",VecMatlabEngineGet_Default);
521: #endif
522: PetscObjectChangeTypeName((PetscObject)v,VECMPI);
523: return(0);
524: }
526: /*MC
527: VECMPI - VECMPI = "mpi" - The basic parallel vector
529: Options Database Keys:
530: . -vec_type mpi - sets the vector type to VECMPI during a call to VecSetFromOptions()
532: Level: beginner
534: .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateMPIWithArray(), VecType, VecCreateMPI()
535: M*/
537: PETSC_EXTERN PetscErrorCode VecCreate_MPI(Vec vv)
538: {
542: VecCreate_MPI_Private(vv,PETSC_TRUE,0,0);
543: return(0);
544: }
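/*
   Illustrative sketch (not part of pbvec.c): selecting VECMPI programmatically instead of
   through -vec_type mpi. The global size 100 is an arbitrary example value.

     Vec v;
     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,PETSC_DECIDE,100);
     VecSetType(v,VECMPI);
     VecDestroy(&v);
*/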
546: /*MC
547: VECSTANDARD = "standard" - A VECSEQ on one process and VECMPI on more than one process
549: Options Database Keys:
550: . -vec_type standard - sets the vector type to VECSTANDARD during a call to VecSetFromOptions()
552: Level: beginner
554: .seealso: VecCreateSeq(), VecCreateMPI()
555: M*/
557: PETSC_EXTERN PetscErrorCode VecCreate_Standard(Vec v)
558: {
560: PetscMPIInt size;
563: MPI_Comm_size(PetscObjectComm((PetscObject)v),&size);
564: if (size == 1) {
565: VecSetType(v,VECSEQ);
566: } else {
567: VecSetType(v,VECMPI);
568: }
569: return(0);
570: }
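/*
   Illustrative sketch (not part of pbvec.c): VECSTANDARD is normally reached through
   VecSetFromOptions(), so the same code runs as VECSEQ on one rank and VECMPI on several.
   The global size 100 is an arbitrary example value.

     Vec v;
     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,PETSC_DECIDE,100);
     VecSetFromOptions(v);           typically defaults to "standard" unless -vec_type says otherwise
     VecDestroy(&v);
*/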
572: /*@C
573: VecCreateMPIWithArray - Creates a parallel, array-style vector,
574: where the user provides the array space to store the vector values.
576: Collective on MPI_Comm
578: Input Parameters:
579: + comm - the MPI communicator to use
580: . bs - block size, same meaning as VecSetBlockSize()
581: . n - local vector length, cannot be PETSC_DECIDE
582: . N - global vector length (or PETSC_DECIDE to have it calculated)
583: - array - the user provided array to store the vector values
585: Output Parameter:
586: . vv - the vector
588: Notes:
589: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
590: same type as an existing vector.
592: If the user-provided array is NULL, then VecPlaceArray() can be used
593: at a later stage to SET the array for storing the vector values.
595: PETSc does NOT free the array when the vector is destroyed via VecDestroy().
596: The user should not free the array until the vector is destroyed.
598: Level: intermediate
600: Concepts: vectors^creating with array
602: .seealso: VecCreateSeqWithArray(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost(),
603: VecCreateMPI(), VecCreateGhostWithArray(), VecPlaceArray()
605: @*/
606: PetscErrorCode VecCreateMPIWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,const PetscScalar array[],Vec *vv)
607: {
611: if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size of vector");
612: PetscSplitOwnership(comm,&n,&N);
613: VecCreate(comm,vv);
614: VecSetSizes(*vv,n,N);
615: VecSetBlockSize(*vv,bs);
616: VecCreate_MPI_Private(*vv,PETSC_FALSE,0,array);
617: return(0);
618: }
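/*
   Illustrative usage sketch (not part of pbvec.c) for VecCreateMPIWithArray(): each rank
   supplies storage for its own entries. The local size 4 and block size 1 are arbitrary
   example values.

     PetscScalar storage[4];
     Vec         v;
     VecCreateMPIWithArray(PETSC_COMM_WORLD,1,4,PETSC_DECIDE,storage,&v);
     VecSet(v,1.0);                  writes directly into storage
     VecDestroy(&v);                 storage is not freed; the caller still owns it
*/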
620: /*@C
621: VecCreateGhostWithArray - Creates a parallel vector with ghost padding on each processor;
622: the caller allocates the array space.
624: Collective on MPI_Comm
626: Input Parameters:
627: + comm - the MPI communicator to use
628: . n - local vector length
629: . N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
630: . nghost - number of local ghost points
631: . ghosts - global indices of ghost points (or NULL if not needed), these do not need to be in increasing order (sorted)
632: - array - the space to store the vector values (of length at least n + nghost)
634: Output Parameter:
635: . vv - the global vector representation (without ghost points as part of vector)
637: Notes:
638: Use VecGhostGetLocalForm() to access the local, ghosted representation
639: of the vector.
641: This also automatically sets the ISLocalToGlobalMapping() for this vector.
643: Level: advanced
645: Concepts: vectors^creating with array
646: Concepts: vectors^ghosted
648: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
649: VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
650: VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()
652: @*/
653: PetscErrorCode VecCreateGhostWithArray(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
654: {
655: PetscErrorCode ierr;
656: Vec_MPI *w;
657: PetscScalar *larray;
658: IS from,to;
659: ISLocalToGlobalMapping ltog;
660: PetscInt rstart,i,*indices;
663: *vv = 0;
665: if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
666: if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
667: if (nghost < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
668: PetscSplitOwnership(comm,&n,&N);
669: /* Create global representation */
670: VecCreate(comm,vv);
671: VecSetSizes(*vv,n,N);
672: VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost,array);
673: w = (Vec_MPI*)(*vv)->data;
674: /* Create local representation */
675: VecGetArray(*vv,&larray);
676: VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
677: PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
678: VecRestoreArray(*vv,&larray);
680: /*
681: Create scatter context for scattering (updating) ghost values
682: */
683: ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
684: ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
685: VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
686: PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
687: ISDestroy(&to);
688: ISDestroy(&from);
690: /* set local to global mapping for ghosted vector */
691: PetscMalloc1(n+nghost,&indices);
692: VecGetOwnershipRange(*vv,&rstart,NULL);
693: for (i=0; i<n; i++) {
694: indices[i] = rstart + i;
695: }
696: for (i=0; i<nghost; i++) {
697: indices[n+i] = ghosts[i];
698: }
699: ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
700: VecSetLocalToGlobalMapping(*vv,ltog);
701: ISLocalToGlobalMappingDestroy(&ltog);
702: return(0);
703: }
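/*
   Illustrative usage sketch (not part of pbvec.c) for VecCreateGhostWithArray(): the caller
   provides storage of length n + nghost; the trailing nghost slots hold the ghost values in
   the local form. The sizes and ghost indices are arbitrary example values.

     PetscInt    ghosts[2] = {0,1};  global indices to be mirrored locally
     PetscScalar storage[6];         n + nghost = 4 + 2
     Vec         v;
     VecCreateGhostWithArray(PETSC_COMM_WORLD,4,PETSC_DECIDE,2,ghosts,storage,&v);
     VecSet(v,1.0);
     VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);     ghost slots of storage now filled
     VecDestroy(&v);
*/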
705: /*@
706: VecCreateGhost - Creates a parallel vector with ghost padding on each processor.
708: Collective on MPI_Comm
710: Input Parameters:
711: + comm - the MPI communicator to use
712: . n - local vector length
713: . N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
714: . nghost - number of local ghost points
715: - ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)
717: Output Parameter:
718: . vv - the global vector representation (without ghost points as part of vector)
720: Notes:
721: Use VecGhostGetLocalForm() to access the local, ghosted representation
722: of the vector.
724: This also automatically sets the ISLocalToGlobalMapping() for this vector.
726: Level: advanced
728: Concepts: vectors^ghosted
730: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
731: VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
732: VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
733: VecCreateGhostBlock(), VecCreateGhostBlockWithArray(), VecMPISetGhost()
735: @*/
736: PetscErrorCode VecCreateGhost(MPI_Comm comm,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
737: {
741: VecCreateGhostWithArray(comm,n,N,nghost,ghosts,0,vv);
742: return(0);
743: }
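/*
   Illustrative usage sketch (not part of pbvec.c) for VecCreateGhost(): after a ghost update,
   the local form exposes the owned entries followed by the ghost entries. The sizes and ghost
   indices are arbitrary example values.

     Vec      v,lv;
     PetscInt ghosts[2] = {0,1};
     VecCreateGhost(PETSC_COMM_WORLD,4,PETSC_DECIDE,2,ghosts,&v);
     VecSet(v,2.0);
     VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostGetLocalForm(v,&lv);    sequential vector of length 4 + 2
     VecGhostRestoreLocalForm(v,&lv);
     VecDestroy(&v);
*/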
745: /*@
746: VecMPISetGhost - Sets the ghost points for an MPI ghost vector
748: Collective on Vec
750: Input Parameters:
751: + vv - the MPI vector
752: . nghost - number of local ghost points
753: - ghosts - global indices of ghost points, these do not need to be in increasing order (sorted)
756: Notes:
757: Use VecGhostGetLocalForm() to access the local, ghosted representation
758: of the vector.
760: This also automatically sets the ISLocalToGlobalMapping() for this vector.
762: You must call this AFTER you have set the type of the vector (with VecSetType()) and the size (with VecSetSizes()).
764: Level: advanced
766: Concepts: vectors^ghosted
768: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
769: VecGhostGetLocalForm(), VecGhostRestoreLocalForm(), VecGhostUpdateBegin(),
770: VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecGhostUpdateEnd(),
771: VecCreateGhostBlock(), VecCreateGhostBlockWithArray()
773: @*/
774: PetscErrorCode VecMPISetGhost(Vec vv,PetscInt nghost,const PetscInt ghosts[])
775: {
777: PetscBool flg;
780: PetscObjectTypeCompare((PetscObject)vv,VECMPI,&flg);
781: /* if this is already a fully existent VECMPI then destroy it and rebuild it with ghosting */
782: if (flg) {
783: PetscInt n,N;
784: Vec_MPI *w;
785: PetscScalar *larray;
786: IS from,to;
787: ISLocalToGlobalMapping ltog;
788: PetscInt rstart,i,*indices;
789: MPI_Comm comm;
791: PetscObjectGetComm((PetscObject)vv,&comm);
792: n = vv->map->n;
793: N = vv->map->N;
794: (*vv->ops->destroy)(vv);
795: VecSetSizes(vv,n,N);
796: VecCreate_MPI_Private(vv,PETSC_TRUE,nghost,NULL);
797: w = (Vec_MPI*)(vv)->data;
798: /* Create local representation */
799: VecGetArray(vv,&larray);
800: VecCreateSeqWithArray(PETSC_COMM_SELF,1,n+nghost,larray,&w->localrep);
801: PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localrep);
802: VecRestoreArray(vv,&larray);
804: /*
805: Create scatter context for scattering (updating) ghost values
806: */
807: ISCreateGeneral(comm,nghost,ghosts,PETSC_COPY_VALUES,&from);
808: ISCreateStride(PETSC_COMM_SELF,nghost,n,1,&to);
809: VecScatterCreate(vv,from,w->localrep,to,&w->localupdate);
810: PetscLogObjectParent((PetscObject)vv,(PetscObject)w->localupdate);
811: ISDestroy(&to);
812: ISDestroy(&from);
814: /* set local to global mapping for ghosted vector */
815: PetscMalloc1(n+nghost,&indices);
816: VecGetOwnershipRange(vv,&rstart,NULL);
818: for (i=0; i<n; i++) indices[i] = rstart + i;
819: for (i=0; i<nghost; i++) indices[n+i] = ghosts[i];
821: ISLocalToGlobalMappingCreate(comm,1,n+nghost,indices,PETSC_OWN_POINTER,&ltog);
822: VecSetLocalToGlobalMapping(vv,ltog);
823: ISLocalToGlobalMappingDestroy(&ltog);
824: } else if (vv->ops->create == VecCreate_MPI) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set local or global size before setting ghosting");
825: else if (!((PetscObject)vv)->type_name) SETERRQ(PetscObjectComm((PetscObject)vv),PETSC_ERR_ARG_WRONGSTATE,"Must set type to VECMPI before ghosting");
826: return(0);
827: }
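/*
   Illustrative sketch (not part of pbvec.c) for VecMPISetGhost(): the type and sizes must be
   set first, as the checks above require. The sizes and ghost indices are arbitrary example
   values.

     Vec      v;
     PetscInt ghosts[2] = {0,1};
     VecCreate(PETSC_COMM_WORLD,&v);
     VecSetSizes(v,4,PETSC_DECIDE);
     VecSetType(v,VECMPI);
     VecMPISetGhost(v,2,ghosts);
     VecDestroy(&v);
*/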
830: /* ------------------------------------------------------------------------------------------*/
831: /*@C
832: VecCreateGhostBlockWithArray - Creates a parallel vector with ghost padding on each processor;
833: the caller allocates the array space. Indices in the ghost region are based on blocks.
835: Collective on MPI_Comm
837: Input Parameters:
838: + comm - the MPI communicator to use
839: . bs - block size
840: . n - local vector length
841: . N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
842: . nghost - number of local ghost blocks
843: . ghosts - global indices of ghost blocks (or NULL if not needed), counts are by block not by index, these do not need to be in increasing order (sorted)
844: - array - the space to store the vector values (of length at least n + nghost*bs)
846: Output Parameter:
847: . vv - the global vector representation (without ghost points as part of vector)
849: Notes:
850: Use VecGhostGetLocalForm() to access the local, ghosted representation
851: of the vector.
853: n is the local vector size (the total local size, not the number of blocks) while nghost
854: is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
855: portion is bs*nghost
857: Level: advanced
859: Concepts: vectors^creating ghosted
860: Concepts: vectors^creating with array
862: .seealso: VecCreate(), VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
863: VecCreateGhost(), VecCreateSeqWithArray(), VecCreateMPIWithArray(),
864: VecCreateGhostWithArray(), VecCreateGhostBlock()
866: @*/
867: PetscErrorCode VecCreateGhostBlockWithArray(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],const PetscScalar array[],Vec *vv)
868: {
869: PetscErrorCode ierr;
870: Vec_MPI *w;
871: PetscScalar *larray;
872: IS from,to;
873: ISLocalToGlobalMapping ltog;
874: PetscInt rstart,i,nb,*indices;
877: *vv = 0;
879: if (n == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local size");
880: if (nghost == PETSC_DECIDE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Must set local ghost size");
881: if (nghost < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ghost length must be >= 0");
882: if (n % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Local size must be a multiple of block size");
883: PetscSplitOwnership(comm,&n,&N);
884: /* Create global representation */
885: VecCreate(comm,vv);
886: VecSetSizes(*vv,n,N);
887: VecSetBlockSize(*vv,bs);
888: VecCreate_MPI_Private(*vv,PETSC_TRUE,nghost*bs,array);
889: w = (Vec_MPI*)(*vv)->data;
890: /* Create local representation */
891: VecGetArray(*vv,&larray);
892: VecCreateSeqWithArray(PETSC_COMM_SELF,bs,n+bs*nghost,larray,&w->localrep);
893: PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localrep);
894: VecRestoreArray(*vv,&larray);
896: /*
897: Create scatter context for scattering (updating) ghost values
898: */
899: ISCreateBlock(comm,bs,nghost,ghosts,PETSC_COPY_VALUES,&from);
900: ISCreateStride(PETSC_COMM_SELF,bs*nghost,n,1,&to);
901: VecScatterCreate(*vv,from,w->localrep,to,&w->localupdate);
902: PetscLogObjectParent((PetscObject)*vv,(PetscObject)w->localupdate);
903: ISDestroy(&to);
904: ISDestroy(&from);
906: /* set local to global mapping for ghosted vector */
907: nb = n/bs;
908: PetscMalloc1(nb+nghost,&indices);
909: VecGetOwnershipRange(*vv,&rstart,NULL);
910: rstart = rstart/bs;
912: for (i=0; i<nb; i++) indices[i] = rstart + i;
913: for (i=0; i<nghost; i++) indices[nb+i] = ghosts[i];
915: ISLocalToGlobalMappingCreate(comm,bs,nb+nghost,indices,PETSC_OWN_POINTER,&ltog);
916: VecSetLocalToGlobalMapping(*vv,ltog);
917: ISLocalToGlobalMappingDestroy(&ltog);
918: return(0);
919: }
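/*
   Illustrative usage sketch (not part of pbvec.c) for VecCreateGhostBlockWithArray(): with
   block size 2, a local length of 4 (two blocks) and one ghost block, the caller provides
   storage of length n + nghost*bs = 6. All values are arbitrary examples.

     PetscInt    bghosts[1] = {0};   global index of the ghost BLOCK
     PetscScalar storage[6];
     Vec         v;
     VecCreateGhostBlockWithArray(PETSC_COMM_WORLD,2,4,PETSC_DECIDE,1,bghosts,storage,&v);
     VecGhostUpdateBegin(v,INSERT_VALUES,SCATTER_FORWARD);
     VecGhostUpdateEnd(v,INSERT_VALUES,SCATTER_FORWARD);
     VecDestroy(&v);
*/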
921: /*@
922: VecCreateGhostBlock - Creates a parallel vector with ghost padding on each processor.
923: The indexing of the ghost points is done by blocks.
925: Collective on MPI_Comm
927: Input Parameters:
928: + comm - the MPI communicator to use
929: . bs - the block size
930: . n - local vector length
931: . N - global vector length (or PETSC_DECIDE to have it calculated if n is given)
932: . nghost - number of local ghost blocks
933: - ghosts - global indices of ghost blocks, counts are by block, not by individual index, these do not need to be in increasing order (sorted)
935: Output Parameter:
936: . vv - the global vector representation (without ghost points as part of vector)
938: Notes:
939: Use VecGhostGetLocalForm() to access the local, ghosted representation
940: of the vector.
942: n is the local vector size (the total local size, not the number of blocks) while nghost
943: is the number of blocks in the ghost portion, i.e. the number of elements in the ghost
944: portion is bs*nghost
946: Level: advanced
948: Concepts: vectors^ghosted
950: .seealso: VecCreateSeq(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateMPI(),
951: VecGhostGetLocalForm(), VecGhostRestoreLocalForm(),
952: VecCreateGhostWithArray(), VecCreateMPIWithArray(), VecCreateGhostBlockWithArray()
954: @*/
955: PetscErrorCode VecCreateGhostBlock(MPI_Comm comm,PetscInt bs,PetscInt n,PetscInt N,PetscInt nghost,const PetscInt ghosts[],Vec *vv)
956: {
960: VecCreateGhostBlockWithArray(comm,bs,n,N,nghost,ghosts,0,vv);
961: return(0);
962: }
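/*
   Illustrative usage sketch (not part of pbvec.c) for VecCreateGhostBlock(): ghost indices and
   assembled values are addressed by block. All values are arbitrary examples.

     Vec         v;
     PetscInt    bghosts[1] = {0},brow = 0;
     PetscScalar vals[2]    = {1.0,2.0};
     VecCreateGhostBlock(PETSC_COMM_WORLD,2,4,PETSC_DECIDE,1,bghosts,&v);
     VecSetValuesBlocked(v,1,&brow,vals,ADD_VALUES);     one block of bs = 2 values
     VecAssemblyBegin(v);
     VecAssemblyEnd(v);
     VecDestroy(&v);
*/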