Actual source code: vscat.c
1: #define PETSCVEC_DLL
2: /*
3: Code for creating scatters between vectors. This file
4: includes the code for scattering between sequential vectors and
5: some special cases for parallel scatters.
6: */
8: #include "include/private/isimpl.h"
9: #include "include/private/vecimpl.h"
11: /* Logging support */
12: PetscCookie VEC_SCATTER_COOKIE = 0;
14: #if defined(PETSC_USE_DEBUG)
15: /*
16: Checks if any indices are less than zero and generates an error
17: */
20: static PetscErrorCode VecScatterCheckIndices_Private(PetscInt nmax,PetscInt n,const PetscInt *idx)
21: {
22: PetscInt i;
25: for (i=0; i<n; i++) {
26: if (idx[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Negative index %D at %D location",idx[i],i);
27: if (idx[i] >= nmax) SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Index %D at %D location greater than max %D",idx[i],i,nmax);
28: }
29: return(0);
30: }
31: #endif
33: /*
34: This is special scatter code for when the entire parallel vector is copied to each processor.
36: This code was written by Cameron Cooper, Occidental College, Fall 1995,
37: while working at ANL as a SERS student.
38: */
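/*
   A minimal usage sketch (illustrative, not part of the original source; error
   checking omitted): this ToAll path is normally reached through
   VecScatterCreateToAll(), which creates the sequential destination vector and
   the scatter context in one call. The variable names are hypothetical.

      Vec        x,yall;                  // x is a parallel (MPI) vector
      VecScatter toall;
      VecScatterCreateToAll(x,&toall,&yall);
      VecScatterBegin(toall,x,yall,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(toall,x,yall,INSERT_VALUES,SCATTER_FORWARD);
      // every process now holds a complete copy of x in yall
      VecScatterDestroy(toall);
      VecDestroy(yall);
*/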
41: PetscErrorCode VecScatterBegin_MPI_ToAll(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
42: {
43: #if defined(PETSC_USE_64BIT_INDICES)
45: SETERRQ(PETSC_ERR_SUP,"Not implemented due to limited MPI support for extremely long gathers");
46: #else
48: PetscInt yy_n,xx_n;
49: PetscScalar *xv,*yv;
52: VecGetLocalSize(y,&yy_n);
53: VecGetLocalSize(x,&xx_n);
54: VecGetArray(y,&yv);
55: if (x != y) {VecGetArray(x,&xv);}
57: if (mode & SCATTER_REVERSE) {
58: PetscScalar *xvt,*xvt2;
59: VecScatter_MPI_ToAll *scat = (VecScatter_MPI_ToAll*)ctx->todata;
60: PetscInt i;
62: if (addv == INSERT_VALUES) {
63: PetscInt rstart,rend;
64: /*
65: copy the correct part of the local vector into the local storage of
66: the MPI one. Note: this operation only makes sense if all the local
67: vectors have the same values
68: */
69: VecGetOwnershipRange(y,&rstart,&rend);
70: PetscMemcpy(yv,xv+rstart,yy_n*sizeof(PetscScalar));
71: } else {
72: MPI_Comm comm;
73: PetscMPIInt rank;
74: PetscObjectGetComm((PetscObject)y,&comm);
75: MPI_Comm_rank(comm,&rank);
76: if (scat->work1) xvt = scat->work1;
77: else {
78: PetscMalloc(xx_n*sizeof(PetscScalar),&xvt);
79: scat->work1 = xvt;
80: }
81: if (!rank) { /* I am the zeroth processor, values are accumulated here */
82: if (scat->work2) xvt2 = scat->work2;
83: else {
84: PetscMalloc(xx_n*sizeof(PetscScalar),& xvt2);
85: scat->work2 = xvt2;
86: }
87: MPI_Gatherv(yv,yy_n,MPIU_SCALAR,xvt2,scat->count,y->map.range,MPIU_SCALAR,0,ctx->comm);
88: #if defined(PETSC_USE_COMPLEX)
89: MPI_Reduce(xv,xvt,2*xx_n,MPIU_REAL,MPI_SUM,0,ctx->comm);
90: #else
91: MPI_Reduce(xv,xvt,xx_n,MPIU_SCALAR,MPI_SUM,0,ctx->comm);
92: #endif
93: if (addv == ADD_VALUES) {
94: for (i=0; i<xx_n; i++) {
95: xvt[i] += xvt2[i];
96: }
97: #if !defined(PETSC_USE_COMPLEX)
98: } else if (addv == MAX_VALUES) {
99: for (i=0; i<xx_n; i++) {
100: xvt[i] = PetscMax(xvt[i],xvt2[i]);
101: }
102: #endif
103: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
104: MPI_Scatterv(xvt,scat->count,y->map.range,MPIU_SCALAR,yv,yy_n,MPIU_SCALAR,0,ctx->comm);
105: } else {
106: MPI_Gatherv(yv,yy_n,MPIU_SCALAR,0, 0,0,MPIU_SCALAR,0,ctx->comm);
107: #if defined(PETSC_USE_COMPLEX)
108: MPI_Reduce(xv,xvt,2*xx_n,MPIU_REAL,MPI_SUM,0,ctx->comm);
109: #else
110: MPI_Reduce(xv,xvt,xx_n,MPIU_SCALAR,MPI_SUM,0,ctx->comm);
111: #endif
112: MPI_Scatterv(0,scat->count,y->map.range,MPIU_SCALAR,yv,yy_n,MPIU_SCALAR,0,ctx->comm);
113: }
114: }
115: } else {
116: PetscScalar *yvt;
117: VecScatter_MPI_ToAll *scat = (VecScatter_MPI_ToAll*)ctx->todata;
118: PetscInt i;
120: if (addv == INSERT_VALUES) {
121: MPI_Allgatherv(xv,xx_n,MPIU_SCALAR,yv,scat->count,x->map.range,MPIU_SCALAR,ctx->comm);
122: } else {
123: if (scat->work1) yvt = scat->work1;
124: else {
125: PetscMalloc(yy_n*sizeof(PetscScalar),&yvt);
126: scat->work1 = yvt;
127: }
128: MPI_Allgatherv(xv,xx_n,MPIU_SCALAR,yvt,scat->count,x->map.range,MPIU_SCALAR,ctx->comm);
129: if (addv == ADD_VALUES){
130: for (i=0; i<yy_n; i++) {
131: yv[i] += yvt[i];
132: }
133: #if !defined(PETSC_USE_COMPLEX)
134: } else if (addv == MAX_VALUES) {
135: for (i=0; i<yy_n; i++) {
136: yv[i] = PetscMax(yv[i],yvt[i]);
137: }
138: #endif
139: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
140: }
141: }
142: VecRestoreArray(y,&yv);
143: if (x != y) {VecRestoreArray(x,&xv);}
144: #endif
145: return(0);
146: }
148: /*
149: This is special scatter code for when the entire parallel vector is copied to processor 0.
151: */
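/*
   A hedged usage sketch (illustrative only; error checking omitted): this
   ToOne path is normally set up with VecScatterCreateToZero(), which creates a
   destination vector with the full length on process 0 and length zero
   elsewhere. Variable names are hypothetical.

      Vec        x,y0;
      VecScatter tozero;
      VecScatterCreateToZero(x,&tozero,&y0);
      VecScatterBegin(tozero,x,y0,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(tozero,x,y0,INSERT_VALUES,SCATTER_FORWARD);
      // only process 0 now holds the gathered values in y0
      VecScatterDestroy(tozero);
      VecDestroy(y0);
*/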
154: PetscErrorCode VecScatterBegin_MPI_ToOne(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
155: {
156: #if defined(PETSC_USE_64BIT_INDICES)
158: SETERRQ(PETSC_ERR_SUP,"Not implemented due to limited MPI support for extremely long gathers");
159: #else
161: PetscMPIInt rank;
162: PetscInt yy_n,xx_n;
163: PetscScalar *xv,*yv;
164: MPI_Comm comm;
167: VecGetLocalSize(y,&yy_n);
168: VecGetLocalSize(x,&xx_n);
169: VecGetArray(x,&xv);
170: VecGetArray(y,&yv);
172: PetscObjectGetComm((PetscObject)x,&comm);
173: MPI_Comm_rank(comm,&rank);
175: /* -------- Reverse scatter; spread from processor 0 to other processors */
176: if (mode & SCATTER_REVERSE) {
177: PetscScalar *yvt;
178: VecScatter_MPI_ToAll *scat = (VecScatter_MPI_ToAll*)ctx->todata;
179: PetscInt i;
181: if (addv == INSERT_VALUES) {
182: MPI_Scatterv(xv,scat->count,y->map.range,MPIU_SCALAR,yv,yy_n,MPIU_SCALAR,0,ctx->comm);
183: } else {
184: if (scat->work2) yvt = scat->work2;
185: else {
186: PetscMalloc(xx_n*sizeof(PetscScalar),&yvt);
187: scat->work2 = yvt;
188: }
189: MPI_Scatterv(xv,scat->count,y->map.range,MPIU_SCALAR,yvt,yy_n,MPIU_SCALAR,0,ctx->comm);
190: if (addv == ADD_VALUES) {
191: for (i=0; i<yy_n; i++) {
192: yv[i] += yvt[i];
193: }
194: #if !defined(PETSC_USE_COMPLEX)
195: } else if (addv == MAX_VALUES) {
196: for (i=0; i<yy_n; i++) {
197: yv[i] = PetscMax(yv[i],yvt[i]);
198: }
199: #endif
200: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
201: }
202: /* --------- Forward scatter; gather all values onto processor 0 */
203: } else {
204: PetscScalar *yvt = 0;
205: VecScatter_MPI_ToAll *scat = (VecScatter_MPI_ToAll*)ctx->todata;
206: PetscInt i;
208: if (addv == INSERT_VALUES) {
209: MPI_Gatherv(xv,xx_n,MPIU_SCALAR,yv,scat->count,x->map.range,MPIU_SCALAR,0,ctx->comm);
210: } else {
211: if (!rank) {
212: if (scat->work1) yvt = scat->work1;
213: else {
214: PetscMalloc(yy_n*sizeof(PetscScalar),&yvt);
215: scat->work1 = yvt;
216: }
217: }
218: MPI_Gatherv(xv,xx_n,MPIU_SCALAR,yvt,scat->count,x->map.range,MPIU_SCALAR,0,ctx->comm);
219: if (!rank) {
220: if (addv == ADD_VALUES) {
221: for (i=0; i<yy_n; i++) {
222: yv[i] += yvt[i];
223: }
224: #if !defined(PETSC_USE_COMPLEX)
225: } else if (addv == MAX_VALUES) {
226: for (i=0; i<yy_n; i++) {
227: yv[i] = PetscMax(yv[i],yvt[i]);
228: }
229: #endif
230: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
231: }
232: }
233: }
234: VecRestoreArray(x,&xv);
235: VecRestoreArray(y,&yv);
236: #endif
237: return(0);
238: }
240: /*
241: The following two routines are used by both VecScatterBegin_MPI_ToAll() and VecScatterBegin_MPI_ToOne()
242: */
245: PetscErrorCode VecScatterDestroy_MPI_ToAll(VecScatter ctx)
246: {
247: VecScatter_MPI_ToAll *scat = (VecScatter_MPI_ToAll*)ctx->todata;
248: PetscErrorCode ierr;
251: PetscFree(scat->work1);
252: PetscFree(scat->work2);
253: PetscFree2(ctx->todata,scat->count);
254: PetscHeaderDestroy(ctx);
255: return(0);
256: }
260: PetscErrorCode VecScatterDestroy_SGtoSG(VecScatter ctx)
261: {
262: PetscErrorCode ierr;
265: PetscFree4(ctx->todata,((VecScatter_Seq_General*)ctx->todata)->vslots,ctx->fromdata,((VecScatter_Seq_General*)ctx->fromdata)->vslots);
266: PetscHeaderDestroy(ctx);
267: return(0);
268: }
272: PetscErrorCode VecScatterDestroy_SGtoSS(VecScatter ctx)
273: {
274: PetscErrorCode ierr;
277: PetscFree3(ctx->todata,ctx->fromdata,((VecScatter_Seq_General*)ctx->fromdata)->vslots);
278: PetscHeaderDestroy(ctx);
279: return(0);
280: }
284: PetscErrorCode VecScatterDestroy_SStoSG(VecScatter ctx)
285: {
286: PetscErrorCode ierr;
289: PetscFree3(ctx->todata,((VecScatter_Seq_General*)ctx->todata)->vslots,ctx->fromdata);
290: PetscHeaderDestroy(ctx);
291: return(0);
292: }
296: PetscErrorCode VecScatterDestroy_SStoSS(VecScatter ctx)
297: {
301: PetscFree2(ctx->todata,ctx->fromdata);
302: PetscHeaderDestroy(ctx);
303: return(0);
304: }
306: /* -------------------------------------------------------------------------*/
309: PetscErrorCode VecScatterCopy_MPI_ToAll(VecScatter in,VecScatter out)
310: {
311: VecScatter_MPI_ToAll *in_to = (VecScatter_MPI_ToAll*)in->todata,*sto;
312: PetscErrorCode ierr;
313: PetscMPIInt size;
314: PetscInt i;
317: out->begin = in->begin;
318: out->end = in->end;
319: out->copy = in->copy;
320: out->destroy = in->destroy;
321: out->view = in->view;
323: MPI_Comm_size(out->comm,&size);
324: PetscMalloc2(1,VecScatter_MPI_ToAll,&sto,size,PetscMPIInt,&sto->count);
325: sto->type = in_to->type;
326: for (i=0; i<size; i++) {
327: sto->count[i] = in_to->count[i];
328: }
329: sto->work1 = 0;
330: sto->work2 = 0;
331: out->todata = (void*)sto;
332: out->fromdata = (void*)0;
333: return(0);
334: }
336: /* --------------------------------------------------------------------------------------*/
337: /*
338: Scatter: sequential general to sequential general
339: */
342: PetscErrorCode VecScatterBegin_SGtoSG(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
343: {
344: VecScatter_Seq_General *gen_to = (VecScatter_Seq_General*)ctx->todata;
345: VecScatter_Seq_General *gen_from = (VecScatter_Seq_General*)ctx->fromdata;
346: PetscErrorCode ierr;
347: PetscInt i,n = gen_from->n,*fslots,*tslots;
348: PetscScalar *xv,*yv;
349:
351: VecGetArray(x,&xv);
352: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
353: if (mode & SCATTER_REVERSE){
354: gen_to = (VecScatter_Seq_General*)ctx->fromdata;
355: gen_from = (VecScatter_Seq_General*)ctx->todata;
356: }
357: fslots = gen_from->vslots;
358: tslots = gen_to->vslots;
360: if (addv == INSERT_VALUES) {
361: for (i=0; i<n; i++) {yv[tslots[i]] = xv[fslots[i]];}
362: } else if (addv == ADD_VALUES) {
363: for (i=0; i<n; i++) {yv[tslots[i]] += xv[fslots[i]];}
364: #if !defined(PETSC_USE_COMPLEX)
365: } else if (addv == MAX_VALUES) {
366: for (i=0; i<n; i++) {yv[tslots[i]] = PetscMax(yv[tslots[i]],xv[fslots[i]]);}
367: #endif
368: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
369: VecRestoreArray(x,&xv);
370: if (x != y) {VecRestoreArray(y,&yv);}
371: return(0);
372: }
374: /*
375: Scatter: sequential general to sequential stride 1
376: */
379: PetscErrorCode VecScatterBegin_SGtoSS_Stride1(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
380: {
381: VecScatter_Seq_Stride *gen_to = (VecScatter_Seq_Stride*)ctx->todata;
382: VecScatter_Seq_General *gen_from = (VecScatter_Seq_General*)ctx->fromdata;
383: PetscInt i,n = gen_from->n,*fslots = gen_from->vslots;
384: PetscErrorCode ierr;
385: PetscInt first = gen_to->first;
386: PetscScalar *xv,*yv;
387:
389: VecGetArray(x,&xv);
390: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
391: if (mode & SCATTER_REVERSE){
392: xv += first;
393: if (addv == INSERT_VALUES) {
394: for (i=0; i<n; i++) {yv[fslots[i]] = xv[i];}
395: } else if (addv == ADD_VALUES) {
396: for (i=0; i<n; i++) {yv[fslots[i]] += xv[i];}
397: #if !defined(PETSC_USE_COMPLEX)
398: } else if (addv == MAX_VALUES) {
399: for (i=0; i<n; i++) {yv[fslots[i]] = PetscMax(yv[fslots[i]],xv[i]);}
400: #endif
401: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
402: } else {
403: yv += first;
404: if (addv == INSERT_VALUES) {
405: for (i=0; i<n; i++) {yv[i] = xv[fslots[i]];}
406: } else if (addv == ADD_VALUES) {
407: for (i=0; i<n; i++) {yv[i] += xv[fslots[i]];}
408: #if !defined(PETSC_USE_COMPLEX)
409: } else if (addv == MAX_VALUES) {
410: for (i=0; i<n; i++) {yv[i] = PetscMax(yv[i],xv[fslots[i]]);}
411: #endif
412: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
413: }
414: VecRestoreArray(x,&xv);
415: if (x != y) {VecRestoreArray(y,&yv);}
416: return(0);
417: }
419: /*
420: Scatter: sequential general to sequential stride
421: */
424: PetscErrorCode VecScatterBegin_SGtoSS(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
425: {
426: VecScatter_Seq_Stride *gen_to = (VecScatter_Seq_Stride*)ctx->todata;
427: VecScatter_Seq_General *gen_from = (VecScatter_Seq_General*)ctx->fromdata;
428: PetscInt i,n = gen_from->n,*fslots = gen_from->vslots;
429: PetscErrorCode ierr;
430: PetscInt first = gen_to->first,step = gen_to->step;
431: PetscScalar *xv,*yv;
432:
434: VecGetArray(x,&xv);
435: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
437: if (mode & SCATTER_REVERSE){
438: if (addv == INSERT_VALUES) {
439: for (i=0; i<n; i++) {yv[fslots[i]] = xv[first + i*step];}
440: } else if (addv == ADD_VALUES) {
441: for (i=0; i<n; i++) {yv[fslots[i]] += xv[first + i*step];}
442: #if !defined(PETSC_USE_COMPLEX)
443: } else if (addv == MAX_VALUES) {
444: for (i=0; i<n; i++) {yv[fslots[i]] = PetscMax(yv[fslots[i]],xv[first + i*step]);}
445: #endif
446: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
447: } else {
448: if (addv == INSERT_VALUES) {
449: for (i=0; i<n; i++) {yv[first + i*step] = xv[fslots[i]];}
450: } else if (addv == ADD_VALUES) {
451: for (i=0; i<n; i++) {yv[first + i*step] += xv[fslots[i]];}
452: #if !defined(PETSC_USE_COMPLEX)
453: } else if (addv == MAX_VALUES) {
454: for (i=0; i<n; i++) {yv[first + i*step] = PetscMax(yv[first + i*step],xv[fslots[i]]);}
455: #endif
456: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
457: }
458: VecRestoreArray(x,&xv);
459: if (x != y) {VecRestoreArray(y,&yv);}
460: return(0);
461: }
463: /*
464: Scatter: sequential stride 1 to sequential general
465: */
468: PetscErrorCode VecScatterBegin_SStoSG_Stride1(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
469: {
470: VecScatter_Seq_Stride *gen_from = (VecScatter_Seq_Stride*)ctx->fromdata;
471: VecScatter_Seq_General *gen_to = (VecScatter_Seq_General*)ctx->todata;
472: PetscInt i,n = gen_from->n,*fslots = gen_to->vslots;
473: PetscErrorCode ierr;
474: PetscInt first = gen_from->first;
475: PetscScalar *xv,*yv;
476:
478: VecGetArray(x,&xv);
479: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
481: if (mode & SCATTER_REVERSE){
482: yv += first;
483: if (addv == INSERT_VALUES) {
484: for (i=0; i<n; i++) {yv[i] = xv[fslots[i]];}
485: } else if (addv == ADD_VALUES) {
486: for (i=0; i<n; i++) {yv[i] += xv[fslots[i]];}
487: #if !defined(PETSC_USE_COMPLEX)
488: } else if (addv == MAX_VALUES) {
489: for (i=0; i<n; i++) {yv[i] = PetscMax(yv[i],xv[fslots[i]]);}
490: #endif
491: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
492: } else {
493: xv += first;
494: if (addv == INSERT_VALUES) {
495: for (i=0; i<n; i++) {yv[fslots[i]] = xv[i];}
496: } else if (addv == ADD_VALUES) {
497: for (i=0; i<n; i++) {yv[fslots[i]] += xv[i];}
498: #if !defined(PETSC_USE_COMPLEX)
499: } else if (addv == MAX_VALUES) {
500: for (i=0; i<n; i++) {yv[fslots[i]] = PetscMax(yv[fslots[i]],xv[i]);}
501: #endif
502: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
503: }
504: VecRestoreArray(x,&xv);
505: if (x != y) {VecRestoreArray(y,&yv);}
506: return(0);
507: }
511: /*
512: Scatter: sequential stride to sequential general
513: */
514: PetscErrorCode VecScatterBegin_SStoSG(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
515: {
516: VecScatter_Seq_Stride *gen_from = (VecScatter_Seq_Stride*)ctx->fromdata;
517: VecScatter_Seq_General *gen_to = (VecScatter_Seq_General*)ctx->todata;
518: PetscInt i,n = gen_from->n,*fslots = gen_to->vslots;
519: PetscErrorCode ierr;
520: PetscInt first = gen_from->first,step = gen_from->step;
521: PetscScalar *xv,*yv;
522:
524: VecGetArray(x,&xv);
525: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
527: if (mode & SCATTER_REVERSE){
528: if (addv == INSERT_VALUES) {
529: for (i=0; i<n; i++) {yv[first + i*step] = xv[fslots[i]];}
530: } else if (addv == ADD_VALUES) {
531: for (i=0; i<n; i++) {yv[first + i*step] += xv[fslots[i]];}
532: #if !defined(PETSC_USE_COMPLEX)
533: } else if (addv == MAX_VALUES) {
534: for (i=0; i<n; i++) {yv[first + i*step] = PetscMax(yv[first + i*step],xv[fslots[i]]);}
535: #endif
536: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
537: } else {
538: if (addv == INSERT_VALUES) {
539: for (i=0; i<n; i++) {yv[fslots[i]] = xv[first + i*step];}
540: } else if (addv == ADD_VALUES) {
541: for (i=0; i<n; i++) {yv[fslots[i]] += xv[first + i*step];}
542: #if !defined(PETSC_USE_COMPLEX)
543: } else if (addv == MAX_VALUES) {
544: for (i=0; i<n; i++) {yv[fslots[i]] = PetscMax(yv[fslots[i]],xv[first + i*step]);}
545: #endif
546: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
547: }
548: VecRestoreArray(x,&xv);
549: if (x != y) {VecRestoreArray(y,&yv);}
550: return(0);
551: }
553: /*
554: Scatter: sequential stride to sequential stride
555: */
558: PetscErrorCode VecScatterBegin_SStoSS(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
559: {
560: VecScatter_Seq_Stride *gen_to = (VecScatter_Seq_Stride*)ctx->todata;
561: VecScatter_Seq_Stride *gen_from = (VecScatter_Seq_Stride*)ctx->fromdata;
562: PetscInt i,n = gen_from->n,to_first = gen_to->first,to_step = gen_to->step;
563: PetscErrorCode ierr;
564: PetscInt from_first = gen_from->first,from_step = gen_from->step;
565: PetscScalar *xv,*yv;
566:
568: VecGetArray(x,&xv);
569: if (x != y) {VecGetArray(y,&yv);} else {yv = xv;}
571: if (mode & SCATTER_REVERSE){
572: from_first = gen_to->first;
573: to_first = gen_from->first;
574: from_step = gen_to->step;
575: to_step = gen_from->step;
576: }
578: if (addv == INSERT_VALUES) {
579: if (to_step == 1 && from_step == 1) {
580: PetscMemcpy(yv+to_first,xv+from_first,n*sizeof(PetscScalar));
581: } else {
582: for (i=0; i<n; i++) {
583: yv[to_first + i*to_step] = xv[from_first+i*from_step];
584: }
585: }
586: } else if (addv == ADD_VALUES) {
587: if (to_step == 1 && from_step == 1) {
588: yv += to_first; xv += from_first;
589: for (i=0; i<n; i++) {
590: yv[i] += xv[i];
591: }
592: } else {
593: for (i=0; i<n; i++) {
594: yv[to_first + i*to_step] += xv[from_first+i*from_step];
595: }
596: }
597: #if !defined(PETSC_USE_COMPLEX)
598: } else if (addv == MAX_VALUES) {
599: if (to_step == 1 && from_step == 1) {
600: yv += to_first; xv += from_first;
601: for (i=0; i<n; i++) {
602: yv[i] = PetscMax(yv[i],xv[i]);
603: }
604: } else {
605: for (i=0; i<n; i++) {
606: yv[to_first + i*to_step] = PetscMax(yv[to_first + i*to_step],xv[from_first+i*from_step]);
607: }
608: }
609: #endif
610: } else {SETERRQ(PETSC_ERR_ARG_UNKNOWN_TYPE,"Wrong insert option");}
611: VecRestoreArray(x,&xv);
612: if (x != y) {VecRestoreArray(y,&yv);}
613: return(0);
614: }
616: /* --------------------------------------------------------------------------*/
621: PetscErrorCode VecScatterCopy_SGToSG(VecScatter in,VecScatter out)
622: {
623: PetscErrorCode ierr;
624: VecScatter_Seq_General *in_to = (VecScatter_Seq_General*)in->todata,*out_to = PETSC_NULL;
625: VecScatter_Seq_General *in_from = (VecScatter_Seq_General*)in->fromdata,*out_from = PETSC_NULL;
626:
628: out->begin = in->begin;
629: out->end = in->end;
630: out->copy = in->copy;
631: out->destroy = in->destroy;
632: out->view = in->view;
634: PetscMalloc4(1,VecScatter_Seq_General,&out_to,in_to->n,PetscInt,&out_to->vslots,1,VecScatter_Seq_General,&out_from,in_from->n,PetscInt,&out_from->vslots);
635: out_to->n = in_to->n;
636: out_to->type = in_to->type;
637: out_to->nonmatching_computed = PETSC_FALSE;
638: out_to->slots_nonmatching = 0;
639: out_to->is_copy = PETSC_FALSE;
640: PetscMemcpy(out_to->vslots,in_to->vslots,(out_to->n)*sizeof(PetscInt));
642: out_from->n = in_from->n;
643: out_from->type = in_from->type;
644: out_from->nonmatching_computed = PETSC_FALSE;
645: out_from->slots_nonmatching = 0;
646: out_from->is_copy = PETSC_FALSE;
647: PetscMemcpy(out_from->vslots,in_from->vslots,(out_from->n)*sizeof(PetscInt));
649: out->todata = (void*)out_to;
650: out->fromdata = (void*)out_from;
651: return(0);
652: }
657: PetscErrorCode VecScatterCopy_SGToSS(VecScatter in,VecScatter out)
658: {
659: PetscErrorCode ierr;
660: VecScatter_Seq_Stride *in_to = (VecScatter_Seq_Stride*)in->todata,*out_to = PETSC_NULL;
661: VecScatter_Seq_General *in_from = (VecScatter_Seq_General*)in->fromdata,*out_from = PETSC_NULL;
662:
664: out->begin = in->begin;
665: out->end = in->end;
666: out->copy = in->copy;
667: out->destroy = in->destroy;
668: out->view = in->view;
670: PetscMalloc3(1,VecScatter_Seq_Stride,&out_to,1,VecScatter_Seq_General,&out_from,in_from->n,PetscInt,&out_from->vslots);
671: out_to->n = in_to->n;
672: out_to->type = in_to->type;
673: out_to->first = in_to->first;
674: out_to->step = in_to->step;
675: out_to->type = in_to->type;
677: out_from->n = in_from->n;
678: out_from->type = in_from->type;
679: out_from->nonmatching_computed = PETSC_FALSE;
680: out_from->slots_nonmatching = 0;
681: out_from->is_copy = PETSC_FALSE;
682: PetscMemcpy(out_from->vslots,in_from->vslots,(out_from->n)*sizeof(PetscInt));
684: out->todata = (void*)out_to;
685: out->fromdata = (void*)out_from;
686: return(0);
687: }
689: /* --------------------------------------------------------------------------*/
690: /*
691: Scatter: parallel to sequential vector, sequential strides for both.
692: */
695: PetscErrorCode VecScatterCopy_SStoSS(VecScatter in,VecScatter out)
696: {
697: VecScatter_Seq_Stride *in_to = (VecScatter_Seq_Stride*)in->todata,*out_to = PETSC_NULL;
698: VecScatter_Seq_Stride *in_from = (VecScatter_Seq_Stride*)in->fromdata,*out_from = PETSC_NULL;
699: PetscErrorCode ierr;
702: out->begin = in->begin;
703: out->end = in->end;
704: out->copy = in->copy;
705: out->destroy = in->destroy;
706: out->view = in->view;
708: PetscMalloc2(1,VecScatter_Seq_Stride,&out_to,1,VecScatter_Seq_Stride,&out_from);
709: out_to->n = in_to->n;
710: out_to->type = in_to->type;
711: out_to->first = in_to->first;
712: out_to->step = in_to->step;
713: out_to->type = in_to->type;
714: out_from->n = in_from->n;
715: out_from->type = in_from->type;
716: out_from->first = in_from->first;
717: out_from->step = in_from->step;
718: out_from->type = in_from->type;
719: out->todata = (void*)out_to;
720: out->fromdata = (void*)out_from;
721: return(0);
722: }
724: EXTERN PetscErrorCode VecScatterCreate_PtoS(PetscInt,const PetscInt *,PetscInt,const PetscInt *,Vec,Vec,PetscInt,VecScatter);
725: EXTERN PetscErrorCode VecScatterCreate_PtoP(PetscInt,const PetscInt *,PetscInt,const PetscInt *,Vec,Vec,VecScatter);
726: EXTERN PetscErrorCode VecScatterCreate_StoP(PetscInt,const PetscInt *,PetscInt,const PetscInt *,Vec,Vec,PetscInt,VecScatter);
728: /* =======================================================================*/
729: #define VEC_SEQ_ID 0
730: #define VEC_MPI_ID 1
732: /*
733: Blocksizes we have optimized scatters for
734: */
736: #define VecScatterOptimizedBS(mbs) ((2 <= mbs && mbs <= 8) || mbs == 12)
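/* For example, VecScatterOptimizedBS(4) and VecScatterOptimizedBS(12) hold,
   while VecScatterOptimizedBS(1) and VecScatterOptimizedBS(9) do not. */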
738: PetscErrorCode VecScatterCreateEmpty(MPI_Comm comm,VecScatter *newctx)
739: {
740: VecScatter ctx;
744: PetscHeaderCreate(ctx,_p_VecScatter,int,VEC_SCATTER_COOKIE,0,"VecScatter",comm,VecScatterDestroy,VecScatterView);
745: ctx->inuse = PETSC_FALSE;
746: ctx->beginandendtogether = PETSC_FALSE;
747: PetscOptionsHasName(PETSC_NULL,"-vecscatter_merge",&ctx->beginandendtogether);
748: if (ctx->beginandendtogether) {
749: PetscInfo(ctx,"Using combined (merged) vector scatter begin and end\n");
750: }
751: PetscOptionsHasName(PETSC_NULL,"-vecscatter_packtogether",&ctx->packtogether);
752: if (ctx->packtogether) {
753: PetscInfo(ctx,"Pack all messages before sending\n");
754: }
755: *newctx = ctx;
756: return(0);
757: }
761: /*@C
762: VecScatterCreate - Creates a vector scatter context.
764: Collective on Vec
766: Input Parameters:
767: + xin - a vector that defines the shape (parallel data layout of the vector)
768: of vectors from which we scatter
769: . yin - a vector that defines the shape (parallel data layout of the vector)
770: of vectors to which we scatter
771: . ix - the indices of xin to scatter (if PETSC_NULL scatters all values)
772: - iy - the indices of yin to hold results (if PETSC_NULL fills entire vector yin)
774: Output Parameter:
775: . newctx - location to store the new scatter context
777: Options Database Keys: (uses regular MPI_Sends by default)
778: + -vecscatter_ssend - Uses MPI_Ssend_init() instead of MPI_Send_init()
779: . -vecscatter_rsend - use ready receiver mode for MPI sends
780: . -vecscatter_merge - VecScatterBegin() handles all of the communication and VecScatterEnd() is a no-op;
781: this eliminates any chance of overlapping computation and communication
782: . -vecscatter_sendfirst - Posts sends before receives
783: . -vecscatter_packtogether - Pack all messages before sending, receive all messages before unpacking
784: . -vecscatter_alltoall - Uses MPI all to all communication for scatter
785: - -vecscatter_nopack - Avoid packing to work vector when possible (if used with -vecscatter_alltoall then MPI_Alltoallw() is used)
787: $
788: $                                                 --When packing is used--
789: $                            MPI Datatypes (no packing)  sendfirst   merge   packtogether  persistent*   -vecscatter_
790: $
791: $  Message passing   Send              p                     X         X           X          always
792: $                     Ssend             p                     X         X           X          always      _ssend
793: $                     Rsend             p                  nonsense     X           X          always      _rsend
794: $         AlltoAll v or w              X                  nonsense   always        X         nonsense     _alltoall
795: $                MPI_Win               p                  nonsense     p           p         nonsense     _window
796: $
797: $  Corresponding options:   -vecscatter_nopack          _sendfirst  _merge  _packtogether
798: $
799: $  * Since persistent sends and receives require a constant memory address, they can only be used when
800: $    data is packed into the work vector, because the in and out arrays may be different for each call
801: $    to VecScatterBegin()/VecScatterEnd().
802: $
803: $  p indicates possible, but not implemented; X indicates implemented.
805: Level: intermediate
807: Notes:
808: In calls to VecScatterBegin() and VecScatterEnd() you can use different vectors than the xin and
809: yin you used above; BUT they must have the same parallel data layout; for example,
810: they could be obtained from VecDuplicate().
811: A VecScatter context CANNOT be used in two or more simultaneous scatters;
812: that is, you cannot call a second VecScatterBegin() with the same scatter
813: context until VecScatterEnd() has been called on the first VecScatterBegin().
814: In this case a separate VecScatter is needed for each concurrent scatter.
816: Currently the MPI_Send(), MPI_Ssend() and MPI_Rsend() modes all use PERSISTENT versions.
817: (This unfortunately requires that the same in and out arrays be used for each use; this
818: is why, when not using MPI_Alltoallw(), we always pack the input into the work array before sending
819: and unpack upon receiving, instead of using MPI datatypes to avoid the packing/unpacking.)
821: Concepts: scatter^between vectors
822: Concepts: gather^between vectors
824: .seealso: VecScatterDestroy(), VecScatterCreateToAll(), VecScatterCreateToZero()
825: @*/
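/*
   Example (a sketch under assumptions, not part of the original source; error
   checking omitted): gather the first n entries of a parallel vector x into a
   sequential vector y of length n on each process. x, y and n are assumed to
   have been created elsewhere.

      IS         ix,iy;
      VecScatter ctx;
      ISCreateStride(PETSC_COMM_WORLD,n,0,1,&ix);   // global entries 0..n-1 of x
      ISCreateStride(PETSC_COMM_SELF,n,0,1,&iy);    // local entries 0..n-1 of y
      VecScatterCreate(x,ix,y,iy,&ctx);
      VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterDestroy(ctx);
      ISDestroy(ix);
      ISDestroy(iy);
*/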
826: PetscErrorCode VecScatterCreate(Vec xin,IS ix,Vec yin,IS iy,VecScatter *newctx)
827: {
828: VecScatter ctx;
830: PetscMPIInt size;
831: PetscInt totalv,xin_type = VEC_SEQ_ID,yin_type = VEC_SEQ_ID,*range;
832: MPI_Comm comm,ycomm;
833: PetscTruth ixblock,iyblock,iystride,islocal,cando,flag;
834: IS tix = 0,tiy = 0;
838: /*
839: Determine if the vectors are "parallel", i.e. they share a communicator with other
840: processes, or sequential (they do not share a comm). The difference is that parallel
841: vectors treat the index set as providing indices in the global parallel numbering of the
842: vector, while sequential vectors treat the index set as providing indices in the local
843: sequential numbering.
844: */
845: PetscObjectGetComm((PetscObject)xin,&comm);
846: MPI_Comm_size(comm,&size);
847: if (size > 1) {xin_type = VEC_MPI_ID;}
849: PetscObjectGetComm((PetscObject)yin,&ycomm);
850: MPI_Comm_size(ycomm,&size);
851: if (size > 1) {comm = ycomm; yin_type = VEC_MPI_ID;}
852:
853: /* generate the Scatter context */
854: PetscHeaderCreate(ctx,_p_VecScatter,int,VEC_SCATTER_COOKIE,0,"VecScatter",comm,VecScatterDestroy,VecScatterView);
855: ctx->inuse = PETSC_FALSE;
857: ctx->beginandendtogether = PETSC_FALSE;
858: PetscOptionsHasName(PETSC_NULL,"-vecscatter_merge",&ctx->beginandendtogether);
859: if (ctx->beginandendtogether) {
860: PetscInfo(ctx,"Using combined (merged) vector scatter begin and end\n");
861: }
862: PetscOptionsHasName(PETSC_NULL,"-vecscatter_packtogether",&ctx->packtogether);
863: if (ctx->packtogether) {
864: PetscInfo(ctx,"Pack all messages before sending\n");
865: }
867: VecGetLocalSize(xin,&ctx->from_n);
868: VecGetLocalSize(yin,&ctx->to_n);
870: /*
871: if ix or iy is not provided, assume we are just grabbing the entire vector
872: */
873: if (!ix && xin_type == VEC_SEQ_ID) {
874: ISCreateStride(comm,ctx->from_n,0,1,&ix);
875: tix = ix;
876: } else if (!ix && xin_type == VEC_MPI_ID) {
877: if (yin_type == VEC_MPI_ID) {
878: PetscInt ntmp, low;
879: VecGetLocalSize(xin,&ntmp);
880: VecGetOwnershipRange(xin,&low,PETSC_NULL);
881: ISCreateStride(comm,ntmp,low,1,&ix);
882: } else{
883: PetscInt Ntmp;
884: VecGetSize(xin,&Ntmp);
885: ISCreateStride(comm,Ntmp,0,1,&ix);
886: }
887: tix = ix;
888: } else if (!ix) {
889: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"ix not given, but not Seq or MPI vector");
890: }
892: if (!iy && yin_type == VEC_SEQ_ID) {
893: ISCreateStride(comm,ctx->to_n,0,1,&iy);
894: tiy = iy;
895: } else if (!iy && yin_type == VEC_MPI_ID) {
896: if (xin_type == VEC_MPI_ID) {
897: PetscInt ntmp, low;
898: VecGetLocalSize(yin,&ntmp);
899: VecGetOwnershipRange(yin,&low,PETSC_NULL);
900: ISCreateStride(comm,ntmp,low,1,&iy);
901: } else{
902: PetscInt Ntmp;
903: VecGetSize(yin,&Ntmp);
904: ISCreateStride(comm,Ntmp,0,1,&iy);
905: }
906: tiy = iy;
907: } else if (!iy) {
908: SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"iy not given, but not Seq or MPI vector");
909: }
911: /* ===========================================================================================================
912: Check for special cases
913: ==========================================================================================================*/
914: /* ---------------------------------------------------------------------------*/
915: if (xin_type == VEC_SEQ_ID && yin_type == VEC_SEQ_ID) {
916: if (ix->type == IS_GENERAL && iy->type == IS_GENERAL){
917: PetscInt nx,ny,*idx,*idy;
918: VecScatter_Seq_General *to = PETSC_NULL,*from = PETSC_NULL;
920: ISGetLocalSize(ix,&nx);
921: ISGetLocalSize(iy,&ny);
922: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
923: ISGetIndices(ix,&idx);
924: ISGetIndices(iy,&idy);
925: PetscMalloc4(1,VecScatter_Seq_General,&to,nx,PetscInt,&to->vslots,1,VecScatter_Seq_General,&from,nx,PetscInt,&from->vslots);
926: to->n = nx;
927: #if defined(PETSC_USE_DEBUG)
928: VecScatterCheckIndices_Private(ctx->to_n,ny,idy);
929: #endif
930: PetscMemcpy(to->vslots,idy,nx*sizeof(PetscInt));
931: from->n = nx;
932: #if defined(PETSC_USE_DEBUG)
933: VecScatterCheckIndices_Private(ctx->from_n,nx,idx);
934: #endif
935: PetscMemcpy(from->vslots,idx,nx*sizeof(PetscInt));
936: to->type = VEC_SCATTER_SEQ_GENERAL;
937: from->type = VEC_SCATTER_SEQ_GENERAL;
938: ctx->todata = (void*)to;
939: ctx->fromdata = (void*)from;
940: ctx->begin = VecScatterBegin_SGtoSG;
941: ctx->end = 0;
942: ctx->destroy = VecScatterDestroy_SGtoSG;
943: ctx->copy = VecScatterCopy_SGToSG;
944: PetscInfo(xin,"Special case: sequential vector general scatter\n");
945: goto functionend;
946: } else if (ix->type == IS_STRIDE && iy->type == IS_STRIDE){
947: PetscInt nx,ny,to_first,to_step,from_first,from_step;
948: VecScatter_Seq_Stride *from8 = PETSC_NULL,*to8 = PETSC_NULL;
950: ISGetLocalSize(ix,&nx);
951: ISGetLocalSize(iy,&ny);
952: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
953: ISStrideGetInfo(iy,&to_first,&to_step);
954: ISStrideGetInfo(ix,&from_first,&from_step);
955: PetscMalloc2(1,VecScatter_Seq_Stride,&to8,1,VecScatter_Seq_Stride,&from8);
956: to8->n = nx;
957: to8->first = to_first;
958: to8->step = to_step;
959: from8->n = nx;
960: from8->first = from_first;
961: from8->step = from_step;
962: to8->type = VEC_SCATTER_SEQ_STRIDE;
963: from8->type = VEC_SCATTER_SEQ_STRIDE;
964: ctx->todata = (void*)to8;
965: ctx->fromdata = (void*)from8;
966: ctx->begin = VecScatterBegin_SStoSS;
967: ctx->end = 0;
968: ctx->destroy = VecScatterDestroy_SStoSS;
969: ctx->copy = VecScatterCopy_SStoSS;
970: PetscInfo(xin,"Special case: sequential vector stride to stride\n");
971: goto functionend;
972: } else if (ix->type == IS_GENERAL && iy->type == IS_STRIDE){
973: PetscInt nx,ny,*idx,first,step;
974: VecScatter_Seq_General *from9 = PETSC_NULL;
975: VecScatter_Seq_Stride *to9 = PETSC_NULL;
977: ISGetLocalSize(ix,&nx);
978: ISGetIndices(ix,&idx);
979: ISGetLocalSize(iy,&ny);
980: ISStrideGetInfo(iy,&first,&step);
981: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
982: PetscMalloc3(1,VecScatter_Seq_Stride,&to9,1,VecScatter_Seq_General,&from9,nx,PetscInt,&from9->vslots);
983: to9->n = nx;
984: to9->first = first;
985: to9->step = step;
986: from9->n = nx;
987: #if defined(PETSC_USE_DEBUG)
988: VecScatterCheckIndices_Private(ctx->from_n,nx,idx);
989: #endif
990: PetscMemcpy(from9->vslots,idx,nx*sizeof(PetscInt));
991: ctx->todata = (void*)to9; ctx->fromdata = (void*)from9;
992: if (step == 1) ctx->begin = VecScatterBegin_SGtoSS_Stride1;
993: else ctx->begin = VecScatterBegin_SGtoSS;
994: ctx->destroy = VecScatterDestroy_SGtoSS;
995: ctx->end = 0;
996: ctx->copy = VecScatterCopy_SGToSS;
997: to9->type = VEC_SCATTER_SEQ_STRIDE;
998: from9->type = VEC_SCATTER_SEQ_GENERAL;
999: PetscInfo(xin,"Special case: sequential vector general to stride\n");
1000: goto functionend;
1001: } else if (ix->type == IS_STRIDE && iy->type == IS_GENERAL){
1002: PetscInt nx,ny,*idy,first,step;
1003: VecScatter_Seq_General *to10 = PETSC_NULL;
1004: VecScatter_Seq_Stride *from10 = PETSC_NULL;
1006: ISGetLocalSize(ix,&nx);
1007: ISGetIndices(iy,&idy);
1008: ISGetLocalSize(iy,&ny);
1009: ISStrideGetInfo(ix,&first,&step);
1010: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1011: PetscMalloc3(1,VecScatter_Seq_General,&to10,nx,PetscInt,&to10->vslots,1,VecScatter_Seq_Stride,&from10);
1012: from10->n = nx;
1013: from10->first = first;
1014: from10->step = step;
1015: to10->n = nx;
1016: #if defined(PETSC_USE_DEBUG)
1017: VecScatterCheckIndices_Private(ctx->to_n,ny,idy);
1018: #endif
1019: PetscMemcpy(to10->vslots,idy,nx*sizeof(PetscInt));
1020: ctx->todata = (void*)to10;
1021: ctx->fromdata = (void*)from10;
1022: if (step == 1) ctx->begin = VecScatterBegin_SStoSG_Stride1;
1023: else ctx->begin = VecScatterBegin_SStoSG;
1024: ctx->destroy = VecScatterDestroy_SStoSG;
1025: ctx->end = 0;
1026: ctx->copy = 0;
1027: to10->type = VEC_SCATTER_SEQ_GENERAL;
1028: from10->type = VEC_SCATTER_SEQ_STRIDE;
1029: PetscInfo(xin,"Special case: sequential vector stride to general\n");
1030: goto functionend;
1031: } else {
1032: PetscInt nx,ny,*idx,*idy;
1033: VecScatter_Seq_General *to11 = PETSC_NULL,*from11 = PETSC_NULL;
1034: PetscTruth idnx,idny;
1036: ISGetLocalSize(ix,&nx);
1037: ISGetLocalSize(iy,&ny);
1038: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1040: ISIdentity(ix,&idnx);
1041: ISIdentity(iy,&idny);
1042: if (idnx && idny) {
1043: VecScatter_Seq_Stride *to13 = PETSC_NULL,*from13 = PETSC_NULL;
1044: PetscMalloc2(1,VecScatter_Seq_Stride,&to13,1,VecScatter_Seq_Stride,&from13);
1045: to13->n = nx;
1046: to13->first = 0;
1047: to13->step = 1;
1048: from13->n = nx;
1049: from13->first = 0;
1050: from13->step = 1;
1051: to13->type = VEC_SCATTER_SEQ_STRIDE;
1052: from13->type = VEC_SCATTER_SEQ_STRIDE;
1053: ctx->todata = (void*)to13;
1054: ctx->fromdata = (void*)from13;
1055: ctx->begin = VecScatterBegin_SStoSS;
1056: ctx->end = 0;
1057: ctx->destroy = VecScatterDestroy_SStoSS;
1058: ctx->copy = VecScatterCopy_SStoSS;
1059: PetscInfo(xin,"Special case: sequential copy\n");
1060: goto functionend;
1061: }
1063: ISGetIndices(iy,&idy);
1064: ISGetIndices(ix,&idx);
1065: PetscMalloc4(1,VecScatter_Seq_General,&to11,nx,PetscInt,&to11->vslots,1,VecScatter_Seq_General,&from11,nx,PetscInt,&from11->vslots);
1066: to11->n = nx;
1067: #if defined(PETSC_USE_DEBUG)
1068: VecScatterCheckIndices_Private(ctx->to_n,ny,idy);
1069: #endif
1070: PetscMemcpy(to11->vslots,idy,nx*sizeof(PetscInt));
1071: from11->n = nx;
1072: #if defined(PETSC_USE_DEBUG)
1073: VecScatterCheckIndices_Private(ctx->from_n,nx,idx);
1074: #endif
1075: PetscMemcpy(from11->vslots,idx,nx*sizeof(PetscInt));
1076: to11->type = VEC_SCATTER_SEQ_GENERAL;
1077: from11->type = VEC_SCATTER_SEQ_GENERAL;
1078: ctx->todata = (void*)to11;
1079: ctx->fromdata = (void*)from11;
1080: ctx->begin = VecScatterBegin_SGtoSG;
1081: ctx->end = 0;
1082: ctx->destroy = VecScatterDestroy_SGtoSG;
1083: ctx->copy = VecScatterCopy_SGToSG;
1084: ISRestoreIndices(ix,&idx);
1085: ISRestoreIndices(iy,&idy);
1086: PetscInfo(xin,"Sequential vector scatter with block indices\n");
1087: goto functionend;
1088: }
1089: }
1090: /* ---------------------------------------------------------------------------*/
1091: if (xin_type == VEC_MPI_ID && yin_type == VEC_SEQ_ID) {
1093: /* ===========================================================================================================
1094: Check for special cases
1095: ==========================================================================================================*/
1096: islocal = PETSC_FALSE;
1097: /* special case extracting (subset of) local portion */
1098: if (ix->type == IS_STRIDE && iy->type == IS_STRIDE){
1099: PetscInt nx,ny,to_first,to_step,from_first,from_step;
1100: PetscInt start,end;
1101: VecScatter_Seq_Stride *from12 = PETSC_NULL,*to12 = PETSC_NULL;
1103: VecGetOwnershipRange(xin,&start,&end);
1104: ISGetLocalSize(ix,&nx);
1105: ISStrideGetInfo(ix,&from_first,&from_step);
1106: ISGetLocalSize(iy,&ny);
1107: ISStrideGetInfo(iy,&to_first,&to_step);
1108: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1109: if (ix->min >= start && ix->max < end) islocal = PETSC_TRUE; else islocal = PETSC_FALSE;
1110: MPI_Allreduce(&islocal,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1111: if (cando) {
1112: PetscMalloc2(1,VecScatter_Seq_Stride,&to12,1,VecScatter_Seq_Stride,&from12);
1113: to12->n = nx;
1114: to12->first = to_first;
1115: to12->step = to_step;
1116: from12->n = nx;
1117: from12->first = from_first-start;
1118: from12->step = from_step;
1119: to12->type = VEC_SCATTER_SEQ_STRIDE;
1120: from12->type = VEC_SCATTER_SEQ_STRIDE;
1121: ctx->todata = (void*)to12;
1122: ctx->fromdata = (void*)from12;
1123: ctx->begin = VecScatterBegin_SStoSS;
1124: ctx->end = 0;
1125: ctx->destroy = VecScatterDestroy_SStoSS;
1126: ctx->copy = VecScatterCopy_SStoSS;
1127: PetscInfo(xin,"Special case: processors only getting local values\n");
1128: goto functionend;
1129: }
1130: } else {
1131: MPI_Allreduce(&islocal,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1132: }
1134: /* test for special case of all processors getting entire vector */
1135: totalv = 0;
1136: if (ix->type == IS_STRIDE && iy->type == IS_STRIDE){
1137: PetscInt i,nx,ny,to_first,to_step,from_first,from_step,N;
1138: PetscMPIInt *count = PETSC_NULL;
1139: VecScatter_MPI_ToAll *sto = PETSC_NULL;
1141: ISGetLocalSize(ix,&nx);
1142: ISStrideGetInfo(ix,&from_first,&from_step);
1143: ISGetLocalSize(iy,&ny);
1144: ISStrideGetInfo(iy,&to_first,&to_step);
1145: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1146: VecGetSize(xin,&N);
1147: if (nx != N) {
1148: totalv = 0;
1149: } else if (from_first == 0 && from_step == 1 &&
1150: from_first == to_first && from_step == to_step){
1151: totalv = 1;
1152: } else totalv = 0;
1153: MPI_Allreduce(&totalv,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1155: if (cando) {
1157: MPI_Comm_size(ctx->comm,&size);
1158: PetscMalloc2(1,VecScatter_MPI_ToAll,&sto,size,PetscMPIInt,&count);
1159: range = xin->map.range;
1160: for (i=0; i<size; i++) {
1161: count[i] = range[i+1] - range[i];
1162: }
1163: sto->count = count;
1164: sto->work1 = 0;
1165: sto->work2 = 0;
1166: sto->type = VEC_SCATTER_MPI_TOALL;
1167: ctx->todata = (void*)sto;
1168: ctx->fromdata = 0;
1169: ctx->begin = VecScatterBegin_MPI_ToAll;
1170: ctx->end = 0;
1171: ctx->destroy = VecScatterDestroy_MPI_ToAll;
1172: ctx->copy = VecScatterCopy_MPI_ToAll;
1173: PetscInfo(xin,"Special case: all processors get entire parallel vector\n");
1174: goto functionend;
1175: }
1176: } else {
1177: MPI_Allreduce(&totalv,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1178: }
1180: /* test for special case of processor 0 getting entire vector */
1181: totalv = 0;
1182: if (ix->type == IS_STRIDE && iy->type == IS_STRIDE){
1183: PetscInt i,nx,ny,to_first,to_step,from_first,from_step,N;
1184: PetscMPIInt rank,*count = PETSC_NULL;
1185: VecScatter_MPI_ToAll *sto = PETSC_NULL;
1187: PetscObjectGetComm((PetscObject)xin,&comm);
1188: MPI_Comm_rank(comm,&rank);
1189: ISGetLocalSize(ix,&nx);
1190: ISStrideGetInfo(ix,&from_first,&from_step);
1191: ISGetLocalSize(iy,&ny);
1192: ISStrideGetInfo(iy,&to_first,&to_step);
1193: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1194: if (!rank) {
1195: VecGetSize(xin,&N);
1196: if (nx != N) {
1197: totalv = 0;
1198: } else if (from_first == 0 && from_step == 1 &&
1199: from_first == to_first && from_step == to_step){
1200: totalv = 1;
1201: } else totalv = 0;
1202: } else {
1203: if (!nx) totalv = 1;
1204: else totalv = 0;
1205: }
1206: MPI_Allreduce(&totalv,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1208: if (cando) {
1209: MPI_Comm_size(ctx->comm,&size);
1210: PetscMalloc2(1,VecScatter_MPI_ToAll,&sto,size,PetscMPIInt,&count);
1211: range = xin->map.range;
1212: for (i=0; i<size; i++) {
1213: count[i] = range[i+1] - range[i];
1214: }
1215: sto->count = count;
1216: sto->work1 = 0;
1217: sto->work2 = 0;
1218: sto->type = VEC_SCATTER_MPI_TOONE;
1219: ctx->todata = (void*)sto;
1220: ctx->fromdata = 0;
1221: ctx->begin = VecScatterBegin_MPI_ToOne;
1222: ctx->end = 0;
1223: ctx->destroy = VecScatterDestroy_MPI_ToAll;
1224: ctx->copy = VecScatterCopy_MPI_ToAll;
1225: PetscInfo(xin,"Special case: processor zero gets entire parallel vector, rest get none\n");
1226: goto functionend;
1227: }
1228: } else {
1229: MPI_Allreduce(&totalv,&cando,1,MPI_INT,MPI_LAND,xin->comm);
1230: }
1232: ISBlock(ix,&ixblock);
1233: ISBlock(iy,&iyblock);
1234: ISStride(iy,&iystride);
1235: if (ixblock) {
1236: /* special case block to block */
1237: if (iyblock) {
1238: PetscInt nx,ny,*idx,*idy,bsx,bsy;
1239: ISBlockGetBlockSize(iy,&bsy);
1240: ISBlockGetBlockSize(ix,&bsx);
1241: if (bsx == bsy && VecScatterOptimizedBS(bsx)) {
1242: ISBlockGetSize(ix,&nx);
1243: ISBlockGetIndices(ix,&idx);
1244: ISBlockGetSize(iy,&ny);
1245: ISBlockGetIndices(iy,&idy);
1246: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1247: VecScatterCreate_PtoS(nx,idx,ny,idy,xin,yin,bsx,ctx);
1248: ISBlockRestoreIndices(ix,&idx);
1249: ISBlockRestoreIndices(iy,&idy);
1250: PetscInfo(xin,"Special case: blocked indices\n");
1251: goto functionend;
1252: }
1253: /* special case block to stride */
1254: } else if (iystride) {
1255: PetscInt ystart,ystride,ysize,bsx;
1256: ISStrideGetInfo(iy,&ystart,&ystride);
1257: ISGetLocalSize(iy,&ysize);
1258: ISBlockGetBlockSize(ix,&bsx);
1259: /* see if stride index set is equivalent to block index set */
1260: if (VecScatterOptimizedBS(bsx) && ((ystart % bsx) == 0) && (ystride == 1) && ((ysize % bsx) == 0)) {
1261: PetscInt nx,*idx,*idy,il;
1262: ISBlockGetSize(ix,&nx); ISBlockGetIndices(ix,&idx);
1263: if (ysize != bsx*nx) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1264: PetscMalloc(nx*sizeof(PetscInt),&idy);
1265: if (nx) {
1266: idy[0] = ystart;
1267: for (il=1; il<nx; il++) idy[il] = idy[il-1] + bsx;
1268: }
1269: VecScatterCreate_PtoS(nx,idx,nx,idy,xin,yin,bsx,ctx);
1270: PetscFree(idy);
1271: ISBlockRestoreIndices(ix,&idx);
1272: PetscInfo(xin,"Special case: blocked indices to stride\n");
1273: goto functionend;
1274: }
1275: }
1276: }
1277: /* left over general case */
1278: {
1279: PetscInt nx,ny,*idx,*idy;
1280: ISGetLocalSize(ix,&nx);
1281: ISGetIndices(ix,&idx);
1282: ISGetLocalSize(iy,&ny);
1283: ISGetIndices(iy,&idy);
1284: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1285: VecScatterCreate_PtoS(nx,idx,ny,idy,xin,yin,1,ctx);
1286: ISRestoreIndices(ix,&idx);
1287: ISRestoreIndices(iy,&idy);
1288: PetscInfo(xin,"General case: MPI to Seq\n");
1289: goto functionend;
1290: }
1291: }
1292: /* ---------------------------------------------------------------------------*/
1293: if (xin_type == VEC_SEQ_ID && yin_type == VEC_MPI_ID) {
1294: /* ===========================================================================================================
1295: Check for special cases
1296: ==========================================================================================================*/
1297: /* special case local copy portion */
1298: islocal = PETSC_FALSE;
1299: if (ix->type == IS_STRIDE && iy->type == IS_STRIDE){
1300: PetscInt nx,ny,to_first,to_step,from_step,start,end,from_first;
1301: VecScatter_Seq_Stride *from = PETSC_NULL,*to = PETSC_NULL;
1303: VecGetOwnershipRange(yin,&start,&end);
1304: ISGetLocalSize(ix,&nx);
1305: ISStrideGetInfo(ix,&from_first,&from_step);
1306: ISGetLocalSize(iy,&ny);
1307: ISStrideGetInfo(iy,&to_first,&to_step);
1308: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1309: if (iy->min >= start && iy->max < end) islocal = PETSC_TRUE; else islocal = PETSC_FALSE;
1310: MPI_Allreduce(&islocal,&cando,1,MPI_INT,MPI_LAND,yin->comm);
1311: if (cando) {
1312: PetscMalloc2(1,VecScatter_Seq_Stride,&to,1,VecScatter_Seq_Stride,&from);
1313: to->n = nx;
1314: to->first = to_first-start;
1315: to->step = to_step;
1316: from->n = nx;
1317: from->first = from_first;
1318: from->step = from_step;
1319: to->type = VEC_SCATTER_SEQ_STRIDE;
1320: from->type = VEC_SCATTER_SEQ_STRIDE;
1321: ctx->todata = (void*)to;
1322: ctx->fromdata = (void*)from;
1323: ctx->begin = VecScatterBegin_SStoSS;
1324: ctx->end = 0;
1325: ctx->destroy = VecScatterDestroy_SStoSS;
1326: ctx->copy = VecScatterCopy_SStoSS;
1327: PetscInfo(xin,"Special case: sequential stride to MPI stride\n");
1328: goto functionend;
1329: }
1330: } else {
1331: MPI_Allreduce(&islocal,&cando,1,MPI_INT,MPI_LAND,yin->comm);
1332: }
1333: if (ix->type == IS_BLOCK && iy->type == IS_STRIDE){
1334: PetscInt ystart,ystride,ysize,bsx;
1335: ISStrideGetInfo(iy,&ystart,&ystride);
1336: ISGetLocalSize(iy,&ysize);
1337: ISBlockGetBlockSize(ix,&bsx);
1338: /* see if stride index set is equivalent to block index set */
1339: if (VecScatterOptimizedBS(bsx) && ((ystart % bsx) == 0) && (ystride == 1) && ((ysize % bsx) == 0)) {
1340: PetscInt nx,*idx,*idy,il;
1341: ISBlockGetSize(ix,&nx); ISBlockGetIndices(ix,&idx);
1342: if (ysize != bsx*nx) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1343: PetscMalloc(nx*sizeof(PetscInt),&idy);
1344: if (nx) {
1345: idy[0] = ystart;
1346: for (il=1; il<nx; il++) idy[il] = idy[il-1] + bsx;
1347: }
1348: VecScatterCreate_StoP(nx,idx,nx,idy,xin,yin,bsx,ctx);
1349: PetscFree(idy);
1350: ISBlockRestoreIndices(ix,&idx);
1351: PetscInfo(xin,"Special case: Blocked indices to stride\n");
1352: goto functionend;
1353: }
1354: }
1356: /* general case */
1357: {
1358: PetscInt nx,ny,*idx,*idy;
1359: ISGetLocalSize(ix,&nx);
1360: ISGetIndices(ix,&idx);
1361: ISGetLocalSize(iy,&ny);
1362: ISGetIndices(iy,&idy);
1363: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1364: VecScatterCreate_StoP(nx,idx,ny,idy,xin,yin,1,ctx);
1365: ISRestoreIndices(ix,&idx);
1366: ISRestoreIndices(iy,&idy);
1367: PetscInfo(xin,"General case: Seq to MPI\n");
1368: goto functionend;
1369: }
1370: }
1371: /* ---------------------------------------------------------------------------*/
1372: if (xin_type == VEC_MPI_ID && yin_type == VEC_MPI_ID) {
1373: /* no special cases for now */
1374: PetscInt nx,ny,*idx,*idy;
1375: ISGetLocalSize(ix,&nx);
1376: ISGetIndices(ix,&idx);
1377: ISGetLocalSize(iy,&ny);
1378: ISGetIndices(iy,&idy);
1379: if (nx != ny) SETERRQ(PETSC_ERR_ARG_SIZ,"Local scatter sizes don't match");
1380: VecScatterCreate_PtoP(nx,idx,ny,idy,xin,yin,ctx);
1381: ISRestoreIndices(ix,&idx);
1382: ISRestoreIndices(iy,&idy);
1383: PetscInfo(xin,"General case: MPI to MPI\n");
1384: goto functionend;
1385: }
1387: functionend:
1388: *newctx = ctx;
1389: if (tix) {ISDestroy(tix);}
1390: if (tiy) {ISDestroy(tiy);}
1391: PetscOptionsHasName(PETSC_NULL,"-vecscatter_view_info",&flag);
1392: if (flag) {
1393: PetscViewer viewer;
1394: PetscViewerASCIIGetStdout(comm,&viewer);
1395: PetscViewerPushFormat(viewer,PETSC_VIEWER_ASCII_INFO);
1396: VecScatterView(ctx,viewer);
1397: PetscViewerPopFormat(viewer);
1398: }
1399: PetscOptionsHasName(PETSC_NULL,"-vecscatter_view",&flag);
1400: if (flag) {
1401: PetscViewer viewer;
1402: PetscViewerASCIIGetStdout(comm,&viewer);
1403: VecScatterView(ctx,viewer);
1404: }
1405: return(0);
1406: }
1408: /* ------------------------------------------------------------------*/
1411: /*@
1412: VecScatterGetMerged - Returns PETSC_TRUE if the scatter is completed in VecScatterBegin()
1413: and VecScatterEnd() does nothing
1415: Not Collective
1417: Input Parameter:
1418: . ctx - scatter context created with VecScatterCreate()
1420: Output Parameter:
1421: . flg - PETSC_TRUE if both the scatter begin and end are performed during VecScatterBegin()
1423: Level: developer
1425: .seealso: VecScatterCreate(), VecScatterEnd(), VecScatterBegin()
1426: @*/
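/*
   Illustrative use (assumes ctx was created with VecScatterCreate()):

      PetscTruth merged;
      VecScatterGetMerged(ctx,&merged);
      // if merged is PETSC_TRUE, all communication occurs in VecScatterBegin()
      // and the matching VecScatterEnd() call is a no-op
*/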
1427: PetscErrorCode VecScatterGetMerged(VecScatter ctx,PetscTruth *flg)
1428: {
1431: *flg = ctx->beginandendtogether;
1432: return(0);
1433: }
1437: /*@
1438: VecScatterBegin - Begins a generalized scatter from one vector to
1439: another. Complete the scattering phase with VecScatterEnd().
1441: Collective on VecScatter and Vec
1443: Input Parameters:
1444: + inctx - scatter context generated by VecScatterCreate()
1445: . x - the vector from which we scatter
1446: . y - the vector to which we scatter
1447: . addv - either ADD_VALUES or INSERT_VALUES; with INSERT_VALUES mode any location
1448: not scattered to retains its old value, i.e. the vector is NOT first zeroed.
1449: - mode - the scattering mode, usually SCATTER_FORWARD. The available modes are:
1450: SCATTER_FORWARD or SCATTER_REVERSE
1453: Level: intermediate
1455: Options Database: See VecScatterCreate()
1457: Notes:
1458: The vectors x and y need not be the same vectors used in the call
1459: to VecScatterCreate(), but x must have the same parallel data layout
1460: as that passed in as the x to VecScatterCreate(), similarly for the y.
1461: Most likely they have been obtained from VecDuplicate().
1463: You cannot change the values in the input vector between the calls to VecScatterBegin()
1464: and VecScatterEnd().
1466: If you use SCATTER_REVERSE the first two arguments should be reversed, relative to
1467: the SCATTER_FORWARD call.
1468:
1469: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
1471: This scatter is far more general than the conventional
1472: scatter, since it can be a gather or a scatter or a combination,
1473: depending on the indices ix and iy. If x is a parallel vector and y
1474: is sequential, VecScatterBegin() can serve to gather values to a
1475: single processor. Similarly, if y is parallel and x sequential, the
1476: routine can scatter from one processor to many processors.
1478: Concepts: scatter^between vectors
1479: Concepts: gather^between vectors
1481: .seealso: VecScatterCreate(), VecScatterEnd()
1482: @*/
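/*
   Forward/reverse sketch (illustrative; ctx, x and y as in VecScatterCreate()):

      VecScatterBegin(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
      VecScatterEnd(ctx,x,y,INSERT_VALUES,SCATTER_FORWARD);
      // ... use or modify the values in y ...
      VecScatterBegin(ctx,y,x,INSERT_VALUES,SCATTER_REVERSE);   // arguments swapped
      VecScatterEnd(ctx,y,x,INSERT_VALUES,SCATTER_REVERSE);
*/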
1483: PetscErrorCode VecScatterBegin(VecScatter inctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
1484: {
1486: #if defined(PETSC_USE_DEBUG)
1487: PetscInt to_n,from_n;
1488: #endif
1494: if (inctx->inuse) SETERRQ(PETSC_ERR_ARG_WRONGSTATE," Scatter ctx already in use");
1495: #if defined(PETSC_USE_DEBUG)
1496: /*
1497: Error checking to make sure these vectors match the vectors used
1498: to create the vector scatter context. -1 in the from_n and to_n indicate the
1499: vector lengths are unknown (for example with mapped scatters) and thus
1500: no error checking is performed.
1501: */
1502: if (inctx->from_n >= 0 && inctx->to_n >= 0) {
1503: VecGetLocalSize(x,&from_n);
1504: VecGetLocalSize(y,&to_n);
1505: if (mode & SCATTER_REVERSE) {
1506: if (to_n != inctx->from_n) SETERRQ2(PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector to != ctx from size)",to_n,inctx->from_n);
1507: if (from_n != inctx->to_n) SETERRQ2(PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter reverse and vector from != ctx to size)",from_n,inctx->to_n);
1508: } else {
1509: if (to_n != inctx->to_n) SETERRQ2(PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector to != ctx to size)",to_n,inctx->to_n);
1510: if (from_n != inctx->from_n) SETERRQ2(PETSC_ERR_ARG_SIZ,"Vector wrong size %D for scatter %D (scatter forward and vector from != ctx from size)",from_n,inctx->from_n);
1511: }
1512: }
1513: #endif
1515: inctx->inuse = PETSC_TRUE;
1517: (*inctx->begin)(inctx,x,y,addv,mode);
1518: if (inctx->beginandendtogether && inctx->end) {
1519: inctx->inuse = PETSC_FALSE;
1520: (*inctx->end)(inctx,x,y,addv,mode);
1521: }
1523: return(0);
1524: }
1526: /* --------------------------------------------------------------------*/
1529: /*@
1530: VecScatterEnd - Ends a generalized scatter from one vector to another. Call
1531: after first calling VecScatterBegin().
1533: Collective on VecScatter and Vec
1535: Input Parameters:
1536: + ctx - scatter context generated by VecScatterCreate()
1537: . x - the vector from which we scatter
1538: . y - the vector to which we scatter
1539: . addv - either ADD_VALUES or INSERT_VALUES.
1540: - mode - the scattering mode, usually SCATTER_FORWARD. The available modes are:
1541: SCATTER_FORWARD, SCATTER_REVERSE
1543: Level: intermediate
1545: Notes:
1546: If you use SCATTER_REVERSE the first two arguments should be reversed, relative to
1547: the SCATTER_FORWARD call.
1548: y[iy[i]] = x[ix[i]], for i=0,...,ni-1
1550: .seealso: VecScatterBegin(), VecScatterCreate()
1551: @*/
1552: PetscErrorCode VecScatterEnd(VecScatter ctx,Vec x,Vec y,InsertMode addv,ScatterMode mode)
1553: {
1560: ctx->inuse = PETSC_FALSE;
1561: if (!ctx->end) return(0);
1562: if (!ctx->beginandendtogether) {
1564: (*(ctx)->end)(ctx,x,y,addv,mode);
1566: }
1567: return(0);
1568: }
1572: /*@
1573: VecScatterDestroy - Destroys a scatter context created by
1574: VecScatterCreate().
1576: Collective on VecScatter
1578: Input Parameter:
1579: . ctx - the scatter context
1581: Level: intermediate
1583: .seealso: VecScatterCreate(), VecScatterCopy()
1584: @*/
1585: PetscErrorCode VecScatterDestroy(VecScatter ctx)
1586: {
1591: if (--ctx->refct > 0) return(0);
1593: /* if memory was published with AMS then destroy it */
1594: PetscObjectDepublish(ctx);
1596: (*ctx->destroy)(ctx);
1597: return(0);
1598: }
1602: /*@
1603: VecScatterCopy - Makes a copy of a scatter context.
1605: Collective on VecScatter
1607: Input Parameter:
1608: . sctx - the scatter context
1610: Output Parameter:
1611: . ctx - the context copy
1613: Level: advanced
1615: .seealso: VecScatterCreate(), VecScatterDestroy()
1616: @*/
1617: PetscErrorCode VecScatterCopy(VecScatter sctx,VecScatter *ctx)
1618: {
1624: if (!sctx->copy) SETERRQ(PETSC_ERR_SUP,"Cannot copy this type");
1625: PetscHeaderCreate(*ctx,_p_VecScatter,int,VEC_SCATTER_COOKIE,0,"VecScatter",sctx->comm,VecScatterDestroy,VecScatterView);
1626: (*ctx)->to_n = sctx->to_n;
1627: (*ctx)->from_n = sctx->from_n;
1628: (*sctx->copy)(sctx,*ctx);
1629: return(0);
1630: }
1633: /* ------------------------------------------------------------------*/
1636: /*@
1637: VecScatterView - Views a vector scatter context.
1639: Collective on VecScatter
1641: Input Parameters:
1642: + ctx - the scatter context
1643: - viewer - the viewer for displaying the context
1645: Level: intermediate
1647: @*/
1648: PetscErrorCode VecScatterView(VecScatter ctx,PetscViewer viewer)
1649: {
1654: if (!viewer) {
1655: PetscViewerASCIIGetStdout(ctx->comm,&viewer);
1656: }
1658: if (!ctx->view) SETERRQ(PETSC_ERR_SUP,"Cannot view this type of scatter context yet");
1660: (*ctx->view)(ctx,viewer);
1661: return(0);
1662: }
1666: /*@
1667: VecScatterRemap - Remaps the "from" and "to" indices in a
1668: vector scatter context. FOR EXPERTS ONLY!
1670: Collective on VecScatter
1672: Input Parameters:
1673: + scat - vector scatter context
1674: . from - remapping for "from" indices (may be PETSC_NULL)
1675: - to - remapping for "to" indices (may be PETSC_NULL)
1677: Level: developer
1679: Notes: In the parallel case the todata is actually the indices
1680: from which the data is TAKEN! The from stuff is where the
1681: data is finally put. This is VERY VERY confusing!
1683: In the sequential case the todata is the indices where the
1684: data is put and the fromdata is where it is taken from.
1685: This is backwards from the parallel case! CRY! CRY! CRY!
1687: @*/
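/*
   A hedged sketch of remapping only the "to" indices (names hypothetical;
   remapping the "from" indices is not supported by this routine):

      PetscInt rto[N];            // rto[i] = new index to use wherever old index i appeared
      // ... fill rto ...
      VecScatterRemap(scat,rto,PETSC_NULL);
*/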
1688: PetscErrorCode VecScatterRemap(VecScatter scat,PetscInt *rto,PetscInt *rfrom)
1689: {
1690: VecScatter_Seq_General *to,*from;
1691: VecScatter_MPI_General *mto;
1692: PetscInt i;
1699: from = (VecScatter_Seq_General *)scat->fromdata;
1700: mto = (VecScatter_MPI_General *)scat->todata;
1702: if (mto->type == VEC_SCATTER_MPI_TOALL) SETERRQ(PETSC_ERR_ARG_SIZ,"Not for to all scatter");
1704: if (rto) {
1705: if (mto->type == VEC_SCATTER_MPI_GENERAL) {
1706: /* handle off processor parts */
1707: for (i=0; i<mto->starts[mto->n]; i++) {
1708: mto->indices[i] = rto[mto->indices[i]];
1709: }
1710: /* handle local part */
1711: to = &mto->local;
1712: for (i=0; i<to->n; i++) {
1713: to->vslots[i] = rto[to->vslots[i]];
1714: }
1715: } else if (from->type == VEC_SCATTER_SEQ_GENERAL) {
1716: for (i=0; i<from->n; i++) {
1717: from->vslots[i] = rto[from->vslots[i]];
1718: }
1719: } else if (from->type == VEC_SCATTER_SEQ_STRIDE) {
1720: VecScatter_Seq_Stride *sto = (VecScatter_Seq_Stride*)from;
1721:
1722: /* if the remapping is the identity and stride is identity then skip remap */
1723: if (sto->step == 1 && sto->first == 0) {
1724: for (i=0; i<sto->n; i++) {
1725: if (rto[i] != i) {
1726: SETERRQ(PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
1727: }
1728: }
1729: } else SETERRQ(PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
1730: } else SETERRQ(PETSC_ERR_ARG_SIZ,"Unable to remap such scatters");
1731: }
1733: if (rfrom) {
1734: SETERRQ(PETSC_ERR_SUP,"Unable to remap the FROM in scatters yet");
1735: }
1737: /*
1738: Mark the vector lengths as unknown because we do not know the
1739: lengths of the remapped vectors
1740: */
1741: scat->from_n = -1;
1742: scat->to_n = -1;
1744: return(0);
1745: }