Actual source code: mpisbaij.c

  1: #define PETSCMAT_DLL

 3:  #include "src/mat/impls/baij/mpi/mpibaij.h"
 4:  #include "mpisbaij.h"
 5:  #include "src/mat/impls/sbaij/seq/sbaij.h"

  7: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ(Mat);
  8: EXTERN PetscErrorCode MatSetUpMultiply_MPISBAIJ_2comm(Mat);
  9: EXTERN PetscErrorCode DisAssemble_MPISBAIJ(Mat);
 10: EXTERN PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat,PetscInt,IS[],PetscInt);
 11: EXTERN PetscErrorCode MatGetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 12: EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar []);
 13: EXTERN PetscErrorCode MatSetValues_SeqSBAIJ(Mat,PetscInt,const PetscInt [],PetscInt,const PetscInt [],const PetscScalar [],InsertMode);
 14: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 15: EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
 16: EXTERN PetscErrorCode MatGetRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 17: EXTERN PetscErrorCode MatRestoreRow_SeqSBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
 18: EXTERN PetscErrorCode MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
 19: EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
 20: EXTERN PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat,Vec,PetscInt[]);
 21: EXTERN PetscErrorCode MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,PetscInt,PetscInt,Vec);

 23: /*  UGLY, ugly, ugly
 24:    When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
 25:    not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and
 26:    inserts them into the single precision data structure. The function MatSetValuesBlocked_MPISBAIJ()
 27:    converts the entries to single precision and then calls ..._MatScalar() to put them
 28:    into the single precision data structures.
 29: */
 30: #if defined(PETSC_USE_MAT_SINGLE)
 31: EXTERN PetscErrorCode MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 32: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 33: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 34: EXTERN PetscErrorCode MatSetValues_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 35: EXTERN PetscErrorCode MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const MatScalar[],InsertMode);
 36: #else
 37: #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
 38: #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
 39: #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
 40: #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
 41: #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
 42: #endif

 47: PetscErrorCode  MatStoreValues_MPISBAIJ(Mat mat)
 48: {
 49:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 53:   MatStoreValues(aij->A);
 54:   MatStoreValues(aij->B);
 55:   return(0);
 56: }

 62: PetscErrorCode  MatRetrieveValues_MPISBAIJ(Mat mat)
 63: {
 64:   Mat_MPISBAIJ   *aij = (Mat_MPISBAIJ *)mat->data;

 68:   MatRetrieveValues(aij->A);
 69:   MatRetrieveValues(aij->B);
 70:   return(0);
 71: }


 75: #define CHUNKSIZE  10

 77: #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
 78: { \
 79:  \
 80:     brow = row/bs;  \
 81:     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
 82:     rmax = aimax[brow]; nrow = ailen[brow]; \
 83:       bcol = col/bs; \
 84:       ridx = row % bs; cidx = col % bs; \
 85:       low = 0; high = nrow; \
 86:       while (high-low > 3) { \
 87:         t = (low+high)/2; \
 88:         if (rp[t] > bcol) high = t; \
 89:         else              low  = t; \
 90:       } \
 91:       for (_i=low; _i<high; _i++) { \
 92:         if (rp[_i] > bcol) break; \
 93:         if (rp[_i] == bcol) { \
 94:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
 95:           if (addv == ADD_VALUES) *bap += value;  \
 96:           else                    *bap  = value;  \
 97:           goto a_noinsert; \
 98:         } \
 99:       } \
100:       if (a->nonew == 1) goto a_noinsert; \
101:       if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
102:       MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
103:       N = nrow++ - 1;  \
104:       /* shift up all the later entries in this row */ \
105:       for (ii=N; ii>=_i; ii--) { \
106:         rp[ii+1] = rp[ii]; \
107:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
108:       } \
109:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar)); }  \
110:       rp[_i]                      = bcol;  \
111:       ap[bs2*_i + bs*cidx + ridx] = value;  \
112:       a_noinsert:; \
113:     ailen[brow] = nrow; \
114: } 
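
/* A minimal standalone sketch of the search-and-shift insertion performed by
   MatSetValues_SeqSBAIJ_A_Private above, on a plain sorted array of block
   columns (hypothetical helper, not PETSc API; capacity growth, the nonew
   modes and the bs2-sized value storage are omitted): */

static int insert_block_col(int *rp, int nrow, int bcol)
{
  int low = 0, high = nrow, t, i, ii;

  while (high - low > 3) {             /* narrow the window by bisection */
    t = (low + high) / 2;
    if (rp[t] > bcol) high = t;
    else              low  = t;
  }
  for (i = low; i < high; i++) {       /* finish with a linear scan */
    if (rp[i] > bcol) break;
    if (rp[i] == bcol) return nrow;    /* block already present: no insert */
  }
  for (ii = nrow - 1; ii >= i; ii--) { /* shift up the later entries */
    rp[ii + 1] = rp[ii];
  }
  rp[i] = bcol;                        /* place the new block column */
  return nrow + 1;                     /* new row length */
}
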
115: #ifndef MatSetValues_SeqSBAIJ_B_Private
116: #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
117: { \
118:     brow = row/bs;  \
119:     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
120:     rmax = bimax[brow]; nrow = bilen[brow]; \
121:       bcol = col/bs; \
122:       ridx = row % bs; cidx = col % bs; \
123:       low = 0; high = nrow; \
124:       while (high-low > 3) { \
125:         t = (low+high)/2; \
126:         if (rp[t] > bcol) high = t; \
127:         else              low  = t; \
128:       } \
129:       for (_i=low; _i<high; _i++) { \
130:         if (rp[_i] > bcol) break; \
131:         if (rp[_i] == bcol) { \
132:           bap  = ap +  bs2*_i + bs*cidx + ridx; \
133:           if (addv == ADD_VALUES) *bap += value;  \
134:           else                    *bap  = value;  \
135:           goto b_noinsert; \
136:         } \
137:       } \
138:       if (b->nonew == 1) goto b_noinsert; \
139:       if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
140:       MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
141:       N = nrow++ - 1;  \
142:       /* shift up all the later entries in this row */ \
143:       for (ii=N; ii>=_i; ii--) { \
144:         rp[ii+1] = rp[ii]; \
145:         PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar)); \
146:       } \
147:       if (N>=_i) { PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));}  \
148:       rp[_i]                      = bcol;  \
149:       ap[bs2*_i + bs*cidx + ridx] = value;  \
150:       b_noinsert:; \
151:     bilen[brow] = nrow; \
152: } 
153: #endif

155: #if defined(PETSC_USE_MAT_SINGLE)
158: PetscErrorCode MatSetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
159: {
160:   Mat_MPISBAIJ   *b = (Mat_MPISBAIJ*)mat->data;
162:   PetscInt       i,N = m*n;
163:   MatScalar      *vsingle;

166:   if (N > b->setvalueslen) {
167:     PetscFree(b->setvaluescopy);
168:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
169:     b->setvalueslen  = N;
170:   }
171:   vsingle = b->setvaluescopy;

173:   for (i=0; i<N; i++) {
174:     vsingle[i] = v[i];
175:   }
176:   MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
177:   return(0);
178: }

182: PetscErrorCode MatSetValuesBlocked_MPISBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
183: {
 184:   Mat_MPISBAIJ   *b = (Mat_MPISBAIJ*)mat->data;
186:   PetscInt       i,N = m*n*b->bs2;
187:   MatScalar      *vsingle;

190:   if (N > b->setvalueslen) {
191:     PetscFree(b->setvaluescopy);
192:     PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);
193:     b->setvalueslen  = N;
194:   }
195:   vsingle = b->setvaluescopy;
196:   for (i=0; i<N; i++) {
197:     vsingle[i] = v[i];
198:   }
199:   MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);
200:   return(0);
201: }
202: #endif

 204: /* Only add/insert a(i,j) with i<=j (blocks).
 205:    Any a(i,j) with i>j input by the user is ignored.
 206: */
209: PetscErrorCode MatSetValues_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
210: {
211:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
212:   MatScalar      value;
213:   PetscTruth     roworiented = baij->roworiented;
215:   PetscInt       i,j,row,col;
216:   PetscInt       rstart_orig=mat->rmap.rstart;
217:   PetscInt       rend_orig=mat->rmap.rend,cstart_orig=mat->cmap.rstart;
218:   PetscInt       cend_orig=mat->cmap.rend,bs=mat->rmap.bs;

 220:   /* Some variables required by the macros */
221:   Mat            A = baij->A;
222:   Mat_SeqSBAIJ   *a = (Mat_SeqSBAIJ*)(A)->data;
223:   PetscInt       *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
224:   MatScalar      *aa=a->a;

226:   Mat            B = baij->B;
227:   Mat_SeqBAIJ   *b = (Mat_SeqBAIJ*)(B)->data;
228:   PetscInt      *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
229:   MatScalar     *ba=b->a;

231:   PetscInt      *rp,ii,nrow,_i,rmax,N,brow,bcol;
232:   PetscInt      low,high,t,ridx,cidx,bs2=a->bs2;
233:   MatScalar     *ap,*bap;

235:   /* for stash */
236:   PetscInt      n_loc, *in_loc = PETSC_NULL;
237:   MatScalar     *v_loc = PETSC_NULL;


241:   if (!baij->donotstash){
242:     if (n > baij->n_loc) {
243:       PetscFree(baij->in_loc);
244:       PetscFree(baij->v_loc);
245:       PetscMalloc(n*sizeof(PetscInt),&baij->in_loc);
246:       PetscMalloc(n*sizeof(MatScalar),&baij->v_loc);
247:       baij->n_loc = n;
248:     }
249:     in_loc = baij->in_loc;
250:     v_loc  = baij->v_loc;
251:   }

253:   for (i=0; i<m; i++) {
254:     if (im[i] < 0) continue;
255: #if defined(PETSC_USE_DEBUG)
256:     if (im[i] >= mat->rmap.N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap.N-1);
257: #endif
258:     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
259:       row = im[i] - rstart_orig;              /* local row index */
260:       for (j=0; j<n; j++) {
261:         if (im[i]/bs > in[j]/bs){
262:           if (a->ignore_ltriangular){
263:             continue;    /* ignore lower triangular blocks */
264:           } else {
 265:             SETERRQ(PETSC_ERR_USER,"Lower triangular value cannot be set for SBAIJ format. To ignore these values, run with -mat_ignore_lower_triangular or call MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR)");
266:           }
267:         }
268:         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
269:           col = in[j] - cstart_orig;          /* local col index */
270:           brow = row/bs; bcol = col/bs;
271:           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
272:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
273:           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
274:           /* MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv); */
275:         } else if (in[j] < 0) continue;
276: #if defined(PETSC_USE_DEBUG)
277:         else if (in[j] >= mat->cmap.N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap.N-1);}
278: #endif
279:         else {  /* off-diag entry (B) */
280:           if (mat->was_assembled) {
281:             if (!baij->colmap) {
282:               CreateColmap_MPIBAIJ_Private(mat);
283:             }
284: #if defined (PETSC_USE_CTABLE)
285:             PetscTableFind(baij->colmap,in[j]/bs + 1,&col);
286:             col  = col - 1;
287: #else
288:             col = baij->colmap[in[j]/bs] - 1;
289: #endif
290:             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
291:               DisAssemble_MPISBAIJ(mat);
292:               col =  in[j];
 293:               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
294:               B = baij->B;
295:               b = (Mat_SeqBAIJ*)(B)->data;
296:               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
297:               ba=b->a;
298:             } else col += in[j]%bs;
299:           } else col = in[j];
300:           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
301:           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
302:           /* MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv); */
303:         }
304:       }
305:     } else {  /* off processor entry */
306:       if (!baij->donotstash) {
307:         n_loc = 0;
308:         for (j=0; j<n; j++){
309:           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
310:           in_loc[n_loc] = in[j];
311:           if (roworiented) {
312:             v_loc[n_loc] = v[i*n+j];
313:           } else {
314:             v_loc[n_loc] = v[j*m+i];
315:           }
316:           n_loc++;
317:         }
318:         MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);
319:       }
320:     }
321:   }
322:   return(0);
323: }
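
/* A hedged user-level sketch of the contract implemented above, assuming
   block size 1 and an already preallocated matrix mat of type MATMPISBAIJ:
   only entries with row <= column are stored; lower-triangular input either
   errors or is silently dropped, depending on MAT_IGNORE_LOWER_TRIANGULAR. */

   PetscScalar    v = 2.0;
   PetscInt       i = 3, j = 5;   /* i <= j: lands in the upper triangle */
   PetscErrorCode ierr;

   ierr = MatSetValues(mat,1,&i,1,&j,&v,INSERT_VALUES);CHKERRQ(ierr);

   /* the transposed entry (5,3) lies in the lower triangle; by default
      setting it raises an error, but after the option below it is dropped */
   ierr = MatSetOption(mat,MAT_IGNORE_LOWER_TRIANGULAR);CHKERRQ(ierr);
   ierr = MatSetValues(mat,1,&j,1,&i,&v,INSERT_VALUES);CHKERRQ(ierr);
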

327: PetscErrorCode MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const MatScalar v[],InsertMode addv)
328: {
329:   Mat_MPISBAIJ    *baij = (Mat_MPISBAIJ*)mat->data;
330:   const MatScalar *value;
331:   MatScalar       *barray=baij->barray;
332:   PetscTruth      roworiented = baij->roworiented;
333:   PetscErrorCode  ierr;
334:   PetscInt        i,j,ii,jj,row,col,rstart=baij->rstartbs;
 335:   PetscInt        rend=baij->rendbs,cstart=baij->cstartbs,stepval;
 336:   PetscInt        cend=baij->cendbs,bs=mat->rmap.bs,bs2=baij->bs2;

 339:   if (!barray) {
340:     PetscMalloc(bs2*sizeof(MatScalar),&barray);
341:     baij->barray = barray;
342:   }

344:   if (roworiented) {
345:     stepval = (n-1)*bs;
346:   } else {
347:     stepval = (m-1)*bs;
348:   }
349:   for (i=0; i<m; i++) {
350:     if (im[i] < 0) continue;
351: #if defined(PETSC_USE_DEBUG)
352:     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
353: #endif
354:     if (im[i] >= rstart && im[i] < rend) {
355:       row = im[i] - rstart;
356:       for (j=0; j<n; j++) {
 357:         /* If there is only one block column (row-oriented) or one block row (column-oriented), the input is contiguous and no copy is required */
358:         if ((roworiented) && (n == 1)) {
359:           barray = (MatScalar*) v + i*bs2;
360:         } else if((!roworiented) && (m == 1)) {
361:           barray = (MatScalar*) v + j*bs2;
362:         } else { /* Here a copy is required */
363:           if (roworiented) {
364:             value = v + i*(stepval+bs)*bs + j*bs;
365:           } else {
366:             value = v + j*(stepval+bs)*bs + i*bs;
367:           }
368:           for (ii=0; ii<bs; ii++,value+=stepval) {
369:             for (jj=0; jj<bs; jj++) {
370:               *barray++  = *value++;
371:             }
372:           }
373:           barray -=bs2;
374:         }
375: 
376:         if (in[j] >= cstart && in[j] < cend){
377:           col  = in[j] - cstart;
378:           MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);
379:         }
380:         else if (in[j] < 0) continue;
381: #if defined(PETSC_USE_DEBUG)
382:         else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
383: #endif
384:         else {
385:           if (mat->was_assembled) {
386:             if (!baij->colmap) {
387:               CreateColmap_MPIBAIJ_Private(mat);
388:             }

390: #if defined(PETSC_USE_DEBUG)
391: #if defined (PETSC_USE_CTABLE)
392:             { PetscInt data;
393:               PetscTableFind(baij->colmap,in[j]+1,&data);
394:               if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
395:             }
396: #else
397:             if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
398: #endif
399: #endif
400: #if defined (PETSC_USE_CTABLE)
401:             PetscTableFind(baij->colmap,in[j]+1,&col);
402:             col  = (col - 1)/bs;
403: #else
404:             col = (baij->colmap[in[j]] - 1)/bs;
405: #endif
406:             if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
407:               DisAssemble_MPISBAIJ(mat);
408:               col =  in[j];
409:             }
410:           }
411:           else col = in[j];
412:           MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);
413:         }
414:       }
415:     } else {
416:       if (!baij->donotstash) {
417:         if (roworiented) {
418:           MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
419:         } else {
420:           MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);
421:         }
422:       }
423:     }
424:   }
425:   return(0);
426: }

430: PetscErrorCode MatGetValues_MPISBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
431: {
432:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
434:   PetscInt       bs=mat->rmap.bs,i,j,bsrstart = mat->rmap.rstart,bsrend = mat->rmap.rend;
435:   PetscInt       bscstart = mat->cmap.rstart,bscend = mat->cmap.rend,row,col,data;

438:   for (i=0; i<m; i++) {
439:     if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);
440:     if (idxm[i] >= mat->rmap.N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap.N-1);
441:     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
442:       row = idxm[i] - bsrstart;
443:       for (j=0; j<n; j++) {
444:         if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %D",idxn[j]);
445:         if (idxn[j] >= mat->cmap.N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap.N-1);
446:         if (idxn[j] >= bscstart && idxn[j] < bscend){
447:           col = idxn[j] - bscstart;
448:           MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);
449:         } else {
450:           if (!baij->colmap) {
451:             CreateColmap_MPIBAIJ_Private(mat);
452:           }
453: #if defined (PETSC_USE_CTABLE)
454:           PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);
 455:           data--;
456: #else
457:           data = baij->colmap[idxn[j]/bs]-1;
458: #endif
 459:           if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
460:           else {
461:             col  = data + idxn[j]%bs;
462:             MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);
463:           }
464:         }
465:       }
466:     } else {
467:       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
468:     }
469:   }
 470:   return(0);
471: }

475: PetscErrorCode MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
476: {
477:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
479:   PetscReal      sum[2],*lnorm2;

482:   if (baij->size == 1) {
483:      MatNorm(baij->A,type,norm);
484:   } else {
485:     if (type == NORM_FROBENIUS) {
486:       PetscMalloc(2*sizeof(PetscReal),&lnorm2);
487:        MatNorm(baij->A,type,lnorm2);
 488:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
 489:        MatNorm(baij->B,type,lnorm2);
 490:       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
 491:       MPI_Allreduce(lnorm2,sum,2,MPIU_REAL,MPI_SUM,mat->comm);
 492:       *norm = sqrt(sum[0] + 2*sum[1]); /* each entry of B appears twice (B and B^T) in the symmetric matrix */
493:       PetscFree(lnorm2);
494:     } else if (type == NORM_INFINITY || type == NORM_1) { /* max row/column sum */
495:       Mat_SeqSBAIJ *amat=(Mat_SeqSBAIJ*)baij->A->data;
496:       Mat_SeqBAIJ  *bmat=(Mat_SeqBAIJ*)baij->B->data;
497:       PetscReal    *rsum,*rsum2,vabs;
498:       PetscInt     *jj,*garray=baij->garray,rstart=baij->rstartbs,nz;
499:       PetscInt     brow,bcol,col,bs=baij->A->rmap.bs,row,grow,gcol,mbs=amat->mbs;
500:       MatScalar    *v;

502:       PetscMalloc((2*mat->cmap.N+1)*sizeof(PetscReal),&rsum);
503:       rsum2 = rsum + mat->cmap.N;
504:       PetscMemzero(rsum,mat->cmap.N*sizeof(PetscReal));
505:       /* Amat */
506:       v = amat->a; jj = amat->j;
507:       for (brow=0; brow<mbs; brow++) {
508:         grow = bs*(rstart + brow);
509:         nz = amat->i[brow+1] - amat->i[brow];
510:         for (bcol=0; bcol<nz; bcol++){
511:           gcol = bs*(rstart + *jj); jj++;
512:           for (col=0; col<bs; col++){
513:             for (row=0; row<bs; row++){
514:               vabs = PetscAbsScalar(*v); v++;
515:               rsum[gcol+col] += vabs;
516:               /* non-diagonal block */
517:               if (bcol > 0 && vabs > 0.0) rsum[grow+row] += vabs;
518:             }
519:           }
520:         }
521:       }
522:       /* Bmat */
523:       v = bmat->a; jj = bmat->j;
524:       for (brow=0; brow<mbs; brow++) {
525:         grow = bs*(rstart + brow);
526:         nz = bmat->i[brow+1] - bmat->i[brow];
527:         for (bcol=0; bcol<nz; bcol++){
528:           gcol = bs*garray[*jj]; jj++;
529:           for (col=0; col<bs; col++){
530:             for (row=0; row<bs; row++){
531:               vabs = PetscAbsScalar(*v); v++;
532:               rsum[gcol+col] += vabs;
533:               rsum[grow+row] += vabs;
534:             }
535:           }
536:         }
537:       }
538:       MPI_Allreduce(rsum,rsum2,mat->cmap.N,MPIU_REAL,MPI_SUM,mat->comm);
539:       *norm = 0.0;
540:       for (col=0; col<mat->cmap.N; col++) {
541:         if (rsum2[col] > *norm) *norm = rsum2[col];
542:       }
543:       PetscFree(rsum);
544:     } else {
545:       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
546:     }
547:   }
548:   return(0);
549: }

553: PetscErrorCode MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
554: {
555:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
557:   PetscInt       nstash,reallocs;
558:   InsertMode     addv;

561:   if (baij->donotstash) {
562:     return(0);
563:   }

 565:   /* make sure all processors are either in INSERT_VALUES mode or in ADD_VALUES mode */
566:   MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);
567:   if (addv == (ADD_VALUES|INSERT_VALUES)) {
568:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
569:   }
570:   mat->insertmode = addv; /* in case this processor had no cache */

572:   MatStashScatterBegin_Private(&mat->stash,mat->rmap.range);
573:   MatStashScatterBegin_Private(&mat->bstash,baij->rangebs);
 574:   MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);
 575:   PetscInfo2(0,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
 576:   MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);
 577:   PetscInfo2(0,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);
578:   return(0);
579: }

583: PetscErrorCode MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
584: {
585:   Mat_MPISBAIJ   *baij=(Mat_MPISBAIJ*)mat->data;
586:   Mat_SeqSBAIJ   *a=(Mat_SeqSBAIJ*)baij->A->data;
588:   PetscInt       i,j,rstart,ncols,flg,bs2=baij->bs2;
589:   PetscInt       *row,*col,other_disassembled;
590:   PetscMPIInt    n;
591:   PetscTruth     r1,r2,r3;
592:   MatScalar      *val;
593:   InsertMode     addv = mat->insertmode;

595:   /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */

598:   if (!baij->donotstash) {
599:     while (1) {
600:       MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);
601:       if (!flg) break;

603:       for (i=0; i<n;) {
604:         /* Now identify the consecutive vals belonging to the same row */
605:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
606:         if (j < n) ncols = j-i;
607:         else       ncols = n-i;
608:         /* Now assemble all these values with a single function call */
609:         MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);
610:         i = j;
611:       }
612:     }
613:     MatStashScatterEnd_Private(&mat->stash);
 614:     /* Now process the block-stash. Since the values are stashed column-oriented,
 615:        set the roworiented flags to PETSC_FALSE, and restore the original flags
 616:        after the MatSetValues() calls */
617:     r1 = baij->roworiented;
618:     r2 = a->roworiented;
619:     r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
620:     baij->roworiented = PETSC_FALSE;
621:     a->roworiented    = PETSC_FALSE;
 622:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented    = PETSC_FALSE; /* b->roworiented */
623:     while (1) {
624:       MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);
625:       if (!flg) break;
626: 
627:       for (i=0; i<n;) {
628:         /* Now identify the consecutive vals belonging to the same row */
629:         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
630:         if (j < n) ncols = j-i;
631:         else       ncols = n-i;
632:         MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);
633:         i = j;
634:       }
635:     }
636:     MatStashScatterEnd_Private(&mat->bstash);
637:     baij->roworiented = r1;
638:     a->roworiented    = r2;
 639:     ((Mat_SeqBAIJ*)baij->B->data)->roworiented    = r3; /* b->roworiented */
640:   }

642:   MatAssemblyBegin(baij->A,mode);
643:   MatAssemblyEnd(baij->A,mode);

 645:   /* determine if any processor has disassembled; if so we must
 646:      also disassemble ourselves, in order that we may reassemble. */
647:   /*
 648:      if the nonzero structure of submatrix B cannot change then we know that
 649:      no processor disassembled, and thus we can skip this step
650:   */
651:   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
652:     MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);
653:     if (mat->was_assembled && !other_disassembled) {
654:       DisAssemble_MPISBAIJ(mat);
655:     }
656:   }

658:   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
659:     MatSetUpMultiply_MPISBAIJ(mat); /* setup Mvctx and sMvctx */
660:   }
661:   ((Mat_SeqBAIJ*)baij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
662:   MatAssemblyBegin(baij->B,mode);
663:   MatAssemblyEnd(baij->B,mode);
664: 
665:   PetscFree(baij->rowvalues);
666:   baij->rowvalues = 0;

668:   return(0);
669: }
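
/* Together, MatAssemblyBegin/End implement the usual deferred-communication
   assembly: MatSetValues() may legally target rows owned by other processes
   (the values are cached in mat->stash and mat->bstash), MatAssemblyBegin()
   starts the stash scatter, and MatAssemblyEnd() drains the incoming
   messages and inserts them locally as above. A hedged user-level sketch,
   assuming a preallocated matrix mat: */

   PetscScalar    v   = 1.0;
   PetscInt       row = 0, col = 0;  /* may belong to another process */
   PetscErrorCode ierr;

   ierr = MatSetValues(mat,1,&row,1,&col,&v,ADD_VALUES);CHKERRQ(ierr);     /* stashed if off-process */
   ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);          /* starts the stash scatter */
   ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);            /* receives and inserts stashed values */
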

674: static PetscErrorCode MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
675: {
676:   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
677:   PetscErrorCode    ierr;
678:   PetscInt          bs = mat->rmap.bs;
679:   PetscMPIInt       size = baij->size,rank = baij->rank;
680:   PetscTruth        iascii,isdraw;
681:   PetscViewer       sviewer;
682:   PetscViewerFormat format;

685:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
686:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
687:   if (iascii) {
688:     PetscViewerGetFormat(viewer,&format);
689:     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
690:       MatInfo info;
691:       MPI_Comm_rank(mat->comm,&rank);
692:       MatGetInfo(mat,MAT_LOCAL,&info);
693:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
694:               rank,mat->rmap.N,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
695:               mat->rmap.bs,(PetscInt)info.memory);
696:       MatGetInfo(baij->A,MAT_LOCAL,&info);
697:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
698:       MatGetInfo(baij->B,MAT_LOCAL,&info);
699:       PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);
700:       PetscViewerFlush(viewer);
701:       PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");
702:       VecScatterView(baij->Mvctx,viewer);
703:       return(0);
704:     } else if (format == PETSC_VIEWER_ASCII_INFO) {
705:       PetscViewerASCIIPrintf(viewer,"  block size is %D\n",bs);
706:       return(0);
707:     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
708:       return(0);
709:     }
710:   }

712:   if (isdraw) {
713:     PetscDraw  draw;
714:     PetscTruth isnull;
715:     PetscViewerDrawGetDraw(viewer,0,&draw);
716:     PetscDrawIsNull(draw,&isnull); if (isnull) return(0);
717:   }

719:   if (size == 1) {
720:     PetscObjectSetName((PetscObject)baij->A,mat->name);
721:     MatView(baij->A,viewer);
722:   } else {
723:     /* assemble the entire matrix onto first processor. */
724:     Mat          A;
725:     Mat_SeqSBAIJ *Aloc;
726:     Mat_SeqBAIJ  *Bloc;
727:     PetscInt     M = mat->rmap.N,N = mat->cmap.N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
728:     MatScalar    *a;

730:     /* Should this be the same type as mat? */
731:     MatCreate(mat->comm,&A);
732:     if (!rank) {
733:       MatSetSizes(A,M,N,M,N);
734:     } else {
735:       MatSetSizes(A,0,0,M,N);
736:     }
737:     MatSetType(A,MATMPISBAIJ);
738:     MatMPISBAIJSetPreallocation(A,mat->rmap.bs,0,PETSC_NULL,0,PETSC_NULL);
739:     PetscLogObjectParent(mat,A);

741:     /* copy over the A part */
742:     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
743:     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
744:     PetscMalloc(bs*sizeof(PetscInt),&rvals);

746:     for (i=0; i<mbs; i++) {
747:       rvals[0] = bs*(baij->rstartbs + i);
748:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
749:       for (j=ai[i]; j<ai[i+1]; j++) {
750:         col = (baij->cstartbs+aj[j])*bs;
751:         for (k=0; k<bs; k++) {
752:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
753:           col++; a += bs;
754:         }
755:       }
756:     }
757:     /* copy over the B part */
758:     Bloc = (Mat_SeqBAIJ*)baij->B->data;
759:     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
760:     for (i=0; i<mbs; i++) {
761: 
762:       rvals[0] = bs*(baij->rstartbs + i);
763:       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
764:       for (j=ai[i]; j<ai[i+1]; j++) {
765:         col = baij->garray[aj[j]]*bs;
766:         for (k=0; k<bs; k++) {
 767:           MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);
768:           col++; a += bs;
769:         }
770:       }
771:     }
772:     PetscFree(rvals);
773:     MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
774:     MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
775:     /* 
776:        Everyone has to call to draw the matrix since the graphics waits are
777:        synchronized across all processors that share the PetscDraw object
778:     */
779:     PetscViewerGetSingleton(viewer,&sviewer);
780:     if (!rank) {
781:       PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);
782:       MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);
783:     }
784:     PetscViewerRestoreSingleton(viewer,&sviewer);
785:     MatDestroy(A);
786:   }
787:   return(0);
788: }

792: PetscErrorCode MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
793: {
795:   PetscTruth     iascii,isdraw,issocket,isbinary;

798:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);
799:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);
800:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);
801:   PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);
802:   if (iascii || isdraw || issocket || isbinary) {
803:     MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);
804:   } else {
805:     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
806:   }
807:   return(0);
808: }

812: PetscErrorCode MatDestroy_MPISBAIJ(Mat mat)
813: {
814:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;

818: #if defined(PETSC_USE_LOG)
819:   PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap.N,mat->cmap.N);
820: #endif
821:   MatStashDestroy_Private(&mat->stash);
822:   MatStashDestroy_Private(&mat->bstash);
823:   MatDestroy(baij->A);
824:   MatDestroy(baij->B);
825: #if defined (PETSC_USE_CTABLE)
826:   if (baij->colmap) {PetscTableDestroy(baij->colmap);}
827: #else
828:   PetscFree(baij->colmap);
829: #endif
830:   PetscFree(baij->garray);
831:   if (baij->lvec)   {VecDestroy(baij->lvec);}
832:   if (baij->Mvctx)  {VecScatterDestroy(baij->Mvctx);}
833:   if (baij->slvec0) {
834:     VecDestroy(baij->slvec0);
835:     VecDestroy(baij->slvec0b);
836:   }
837:   if (baij->slvec1) {
838:     VecDestroy(baij->slvec1);
839:     VecDestroy(baij->slvec1a);
840:     VecDestroy(baij->slvec1b);
841:   }
842:   if (baij->sMvctx)  {VecScatterDestroy(baij->sMvctx);}
843:   PetscFree(baij->rowvalues);
844:   PetscFree(baij->barray);
845:   PetscFree(baij->hd);
846: #if defined(PETSC_USE_MAT_SINGLE)
847:   PetscFree(baij->setvaluescopy);
848: #endif
849:   PetscFree(baij->in_loc);
850:   PetscFree(baij->v_loc);
851:   PetscFree(baij->rangebs);
852:   PetscFree(baij);

854:   PetscObjectChangeTypeName((PetscObject)mat,0);
855:   PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);
856:   PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);
857:   PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);
858:   PetscObjectComposeFunction((PetscObject)mat,"MatMPISBAIJSetPreallocation_C","",PETSC_NULL);
859:   return(0);
860: }

864: PetscErrorCode MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
865: {
866:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
868:   PetscInt       nt,mbs=a->mbs,bs=A->rmap.bs;
869:   PetscScalar    *x,*from,zero=0.0;
870: 
872:   VecGetLocalSize(xx,&nt);
873:   if (nt != A->cmap.n) {
874:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
875:   }

877:   /* diagonal part */
878:   (*a->A->ops->mult)(a->A,xx,a->slvec1a);
879:   VecSet(a->slvec1b,zero);

881:   /* subdiagonal part */
882:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);
883:   CHKMEMQ;
884:   /* copy x into the vec slvec0 */
885:   VecGetArray(a->slvec0,&from);
886:   VecGetArray(xx,&x);
887:   CHKMEMQ;
888:   PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
889:   CHKMEMQ;
890:   VecRestoreArray(a->slvec0,&from);
891: 
892:   CHKMEMQ;
893:   VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
894:   CHKMEMQ;
895:   VecRestoreArray(xx,&x);
896:   CHKMEMQ;
897:   VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
 898:   CHKMEMQ;
 899:   /* superdiagonal part */
900:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);
 901:   CHKMEMQ;
902:   return(0);
903: }
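
/* Because only the upper triangle (including the diagonal blocks) is stored,
   the routine above assembles y = A*x from three contributions; schematically
   (a descriptive sketch, not additional code):

     With D the local diagonal block (SBAIJ; its mult already applies the
     symmetric part), B the local superdiagonal block, and x_g the ghost
     values gathered into slvec1b by the scatter:

       slvec1a = D * x_local          (diagonal part)
       slvec0b = B^T * x_local        (subdiagonal part, scatter-ADDed back
                                       to the owning processes)
       y       = B * x_g + slvec1a    (superdiagonal part completes the sum) */
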

907: PetscErrorCode MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
908: {
909:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
911:   PetscInt       nt;

914:   VecGetLocalSize(xx,&nt);
915:   if (nt != A->cmap.n) {
916:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
917:   }
918:   VecGetLocalSize(yy,&nt);
919:   if (nt != A->rmap.N) {
 920:     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
921:   }

923:   VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
924:   /* do diagonal part */
925:   (*a->A->ops->mult)(a->A,xx,yy);
 926:   /* do superdiagonal part */
927:   VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
928:   (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);
929:   /* do subdiagonal part */
930:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
931:   VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);
932:   VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);

934:   return(0);
935: }

939: PetscErrorCode MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
940: {
941:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
943:   PetscInt       mbs=a->mbs,bs=A->rmap.bs;
944:   PetscScalar    *x,*from,zero=0.0;
945: 
947:   /*
948:   PetscSynchronizedPrintf(A->comm," MatMultAdd is called ...\n");
949:   PetscSynchronizedFlush(A->comm);
950:   */
951:   /* diagonal part */
952:   (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);
953:   VecSet(a->slvec1b,zero);

955:   /* subdiagonal part */
956:   (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);

958:   /* copy x into the vec slvec0 */
959:   VecGetArray(a->slvec0,&from);
960:   VecGetArray(xx,&x);
961:   PetscMemcpy(from,x,bs*mbs*sizeof(MatScalar));
962:   VecRestoreArray(a->slvec0,&from);
963: 
964:   VecScatterBegin(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
965:   VecRestoreArray(xx,&x);
966:   VecScatterEnd(a->sMvctx,a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD);
967: 
 968:   /* superdiagonal part */
969:   (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);
970: 
971:   return(0);
972: }

976: PetscErrorCode MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
977: {
978:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

982:   VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
983:   /* do diagonal part */
984:   (*a->A->ops->multadd)(a->A,xx,yy,zz);
 985:   /* do superdiagonal part */
986:   VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
987:   (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);

989:   /* do subdiagonal part */
990:   (*a->B->ops->multtranspose)(a->B,xx,a->lvec);
991:   VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);
992:   VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);

994:   return(0);
995: }

997: /*
998:   This only works correctly for square matrices where the subblock A->A is the 
999:    diagonal block
1000: */
1003: PetscErrorCode MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
1004: {
1005:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1009:   /* if (a->rmap.N != a->cmap.N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
1010:   MatGetDiagonal(a->A,v);
1011:   return(0);
1012: }

1016: PetscErrorCode MatScale_MPISBAIJ(Mat A,PetscScalar aa)
1017: {
1018:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1022:   MatScale(a->A,aa);
1023:   MatScale(a->B,aa);
1024:   return(0);
1025: }

1029: PetscErrorCode MatGetRow_MPISBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1030: {
1031:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
1032:   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1034:   PetscInt       bs = matin->rmap.bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
1035:   PetscInt       nztot,nzA,nzB,lrow,brstart = matin->rmap.rstart,brend = matin->rmap.rend;
1036:   PetscInt       *cmap,*idx_p,cstart = mat->rstartbs;

1039:   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1040:   mat->getrowactive = PETSC_TRUE;

1042:   if (!mat->rowvalues && (idx || v)) {
1043:     /*
1044:         allocate enough space to hold information from the longest row.
1045:     */
1046:     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
1047:     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
1048:     PetscInt     max = 1,mbs = mat->mbs,tmp;
1049:     for (i=0; i<mbs; i++) {
1050:       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
1051:       if (max < tmp) { max = tmp; }
1052:     }
1053:     PetscMalloc(max*bs2*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);
1054:     mat->rowindices = (PetscInt*)(mat->rowvalues + max*bs2);
1055:   }
1056: 
1057:   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
1058:   lrow = row - brstart;  /* local row index */

1060:   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1061:   if (!v)   {pvA = 0; pvB = 0;}
1062:   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1063:   (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);
1064:   (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);
1065:   nztot = nzA + nzB;

1067:   cmap  = mat->garray;
1068:   if (v  || idx) {
1069:     if (nztot) {
1070:       /* Sort by increasing column numbers, assuming A and B already sorted */
1071:       PetscInt imark = -1;
1072:       if (v) {
1073:         *v = v_p = mat->rowvalues;
1074:         for (i=0; i<nzB; i++) {
1075:           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1076:           else break;
1077:         }
1078:         imark = i;
1079:         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1080:         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1081:       }
1082:       if (idx) {
1083:         *idx = idx_p = mat->rowindices;
1084:         if (imark > -1) {
1085:           for (i=0; i<imark; i++) {
1086:             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1087:           }
1088:         } else {
1089:           for (i=0; i<nzB; i++) {
1090:             if (cmap[cworkB[i]/bs] < cstart)
1091:               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1092:             else break;
1093:           }
1094:           imark = i;
1095:         }
1096:         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1097:         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1098:       }
1099:     } else {
1100:       if (idx) *idx = 0;
1101:       if (v)   *v   = 0;
1102:     }
1103:   }
1104:   *nz = nztot;
1105:   (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);
1106:   (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);
1107:   return(0);
1108: }
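
/* The routine above stitches the returned row together from three already
   sorted pieces; schematically (a descriptive sketch, not additional code):

     [ B entries with global block column < cstart | all A entries | remaining B entries ]

   imark marks the split point in the B work arrays, and cmap (= garray)
   maps local B block columns back to global block columns. */
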

1112: PetscErrorCode MatRestoreRow_MPISBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1113: {
1114:   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

1117:   if (!baij->getrowactive) {
1118:     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1119:   }
1120:   baij->getrowactive = PETSC_FALSE;
1121:   return(0);
1122: }

1126: PetscErrorCode MatGetRowUpperTriangular_MPISBAIJ(Mat A)
1127: {
1128:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1129:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1132:   aA->getrow_utriangular = PETSC_TRUE;
1133:   return(0);
1134: }
1137: PetscErrorCode MatRestoreRowUpperTriangular_MPISBAIJ(Mat A)
1138: {
1139:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1140:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1143:   aA->getrow_utriangular = PETSC_FALSE;
1144:   return(0);
1145: }

1149: PetscErrorCode MatRealPart_MPISBAIJ(Mat A)
1150: {
1151:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1155:   MatRealPart(a->A);
1156:   MatRealPart(a->B);
1157:   return(0);
1158: }

1162: PetscErrorCode MatImaginaryPart_MPISBAIJ(Mat A)
1163: {
1164:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1168:   MatImaginaryPart(a->A);
1169:   MatImaginaryPart(a->B);
1170:   return(0);
1171: }

1175: PetscErrorCode MatZeroEntries_MPISBAIJ(Mat A)
1176: {
1177:   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;

1181:   MatZeroEntries(l->A);
1182:   MatZeroEntries(l->B);
1183:   return(0);
1184: }

1188: PetscErrorCode MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1189: {
1190:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)matin->data;
1191:   Mat            A = a->A,B = a->B;
1193:   PetscReal      isend[5],irecv[5];

1196:   info->block_size     = (PetscReal)matin->rmap.bs;
1197:   MatGetInfo(A,MAT_LOCAL,info);
1198:   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1199:   isend[3] = info->memory;  isend[4] = info->mallocs;
1200:   MatGetInfo(B,MAT_LOCAL,info);
1201:   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1202:   isend[3] += info->memory;  isend[4] += info->mallocs;
1203:   if (flag == MAT_LOCAL) {
1204:     info->nz_used      = isend[0];
1205:     info->nz_allocated = isend[1];
1206:     info->nz_unneeded  = isend[2];
1207:     info->memory       = isend[3];
1208:     info->mallocs      = isend[4];
1209:   } else if (flag == MAT_GLOBAL_MAX) {
1210:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);
1211:     info->nz_used      = irecv[0];
1212:     info->nz_allocated = irecv[1];
1213:     info->nz_unneeded  = irecv[2];
1214:     info->memory       = irecv[3];
1215:     info->mallocs      = irecv[4];
1216:   } else if (flag == MAT_GLOBAL_SUM) {
1217:     MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);
1218:     info->nz_used      = irecv[0];
1219:     info->nz_allocated = irecv[1];
1220:     info->nz_unneeded  = irecv[2];
1221:     info->memory       = irecv[3];
1222:     info->mallocs      = irecv[4];
1223:   } else {
1224:     SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
1225:   }
1226:   info->rows_global       = (PetscReal)matin->rmap.N;
1227:   info->columns_global    = (PetscReal)matin->cmap.N;
1228:   info->rows_local        = (PetscReal)matin->rmap.n;
1229:   info->columns_local     = (PetscReal)matin->cmap.n;
1230:   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1231:   info->fill_ratio_needed = 0;
1232:   info->factor_mallocs    = 0;
1233:   return(0);
1234: }

1238: PetscErrorCode MatSetOption_MPISBAIJ(Mat A,MatOption op)
1239: {
1240:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
1241:   Mat_SeqSBAIJ   *aA = (Mat_SeqSBAIJ*)a->A->data;

1245:   switch (op) {
1246:   case MAT_NO_NEW_NONZERO_LOCATIONS:
1247:   case MAT_YES_NEW_NONZERO_LOCATIONS:
1248:   case MAT_COLUMNS_UNSORTED:
1249:   case MAT_COLUMNS_SORTED:
1250:   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1251:   case MAT_KEEP_ZEROED_ROWS:
1252:   case MAT_NEW_NONZERO_LOCATION_ERR:
1253:     MatSetOption(a->A,op);
1254:     MatSetOption(a->B,op);
1255:     break;
1256:   case MAT_ROW_ORIENTED:
1257:     a->roworiented = PETSC_TRUE;
1258:     MatSetOption(a->A,op);
1259:     MatSetOption(a->B,op);
1260:     break;
1261:   case MAT_ROWS_SORTED:
1262:   case MAT_ROWS_UNSORTED:
1263:   case MAT_YES_NEW_DIAGONALS:
1264:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1265:     break;
1266:   case MAT_COLUMN_ORIENTED:
1267:     a->roworiented = PETSC_FALSE;
1268:     MatSetOption(a->A,op);
1269:     MatSetOption(a->B,op);
1270:     break;
1271:   case MAT_IGNORE_OFF_PROC_ENTRIES:
1272:     a->donotstash = PETSC_TRUE;
1273:     break;
1274:   case MAT_NO_NEW_DIAGONALS:
1275:     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1276:   case MAT_USE_HASH_TABLE:
1277:     a->ht_flag = PETSC_TRUE;
1278:     break;
1279:   case MAT_NOT_SYMMETRIC:
1280:   case MAT_NOT_STRUCTURALLY_SYMMETRIC:
1281:   case MAT_HERMITIAN:
1282:     SETERRQ(PETSC_ERR_SUP,"Matrix must be symmetric");
1283:   case MAT_SYMMETRIC:
1284:   case MAT_STRUCTURALLY_SYMMETRIC:
1285:   case MAT_NOT_HERMITIAN:
1286:   case MAT_SYMMETRY_ETERNAL:
1287:   case MAT_NOT_SYMMETRY_ETERNAL:
1288:     PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);
1289:     break;
1290:   case MAT_IGNORE_LOWER_TRIANGULAR:
1291:     aA->ignore_ltriangular = PETSC_TRUE;
1292:     break;
1293:   case MAT_ERROR_LOWER_TRIANGULAR:
1294:     aA->ignore_ltriangular = PETSC_FALSE;
1295:     break;
1296:   case MAT_GETROW_UPPERTRIANGULAR:
1297:     aA->getrow_utriangular = PETSC_TRUE;
1298:     break;
1299:   default:
1300:     SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
1301:   }
1302:   return(0);
1303: }

1307: PetscErrorCode MatTranspose_MPISBAIJ(Mat A,Mat *B)
1308: {
1311:   MatDuplicate(A,MAT_COPY_VALUES,B);
1312:   return(0);
1313: }

1317: PetscErrorCode MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1318: {
1319:   Mat_MPISBAIJ   *baij = (Mat_MPISBAIJ*)mat->data;
1320:   Mat            a=baij->A, b=baij->B;
1322:   PetscInt       nv,m,n;
1323:   PetscTruth     flg;

1326:   if (ll != rr){
1327:     VecEqual(ll,rr,&flg);
1328:     if (!flg)
1329:       SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1330:   }
1331:   if (!ll) return(0);

1333:   MatGetLocalSize(mat,&m,&n);
1334:   if (m != n) SETERRQ2(PETSC_ERR_ARG_SIZ,"For symmetric format, local sizes %D and %D must be the same",m,n);
1335: 
1336:   VecGetLocalSize(rr,&nv);
1337:   if (nv!=n) SETERRQ(PETSC_ERR_ARG_SIZ,"Left and right vectors have non-conforming local sizes");

1339:   VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1340: 
1341:   /* left diagonalscale the off-diagonal part */
1342:   (*b->ops->diagonalscale)(b,ll,PETSC_NULL);
1343: 
1344:   /* scale the diagonal part */
1345:   (*a->ops->diagonalscale)(a,ll,rr);

1347:   /* right diagonalscale the off-diagonal part */
1348:   VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);
1349:   (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);
1350:   return(0);
1351: }

1355: PetscErrorCode MatSetUnfactored_MPISBAIJ(Mat A)
1356: {
1357:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;

1361:   MatSetUnfactored(a->A);
1362:   return(0);
1363: }

1365: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);

1369: PetscErrorCode MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1370: {
1371:   Mat_MPISBAIJ   *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1372:   Mat            a,b,c,d;
1373:   PetscTruth     flg;

1377:   a = matA->A; b = matA->B;
1378:   c = matB->A; d = matB->B;

1380:   MatEqual(a,c,&flg);
1381:   if (flg) {
1382:     MatEqual(b,d,&flg);
1383:   }
1384:   MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);
1385:   return(0);
1386: }

1390: PetscErrorCode MatCopy_MPISBAIJ(Mat A,Mat B,MatStructure str)
1391: {
1393:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ *)A->data;
1394:   Mat_MPISBAIJ   *b = (Mat_MPISBAIJ *)B->data;

1397:   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1398:   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1399:     MatGetRowUpperTriangular(A);
1400:     MatCopy_Basic(A,B,str);
1401:     MatRestoreRowUpperTriangular(A);
1402:   } else {
1403:     MatCopy(a->A,b->A,str);
1404:     MatCopy(a->B,b->B,str);
1405:   }
1406:   return(0);
1407: }

1411: PetscErrorCode MatSetUpPreallocation_MPISBAIJ(Mat A)
1412: {

1416:   MatMPISBAIJSetPreallocation(A,PetscMax(A->rmap.bs,1),PETSC_DEFAULT,0,PETSC_DEFAULT,0);
1417:   return(0);
1418: }

1420:  #include "petscblaslapack.h"
1423: PetscErrorCode MatAXPY_MPISBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1424: {
1426:   Mat_MPISBAIJ   *xx=(Mat_MPISBAIJ *)X->data,*yy=(Mat_MPISBAIJ *)Y->data;
1427:   PetscBLASInt   bnz,one=1;
1428:   Mat_SeqSBAIJ   *xa,*ya;
1429:   Mat_SeqBAIJ    *xb,*yb;

1432:   if (str == SAME_NONZERO_PATTERN) {
1433:     PetscScalar alpha = a;
1434:     xa = (Mat_SeqSBAIJ *)xx->A->data;
1435:     ya = (Mat_SeqSBAIJ *)yy->A->data;
1436:     bnz = (PetscBLASInt)xa->nz;
1437:     BLASaxpy_(&bnz,&alpha,xa->a,&one,ya->a,&one);
1438:     xb = (Mat_SeqBAIJ *)xx->B->data;
1439:     yb = (Mat_SeqBAIJ *)yy->B->data;
1440:     bnz = (PetscBLASInt)xb->nz;
1441:     BLASaxpy_(&bnz,&alpha,xb->a,&one,yb->a,&one);
1442:   } else {
1443:     MatGetRowUpperTriangular(X);
1444:     MatAXPY_Basic(Y,a,X,str);
1445:     MatRestoreRowUpperTriangular(X);
1446:   }
1447:   return(0);
1448: }

1452: PetscErrorCode MatGetSubMatrices_MPISBAIJ(Mat A,PetscInt n,const IS irow[],const IS icol[],MatReuse scall,Mat *B[])
1453: {
1455:   PetscInt       i;
1456:   PetscTruth     flg;

1459:   for (i=0; i<n; i++) {
1460:     ISEqual(irow[i],icol[i],&flg);
1461:     if (!flg) {
1462:       SETERRQ(PETSC_ERR_SUP,"Can only get symmetric submatrix for MPISBAIJ matrices");
1463:     }
1464:   }
1465:   MatGetSubMatrices_MPIBAIJ(A,n,irow,icol,scall,B);
1466:   return(0);
1467: }
1468: 

1470: /* -------------------------------------------------------------------*/
1471: static struct _MatOps MatOps_Values = {
1472:        MatSetValues_MPISBAIJ,
1473:        MatGetRow_MPISBAIJ,
1474:        MatRestoreRow_MPISBAIJ,
1475:        MatMult_MPISBAIJ,
1476: /* 4*/ MatMultAdd_MPISBAIJ,
1477:        MatMult_MPISBAIJ,       /* transpose versions are same as non-transpose */
1478:        MatMultAdd_MPISBAIJ,
1479:        0,
1480:        0,
1481:        0,
1482: /*10*/ 0,
1483:        0,
1484:        0,
1485:        MatRelax_MPISBAIJ,
1486:        MatTranspose_MPISBAIJ,
1487: /*15*/ MatGetInfo_MPISBAIJ,
1488:        MatEqual_MPISBAIJ,
1489:        MatGetDiagonal_MPISBAIJ,
1490:        MatDiagonalScale_MPISBAIJ,
1491:        MatNorm_MPISBAIJ,
1492: /*20*/ MatAssemblyBegin_MPISBAIJ,
1493:        MatAssemblyEnd_MPISBAIJ,
1494:        0,
1495:        MatSetOption_MPISBAIJ,
1496:        MatZeroEntries_MPISBAIJ,
1497: /*25*/ 0,
1498:        0,
1499:        0,
1500:        0,
1501:        0,
1502: /*30*/ MatSetUpPreallocation_MPISBAIJ,
1503:        0,
1504:        0,
1505:        0,
1506:        0,
1507: /*35*/ MatDuplicate_MPISBAIJ,
1508:        0,
1509:        0,
1510:        0,
1511:        0,
1512: /*40*/ MatAXPY_MPISBAIJ,
1513:        MatGetSubMatrices_MPISBAIJ,
1514:        MatIncreaseOverlap_MPISBAIJ,
1515:        MatGetValues_MPISBAIJ,
1516:        MatCopy_MPISBAIJ,
1517: /*45*/ 0,
1518:        MatScale_MPISBAIJ,
1519:        0,
1520:        0,
1521:        0,
1522: /*50*/ 0,
1523:        0,
1524:        0,
1525:        0,
1526:        0,
1527: /*55*/ 0,
1528:        0,
1529:        MatSetUnfactored_MPISBAIJ,
1530:        0,
1531:        MatSetValuesBlocked_MPISBAIJ,
1532: /*60*/ 0,
1533:        0,
1534:        0,
1535:        0,
1536:        0,
1537: /*65*/ 0,
1538:        0,
1539:        0,
1540:        0,
1541:        0,
1542: /*70*/ MatGetRowMaxAbs_MPISBAIJ,
1543:        0,
1544:        0,
1545:        0,
1546:        0,
1547: /*75*/ 0,
1548:        0,
1549:        0,
1550:        0,
1551:        0,
1552: /*80*/ 0,
1553:        0,
1554:        0,
1555:        0,
1556:        MatLoad_MPISBAIJ,
1557: /*85*/ 0,
1558:        0,
1559:        0,
1560:        0,
1561:        0,
1562: /*90*/ 0,
1563:        0,
1564:        0,
1565:        0,
1566:        0,
1567: /*95*/ 0,
1568:        0,
1569:        0,
1570:        0,
1571:        0,
1572: /*100*/0,
1573:        0,
1574:        0,
1575:        0,
1576:        0,
1577: /*105*/0,
1578:        MatRealPart_MPISBAIJ,
1579:        MatImaginaryPart_MPISBAIJ,
1580:        MatGetRowUpperTriangular_MPISBAIJ,
1581:        MatRestoreRowUpperTriangular_MPISBAIJ
1582: };


1588: PetscErrorCode  MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1589: {
1591:   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1592:   *iscopy = PETSC_FALSE;
1593:   return(0);
1594: }

1600: PetscErrorCode  MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,PetscInt bs,PetscInt d_nz,PetscInt *d_nnz,PetscInt o_nz,PetscInt *o_nnz)
1601: {
1602:   Mat_MPISBAIJ   *b;
1604:   PetscInt       i,mbs,Mbs;

1607:   PetscOptionsBegin(B->comm,B->prefix,"Options for MPISBAIJ matrix","Mat");
1608:     PetscOptionsInt("-mat_block_size","Set the blocksize used to store the matrix","MatMPISBAIJSetPreallocation",bs,&bs,PETSC_NULL);
1609:   PetscOptionsEnd();

1611:   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1612:   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1613:   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1614:   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
1615:   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);

1617:   B->rmap.bs = B->cmap.bs = bs;
1618:   PetscMapSetUp(&B->rmap);
1619:   PetscMapSetUp(&B->cmap);

1621:   if (d_nnz) {
1622:     for (i=0; i<B->rmap.n/bs; i++) {
1623:       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
1624:     }
1625:   }
1626:   if (o_nnz) {
1627:     for (i=0; i<B->rmap.n/bs; i++) {
1628:       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
1629:     }
1630:   }
1631:   B->preallocated = PETSC_TRUE;

1633:   b   = (Mat_MPISBAIJ*)B->data;
1634:   mbs = B->rmap.n/bs;
1635:   Mbs = B->rmap.N/bs;
1636:   if (mbs*bs != B->rmap.n) {
1637:     SETERRQ2(PETSC_ERR_ARG_SIZ,"No of local rows %D must be divisible by blocksize %D",B->rmap.n,bs);
1638:   }

1640:   B->rmap.bs  = bs;
1641:   b->bs2 = bs*bs;
1642:   b->mbs = mbs;
1643:   b->nbs = mbs;
1644:   b->Mbs = Mbs;
1645:   b->Nbs = Mbs;

1647:   for (i=0; i<=b->size; i++) {
1648:     b->rangebs[i] = B->rmap.range[i]/bs;
1649:   }
1650:   b->rstartbs = B->rmap.rstart/bs;
1651:   b->rendbs   = B->rmap.rend/bs;
1652: 
1653:   b->cstartbs = B->cmap.rstart/bs;
1654:   b->cendbs   = B->cmap.rend/bs;
1655: 
1656:   MatCreate(PETSC_COMM_SELF,&b->A);
1657:   MatSetSizes(b->A,B->rmap.n,B->cmap.n,B->rmap.n,B->cmap.n);
1658:   MatSetType(b->A,MATSEQSBAIJ);
1659:   MatSeqSBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);
1660:   PetscLogObjectParent(B,b->A);

1662:   MatCreate(PETSC_COMM_SELF,&b->B);
1663:   MatSetSizes(b->B,B->rmap.n,B->cmap.N,B->rmap.n,B->cmap.N);
1664:   MatSetType(b->B,MATSEQBAIJ);
1665:   MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);
1666:   PetscLogObjectParent(B,b->B);

1668:   /* build cache for off array entries formed */
1669:   MatStashCreate_Private(B->comm,bs,&B->bstash);

1671:   return(0);
1672: }

1675: /*MC
1676:    MATMPISBAIJ - MATMPISBAIJ = "mpisbaij" - A matrix type to be used for distributed symmetric sparse block matrices, 
1677:    based on block compressed sparse row format.  Only the upper triangular portion of the matrix is stored.

1679:    Options Database Keys:
1680: . -mat_type mpisbaij - sets the matrix type to "mpisbaij" during a call to MatSetFromOptions()
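
   Example (an illustrative sketch; the sizes and preallocation values below are
   assumptions, not recommendations):
.vb
   Mat A;
   MatCreate(PETSC_COMM_WORLD,&A);
   MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);
   MatSetFromOptions(A);   /* picks up -mat_type mpisbaij if given */
   MatMPISBAIJSetPreallocation(A,1,3,PETSC_NULL,1,PETSC_NULL);
.ve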

1682:   Level: beginner

1684: .seealso: MatCreateMPISBAIJ
1685: M*/

1690: PetscErrorCode  MatCreate_MPISBAIJ(Mat B)
1691: {
1692:   Mat_MPISBAIJ   *b;
1694:   PetscTruth     flg;


1698:   PetscNew(Mat_MPISBAIJ,&b);
1699:   B->data = (void*)b;
1700:   PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));

1702:   B->ops->destroy    = MatDestroy_MPISBAIJ;
1703:   B->ops->view       = MatView_MPISBAIJ;
1704:   B->mapping    = 0;
1705:   B->factor     = 0;
1706:   B->assembled  = PETSC_FALSE;

1708:   B->insertmode = NOT_SET_VALUES;
1709:   MPI_Comm_rank(B->comm,&b->rank);
1710:   MPI_Comm_size(B->comm,&b->size);

1712:   /* build local table of row and column ownerships */
1713:   PetscMalloc((b->size+2)*sizeof(PetscInt),&b->rangebs);

1715:   /* build cache for off array entries formed */
1716:   MatStashCreate_Private(B->comm,1,&B->stash);
1717:   b->donotstash  = PETSC_FALSE;
1718:   b->colmap      = PETSC_NULL;
1719:   b->garray      = PETSC_NULL;
1720:   b->roworiented = PETSC_TRUE;

1722: #if defined(PETSC_USE_MAT_SINGLE)
1723:   /* stuff for MatSetValues_XXX in single precision */
1724:   b->setvalueslen     = 0;
1725:   b->setvaluescopy    = PETSC_NULL;
1726: #endif

1728:   /* stuff used in block assembly */
1729:   b->barray       = 0;

1731:   /* stuff used for matrix vector multiply */
1732:   b->lvec         = 0;
1733:   b->Mvctx        = 0;
1734:   b->slvec0       = 0;
1735:   b->slvec0b      = 0;
1736:   b->slvec1       = 0;
1737:   b->slvec1a      = 0;
1738:   b->slvec1b      = 0;
1739:   b->sMvctx       = 0;

1741:   /* stuff for MatGetRow() */
1742:   b->rowindices   = 0;
1743:   b->rowvalues    = 0;
1744:   b->getrowactive = PETSC_FALSE;

1746:   /* hash table stuff */
1747:   b->ht           = 0;
1748:   b->hd           = 0;
1749:   b->ht_size      = 0;
1750:   b->ht_flag      = PETSC_FALSE;
1751:   b->ht_fact      = 0;
1752:   b->ht_total_ct  = 0;
1753:   b->ht_insert_ct = 0;

1755:   b->in_loc       = 0;
1756:   b->v_loc        = 0;
1757:   b->n_loc        = 0;
1758:   PetscOptionsBegin(B->comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 1","Mat");
1759:     PetscOptionsTruth("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);
1760:     if (flg) {
1761:       PetscReal fact = 1.39;
1762:       MatSetOption(B,MAT_USE_HASH_TABLE);
1763:       PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);
1764:       if (fact <= 1.0) fact = 1.39;
1765:       MatMPIBAIJSetHashTableFactor(B,fact);
1766:       PetscInfo1(0,"Hash table Factor used %5.2f\n",fact);
1767:     }
1768:   PetscOptionsEnd();

1770:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1771:                                      "MatStoreValues_MPISBAIJ",
1772:                                      MatStoreValues_MPISBAIJ);
1773:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1774:                                      "MatRetrieveValues_MPISBAIJ",
1775:                                      MatRetrieveValues_MPISBAIJ);
1776:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1777:                                      "MatGetDiagonalBlock_MPISBAIJ",
1778:                                      MatGetDiagonalBlock_MPISBAIJ);
1779:   PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1780:                                      "MatMPISBAIJSetPreallocation_MPISBAIJ",
1781:                                      MatMPISBAIJSetPreallocation_MPISBAIJ);
1782:   B->symmetric                  = PETSC_TRUE;
1783:   B->structurally_symmetric     = PETSC_TRUE;
1784:   B->symmetric_set              = PETSC_TRUE;
1785:   B->structurally_symmetric_set = PETSC_TRUE;
1786:   PetscObjectChangeTypeName((PetscObject)B,MATMPISBAIJ);
1787:   return(0);
1788: }

1791: /*MC
1792:    MATSBAIJ - MATSBAIJ = "sbaij" - A matrix type to be used for symmetric block sparse matrices.

1794:    This matrix type is identical to MATSEQSBAIJ when constructed with a single process communicator,
1795:    and MATMPISBAIJ otherwise.

1797:    Options Database Keys:
1798: . -mat_type sbaij - sets the matrix type to "sbaij" during a call to MatSetFromOptions()
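
   Example (an illustrative sketch, assuming A is a newly created Mat):
.vb
   MatSetType(A,MATSBAIJ);  /* becomes MATSEQSBAIJ on one process, MATMPISBAIJ otherwise */
.ve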

1800:   Level: beginner

1802: .seealso: MatCreateMPISBAIJ,MATSEQSBAIJ,MATMPISBAIJ
1803: M*/

1808: PetscErrorCode  MatCreate_SBAIJ(Mat A)
1809: {
1811:   PetscMPIInt    size;

1814:   MPI_Comm_size(A->comm,&size);
1815:   if (size == 1) {
1816:     MatSetType(A,MATSEQSBAIJ);
1817:   } else {
1818:     MatSetType(A,MATMPISBAIJ);
1819:   }
1820:   return(0);
1821: }

1826: /*@C
1827:    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1828:    the user should preallocate the matrix storage by setting the parameters 
1829:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1830:    performance can be increased by more than a factor of 50.

1832:    Collective on Mat

1834:    Input Parameters:
1835: +  A - the matrix 
1836: .  bs   - size of block
1837: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1838:            submatrix  (same for all local rows)
1839: .  d_nnz - array containing the number of block nonzeros in the upper triangular
1840:            and diagonal part of the diagonal portion of the local submatrix
1841:            (possibly different for each block row) or PETSC_NULL.  You must leave room 
1842:            for the diagonal entry even if it is zero.
1843: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1844:            submatrix (same for all local rows).
1845: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1846:            off-diagonal portion of the local submatrix (possibly different for
1847:            each block row) or PETSC_NULL.


1850:    Options Database Keys:
1851: +   -mat_no_unroll - uses code that does not unroll the loops in the 
1852:                      block calculations (much slower)
1853: -   -mat_block_size - size of the blocks to use

1855:    Notes:

1857:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1858:    then it must be used on all processors that share the object for that argument.

1860:    If the *_nnz parameter is given then the *_nz parameter is ignored.

1862:    Storage Information:
1863:    For a square global matrix we define each processor's diagonal portion 
1864:    to be its local rows and the corresponding columns (a square submatrix);  
1865:    each processor's off-diagonal portion encompasses the remainder of the
1866:    local matrix (a rectangular submatrix). 

1868:    The user can specify preallocated storage for the diagonal part of
1869:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1870:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1871:    memory allocation.  Likewise, specify preallocated storage for the
1872:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1874:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1875:    the figure below we depict these three local rows and all columns (0-11).

1877: .vb
1878:            0 1 2 3 4 5 6 7 8 9 10 11
1879:           -------------------
1880:    row 3  |  o o o d d d o o o o o o
1881:    row 4  |  o o o d d d o o o o o o
1882:    row 5  |  o o o d d d o o o o o o
1883:           -------------------
1884: .ve
1885:   
1886:    Thus, any entries in the d locations are stored in the d (diagonal) 
1887:    submatrix, and any entries in the o locations are stored in the
1888:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1889:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

1891:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1892:    plus the diagonal part of the d matrix,
1893:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1894:    In general, for PDE problems in which most nonzeros are near the diagonal,
1895:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1896:    or you will get TERRIBLE performance; see the users' manual chapter on
1897:    matrices.
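
   As an illustrative reading of the figure above (with bs=1, and assuming every
   depicted location is truly nonzero), only the stored upper-triangular entries
   are counted, giving
.vb
   d_nnz = {3,2,1}    row 3: cols 3,4,5;  row 4: cols 4,5;  row 5: col 5
   o_nnz = {6,6,6}    cols 6..11 in each row; cols 0..2 lie in the lower
                      triangle and are not stored
.ve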

1899:    Level: intermediate

1901: .keywords: matrix, block, aij, compressed row, sparse, parallel

1903: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1904: @*/
1905: PetscErrorCode  MatMPISBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
1906: {
1907:   PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

1910:   PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);
1911:   if (f) {
1912:     (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
1913:   }
1914:   return(0);
1915: }

1919: /*@C
1920:    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1921:    (block compressed row).  For good matrix assembly performance
1922:    the user should preallocate the matrix storage by setting the parameters 
1923:    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1924:    performance can be increased by more than a factor of 50.

1926:    Collective on MPI_Comm

1928:    Input Parameters:
1929: +  comm - MPI communicator
1930: .  bs   - size of block
1931: .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1932:            This value should be the same as the local size used in creating the 
1933:            y vector for the matrix-vector product y = Ax.
1934: .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1935:            This value should be the same as the local size used in creating the 
1936:            x vector for the matrix-vector product y = Ax.
1937: .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
1938: .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
1939: .  d_nz  - number of block nonzeros per block row in diagonal portion of local 
1940:            submatrix  (same for all local rows)
1941: .  d_nnz - array containing the number of block nonzeros in the upper triangular
1942:            portion of the diagonal portion of the local submatrix
1943:            (possibly different for each block row) or PETSC_NULL.
1944:            You must leave room for the diagonal entry even if it is zero.
1945: .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1946:            submatrix (same for all local rows).
1947: -  o_nnz - array containing the number of nonzeros in the various block rows of the
1948:            off-diagonal portion of the local submatrix (possibly different for
1949:            each block row) or PETSC_NULL.

1951:    Output Parameter:
1952: .  A - the matrix 

1954:    Options Database Keys:
1955: .   -mat_no_unroll - uses code that does not unroll the loops in the 
1956:                      block calculations (much slower)
1957: .   -mat_block_size - size of the blocks to use
1958: .   -mat_mpi - use the parallel matrix data structures even on one processor 
1959:                (defaults to using SeqBAIJ format on one processor)

1961:    Notes:
1962:    The number of rows and columns must be divisible by blocksize.
1963:    This matrix type does not support complex Hermitian operations.

1965:    The user MUST specify either the local or global matrix dimensions
1966:    (possibly both).

1968:    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1969:    then it must be used on all processors that share the object for that argument.

1971:    If the *_nnz parameter is given then the *_nz parameter is ignored.

1973:    Storage Information:
1974:    For a square global matrix we define each processor's diagonal portion 
1975:    to be its local rows and the corresponding columns (a square submatrix);  
1976:    each processor's off-diagonal portion encompasses the remainder of the
1977:    local matrix (a rectangular submatrix). 

1979:    The user can specify preallocated storage for the diagonal part of
1980:    the local submatrix with either d_nz or d_nnz (not both).  Set 
1981:    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1982:    memory allocation.  Likewise, specify preallocated storage for the
1983:    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

1985:    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1986:    the figure below we depict these three local rows and all columns (0-11).

1988: .vb
1989:            0 1 2 3 4 5 6 7 8 9 10 11
1990:           -------------------
1991:    row 3  |  o o o d d d o o o o o o
1992:    row 4  |  o o o d d d o o o o o o
1993:    row 5  |  o o o d d d o o o o o o
1994:           -------------------
1995: .ve
1996:   
1997:    Thus, any entries in the d locations are stored in the d (diagonal) 
1998:    submatrix, and any entries in the o locations are stored in the
1999:    o (off-diagonal) submatrix.  Note that the d matrix is stored in
2000:    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.

2002:    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
2003:    plus the diagonal part of the d matrix,
2004:    and o_nz should indicate the number of block nonzeros per row in the o matrix.
2005:    In general, for PDE problems in which most nonzeros are near the diagonal,
2006:    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
2007:    or you will get TERRIBLE performance; see the users' manual chapter on
2008:    matrices.
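
   An illustrative call (the sizes and preallocation values here are
   assumptions, not recommendations):
.vb
   Mat A;
   MatCreateMPISBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,100,100,
                     3,PETSC_NULL,1,PETSC_NULL,&A);
.ve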

2010:    Level: intermediate

2012: .keywords: matrix, block, aij, compressed row, sparse, parallel

2014: .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
2015: @*/

2017: PetscErrorCode  MatCreateMPISBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
2018: {
2020:   PetscMPIInt    size;

2023:   MatCreate(comm,A);
2024:   MatSetSizes(*A,m,n,M,N);
2025:   MPI_Comm_size(comm,&size);
2026:   if (size > 1) {
2027:     MatSetType(*A,MATMPISBAIJ);
2028:     MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);
2029:   } else {
2030:     MatSetType(*A,MATSEQSBAIJ);
2031:     MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);
2032:   }
2033:   return(0);
2034: }


2039: static PetscErrorCode MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2040: {
2041:   Mat            mat;
2042:   Mat_MPISBAIJ   *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
2044:   PetscInt       len=0,nt,bs=matin->rmap.bs,mbs=oldmat->mbs;
2045:   PetscScalar    *array;

2048:   *newmat       = 0;
2049:   MatCreate(matin->comm,&mat);
2050:   MatSetSizes(mat,matin->rmap.n,matin->cmap.n,matin->rmap.N,matin->cmap.N);
2051:   MatSetType(mat,matin->type_name);
2052:   PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));
2053:   PetscMapCopy(matin->comm,&matin->rmap,&mat->rmap);
2054:   PetscMapCopy(matin->comm,&matin->cmap,&mat->cmap);
2055: 
2056:   mat->factor       = matin->factor;
2057:   mat->preallocated = PETSC_TRUE;
2058:   mat->assembled    = PETSC_TRUE;
2059:   mat->insertmode   = NOT_SET_VALUES;

2061:   a = (Mat_MPISBAIJ*)mat->data;
2062:   a->bs2   = oldmat->bs2;
2063:   a->mbs   = oldmat->mbs;
2064:   a->nbs   = oldmat->nbs;
2065:   a->Mbs   = oldmat->Mbs;
2066:   a->Nbs   = oldmat->Nbs;


2069:   a->size         = oldmat->size;
2070:   a->rank         = oldmat->rank;
2071:   a->donotstash   = oldmat->donotstash;
2072:   a->roworiented  = oldmat->roworiented;
2073:   a->rowindices   = 0;
2074:   a->rowvalues    = 0;
2075:   a->getrowactive = PETSC_FALSE;
2076:   a->barray       = 0;
2077:   a->rstartbs    = oldmat->rstartbs;
2078:   a->rendbs      = oldmat->rendbs;
2079:   a->cstartbs    = oldmat->cstartbs;
2080:   a->cendbs      = oldmat->cendbs;

2082:   /* hash table stuff */
2083:   a->ht           = 0;
2084:   a->hd           = 0;
2085:   a->ht_size      = 0;
2086:   a->ht_flag      = oldmat->ht_flag;
2087:   a->ht_fact      = oldmat->ht_fact;
2088:   a->ht_total_ct  = 0;
2089:   a->ht_insert_ct = 0;
2090: 
2091:   PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+2)*sizeof(PetscInt));
2092:   MatStashCreate_Private(matin->comm,1,&mat->stash);
2093:   MatStashCreate_Private(matin->comm,matin->rmap.bs,&mat->bstash);
2094:   if (oldmat->colmap) {
2095: #if defined (PETSC_USE_CTABLE)
2096:     PetscTableCreateCopy(oldmat->colmap,&a->colmap);
2097: #else
2098:     PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);
2099:     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));
2100:     PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));
2101: #endif
2102:   } else a->colmap = 0;

2104:   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2105:     PetscMalloc(len*sizeof(PetscInt),&a->garray);
2106:     PetscLogObjectMemory(mat,len*sizeof(PetscInt));
2107:     PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));
2108:   } else a->garray = 0;
2109: 
2110:   VecDuplicate(oldmat->lvec,&a->lvec);
2111:   PetscLogObjectParent(mat,a->lvec);
2112:   VecScatterCopy(oldmat->Mvctx,&a->Mvctx);
2113:   PetscLogObjectParent(mat,a->Mvctx);

2115:   VecDuplicate(oldmat->slvec0,&a->slvec0);
2116:   PetscLogObjectParent(mat,a->slvec0);
2117:   VecDuplicate(oldmat->slvec1,&a->slvec1);
2118:   PetscLogObjectParent(mat,a->slvec1);

2120:   VecGetLocalSize(a->slvec1,&nt);
2121:   VecGetArray(a->slvec1,&array);
2122:   VecCreateSeqWithArray(PETSC_COMM_SELF,bs*mbs,array,&a->slvec1a);
2123:   VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec1b);
2124:   VecRestoreArray(a->slvec1,&array);
2125:   VecGetArray(a->slvec0,&array);
2126:   VecCreateSeqWithArray(PETSC_COMM_SELF,nt-bs*mbs,array+bs*mbs,&a->slvec0b);
2127:   VecRestoreArray(a->slvec0,&array);
2128:   PetscLogObjectParent(mat,a->slvec0);
2129:   PetscLogObjectParent(mat,a->slvec1);
2130:   PetscLogObjectParent(mat,a->slvec0b);
2131:   PetscLogObjectParent(mat,a->slvec1a);
2132:   PetscLogObjectParent(mat,a->slvec1b);

2134:   /*  VecScatterCopy(oldmat->sMvctx,&a->sMvctx); - not written yet, replaced by the lazy trick: */
2135:   PetscObjectReference((PetscObject)oldmat->sMvctx);
2136:   a->sMvctx = oldmat->sMvctx;
2137:   PetscLogObjectParent(mat,a->sMvctx);

2139:   MatDuplicate(oldmat->A,cpvalues,&a->A);
2140:   PetscLogObjectParent(mat,a->A);
2141:   MatDuplicate(oldmat->B,cpvalues,&a->B);
2142:   PetscLogObjectParent(mat,a->B);
2143:   PetscFListDuplicate(matin->qlist,&mat->qlist);
2144:   *newmat = mat;
2145:   return(0);
2146: }

2148:  #include petscsys.h

2152: PetscErrorCode MatLoad_MPISBAIJ(PetscViewer viewer, MatType type,Mat *newmat)
2153: {
2154:   Mat            A;
2156:   PetscInt       i,nz,j,rstart,rend;
2157:   PetscScalar    *vals,*buf;
2158:   MPI_Comm       comm = ((PetscObject)viewer)->comm;
2159:   MPI_Status     status;
2160:   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,*sndcounts = 0,*browners,maxnz,*rowners,*locrowlens;
2161:   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2162:   PetscInt       *procsnz = 0,jj,*mycols,*ibuf;
2163:   PetscInt       bs=1,Mbs,mbs,extra_rows;
2164:   PetscInt       *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2165:   PetscInt       dcount,kmax,k,nzcount,tmp;
2166:   int            fd;
2167: 
2169:   PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPISBAIJ matrix 2","Mat");
2170:     PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);
2171:   PetscOptionsEnd();

2173:   MPI_Comm_size(comm,&size);
2174:   MPI_Comm_rank(comm,&rank);
2175:   if (!rank) {
2176:     PetscViewerBinaryGetDescriptor(viewer,&fd);
2177:     PetscBinaryRead(fd,(char *)header,4,PETSC_INT);
2178:     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2179:     if (header[3] < 0) {
2180:       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2181:     }
2182:   }

2184:   MPI_Bcast(header+1,3,MPIU_INT,0,comm);
2185:   M = header[1]; N = header[2];

2187:   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");

2189:   /* 
2190:      This code adds extra rows to make sure the number of rows is 
2191:      divisible by the blocksize
2192:   */
2193:   Mbs        = M/bs;
2194:   extra_rows = bs - M + bs*(Mbs);
2195:   if (extra_rows == bs) extra_rows = 0;
2196:   else                  Mbs++;
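  /* Illustrative example: M=10, bs=4 gives Mbs=2 and extra_rows=2, so Mbs
     becomes 3 and two padded rows (each holding a unit diagonal entry, set
     further below) are appended */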
2197:   if (extra_rows && !rank) {
2198:     PetscInfo(0,"Padding loaded matrix to match blocksize\n");
2199:   }

2201:   /* determine ownership of all rows */
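  /* e.g. (illustrative) Mbs=10 on size=4 processes yields mbs = 3,3,2,2 on
     ranks 0..3: the first Mbs%size ranks each get one extra block row */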
2202:   mbs        = Mbs/size + ((Mbs % size) > rank);
2203:   m          = mbs*bs;
2204:   PetscMalloc(2*(size+2)*sizeof(PetscMPIInt),&rowners);
2205:   browners   = rowners + size + 1;
2206:   MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);
2207:   rowners[0] = 0;
2208:   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2209:   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2210:   rstart = rowners[rank];
2211:   rend   = rowners[rank+1];
2212: 
2213:   /* distribute row lengths to all processors */
2214:   PetscMalloc((rend-rstart)*bs*sizeof(PetscMPIInt),&locrowlens);
2215:   if (!rank) {
2216:     PetscMalloc((M+extra_rows)*sizeof(PetscInt),&rowlengths);
2217:     PetscBinaryRead(fd,rowlengths,M,PETSC_INT);
2218:     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2219:     PetscMalloc(size*sizeof(PetscMPIInt),&sndcounts);
2220:     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2221:     MPI_Scatterv(rowlengths,sndcounts,browners,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2222:     PetscFree(sndcounts);
2223:   } else {
2224:     MPI_Scatterv(0,0,0,MPIU_INT,locrowlens,(rend-rstart)*bs,MPIU_INT,0,comm);
2225:   }
2226: 
2227:   if (!rank) {   /* procs[0] */
2228:     /* calculate the number of nonzeros on each processor */
2229:     PetscMalloc(size*sizeof(PetscInt),&procsnz);
2230:     PetscMemzero(procsnz,size*sizeof(PetscInt));
2231:     for (i=0; i<size; i++) {
2232:       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2233:         procsnz[i] += rowlengths[j];
2234:       }
2235:     }
2236:     PetscFree(rowlengths);
2237: 
2238:     /* determine max buffer needed and allocate it */
2239:     maxnz = 0;
2240:     for (i=0; i<size; i++) {
2241:       maxnz = PetscMax(maxnz,procsnz[i]);
2242:     }
2243:     PetscMalloc(maxnz*sizeof(PetscInt),&cols);

2245:     /* read in my part of the matrix column indices  */
2246:     nz     = procsnz[0];
2247:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2248:     mycols = ibuf;
2249:     if (size == 1)  nz -= extra_rows;
2250:     PetscBinaryRead(fd,mycols,nz,PETSC_INT);
2251:     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }

2253:     /* read in the other processes' parts (except the last) and ship them off */
2254:     for (i=1; i<size-1; i++) {
2255:       nz   = procsnz[i];
2256:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2257:       MPI_Send(cols,nz,MPIU_INT,i,tag,comm);
2258:     }
2259:     /* read in the stuff for the last proc */
2260:     if (size != 1) {
2261:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2262:       PetscBinaryRead(fd,cols,nz,PETSC_INT);
2263:       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2264:       MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);
2265:     }
2266:     PetscFree(cols);
2267:   } else {  /* procs[i], i>0 */
2268:     /* determine buffer space needed for message */
2269:     nz = 0;
2270:     for (i=0; i<m; i++) {
2271:       nz += locrowlens[i];
2272:     }
2273:     PetscMalloc(nz*sizeof(PetscInt),&ibuf);
2274:     mycols = ibuf;
2275:     /* receive message of column indices */
2276:     MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);
2277:     MPI_Get_count(&status,MPIU_INT,&maxnz);
2278:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Something is wrong with the file");
2279:   }

2281:   /* loop over local rows, determining number of off diagonal entries */
2282:   PetscMalloc(2*(rend-rstart+1)*sizeof(PetscInt),&dlens);
2283:   odlens   = dlens + (rend-rstart);
2284:   PetscMalloc(3*Mbs*sizeof(PetscInt),&mask);
2285:   PetscMemzero(mask,3*Mbs*sizeof(PetscInt));
2286:   masked1  = mask    + Mbs;
2287:   masked2  = masked1 + Mbs;
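  /* mask[] flags block columns already counted in the current block row;
     masked1[]/masked2[] record which flags were set so that only those
     entries, rather than the whole mask, are cleared after each block row */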
2288:   rowcount = 0; nzcount = 0;
2289:   for (i=0; i<mbs; i++) {
2290:     dcount  = 0;
2291:     odcount = 0;
2292:     for (j=0; j<bs; j++) {
2293:       kmax = locrowlens[rowcount];
2294:       for (k=0; k<kmax; k++) {
2295:         tmp = mycols[nzcount++]/bs; /* block col. index */
2296:         if (!mask[tmp]) {
2297:           mask[tmp] = 1;
2298:           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2299:           else masked1[dcount++] = tmp; /* entry in diag portion */
2300:         }
2301:       }
2302:       rowcount++;
2303:     }
2304: 
2305:     dlens[i]  = dcount;  /* d_nzz[i] */
2306:     odlens[i] = odcount; /* o_nzz[i] */

2308:     /* zero out the mask elements we set */
2309:     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2310:     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2311:   }
2312: 
2313:   /* create our matrix */
2314:   MatCreate(comm,&A);
2315:   MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);
2316:   MatSetType(A,type);
2317:   MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);
2318:   MatSetOption(A,MAT_COLUMNS_SORTED);
2319: 
2320:   if (!rank) {
2321:     PetscMalloc(maxnz*sizeof(PetscScalar),&buf);
2322:     /* read in my part of the matrix numerical values  */
2323:     nz = procsnz[0];
2324:     vals = buf;
2325:     mycols = ibuf;
2326:     if (size == 1)  nz -= extra_rows;
2327:     PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2328:     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }

2330:     /* insert into matrix */
2331:     jj      = rstart*bs;
2332:     for (i=0; i<m; i++) {
2333:       MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2334:       mycols += locrowlens[i];
2335:       vals   += locrowlens[i];
2336:       jj++;
2337:     }

2339:     /* read in other processors (except the last one) and ship out */
2340:     for (i=1; i<size-1; i++) {
2341:       nz   = procsnz[i];
2342:       vals = buf;
2343:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2344:       MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);
2345:     }
2346:     /* the last proc */
2347:     if (size != 1){
2348:       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2349:       vals = buf;
2350:       PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);
2351:       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2352:       MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);
2353:     }
2354:     PetscFree(procsnz);

2356:   } else {
2357:     /* receive numeric values */
2358:     PetscMalloc(nz*sizeof(PetscScalar),&buf);

2360:     /* receive message of values */
2361:     vals   = buf;
2362:     mycols = ibuf;
2363:     MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);
2364:     MPI_Get_count(&status,MPIU_SCALAR,&maxnz);
2365:     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Something is wrong with the file");

2367:     /* insert into matrix */
2368:     jj      = rstart*bs;
2369:     for (i=0; i<m; i++) {
2370:       MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);
2371:       mycols += locrowlens[i];
2372:       vals   += locrowlens[i];
2373:       jj++;
2374:     }
2375:   }

2377:   PetscFree(locrowlens);
2378:   PetscFree(buf);
2379:   PetscFree(ibuf);
2380:   PetscFree(rowners);
2381:   PetscFree(dlens);
2382:   PetscFree(mask);
2383:   MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
2384:   MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
2385:   *newmat = A;
2386:   return(0);
2387: }

2391: /*XXXXX@
2392:    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.

2394:    Input Parameters:
2395: .  mat  - the matrix
2396: .  fact - factor

2398:    Collective on Mat

2400:    Level: advanced

2402:   Notes:
2403:    This can also be set by the command line option: -mat_use_hash_table fact

2405: .keywords: matrix, hashtable, factor, HT

2407: .seealso: MatSetOption()
2408: @XXXXX*/


2413: PetscErrorCode MatGetRowMaxAbs_MPISBAIJ(Mat A,Vec v,PetscInt idx[])
2414: {
2415:   Mat_MPISBAIJ   *a = (Mat_MPISBAIJ*)A->data;
2416:   Mat_SeqBAIJ    *b = (Mat_SeqBAIJ*)(a->B)->data;
2417:   PetscReal      atmp;
2418:   PetscReal      *work,*svalues,*rvalues;
2420:   PetscInt       i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2421:   PetscMPIInt    rank,size;
2422:   PetscInt       *rowners_bs,dest,count,source;
2423:   PetscScalar    *va;
2424:   MatScalar      *ba;
2425:   MPI_Status     stat;

2428:   if (idx) SETERRQ(PETSC_ERR_SUP,"Send email to petsc-maint@mcs.anl.gov");
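  /* Outline: the row maxima over the locally stored upper triangle come from
     MatGetRowMaxAbs(a->A) and the loop over B below; since only the upper
     triangle is stored, each entry of B also contributes, by symmetry, to a
     row owned by a higher-rank process, so those candidate maxima are
     accumulated in work[] and sent on to ranks rank+1,...,size-1 */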
2429:   MatGetRowMaxAbs(a->A,v,PETSC_NULL);
2430:   VecGetArray(v,&va);

2432:   MPI_Comm_size(A->comm,&size);
2433:   MPI_Comm_rank(A->comm,&rank);

2435:   bs   = A->rmap.bs;
2436:   mbs  = a->mbs;
2437:   Mbs  = a->Mbs;
2438:   ba   = b->a;
2439:   bi   = b->i;
2440:   bj   = b->j;

2442:   /* find ownerships */
2443:   rowners_bs = A->rmap.range;

2445:   /* each proc creates an array to be distributed */
2446:   PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);
2447:   PetscMemzero(work,bs*Mbs*sizeof(PetscReal));

2449:   /* row_max for B */
2450:   if (rank != size-1){
2451:     for (i=0; i<mbs; i++) {
2452:       ncols = bi[1] - bi[0]; bi++;
2453:       brow  = bs*i;
2454:       for (j=0; j<ncols; j++){
2455:         bcol = bs*(*bj);
2456:         for (kcol=0; kcol<bs; kcol++){
2457:           col = bcol + kcol;                 /* local col index */
2458:           col += rowners_bs[rank+1];      /* global col index */
2459:           for (krow=0; krow<bs; krow++){
2460:             atmp = PetscAbsScalar(*ba); ba++;
2461:             row = brow + krow;    /* local row index */
2462:             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2463:             if (work[col] < atmp) work[col] = atmp;
2464:           }
2465:         }
2466:         bj++;
2467:       }
2468:     }

2470:     /* send values to its owners */
2471:     for (dest=rank+1; dest<size; dest++){
2472:       svalues = work + rowners_bs[dest];
2473:       count   = rowners_bs[dest+1]-rowners_bs[dest];
2474:       MPI_Send(svalues,count,MPIU_REAL,dest,rank,A->comm);
2475:     }
2476:   }
2477: 
2478:   /* receive values */
2479:   if (rank){
2480:     rvalues = work;
2481:     count   = rowners_bs[rank+1]-rowners_bs[rank];
2482:     for (source=0; source<rank; source++){
2483:       MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,A->comm,&stat);
2484:       /* process values */
2485:       for (i=0; i<count; i++){
2486:         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2487:       }
2488:     }
2489:   }

2491:   VecRestoreArray(v,&va);
2492:   PetscFree(work);
2493:   return(0);
2494: }

2498: PetscErrorCode MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2499: {
2500:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2502:   PetscInt       mbs=mat->mbs,bs=matin->rmap.bs;
2503:   PetscScalar    *x,*b,*ptr,zero=0.0;
2504:   Vec            bb1;
2505: 
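  /* Sketch of the scheme: each outer iteration assembles bb1 = bb - (B + B^T)*xx
     using a single forward scatter (slvec0 carries -xx and -B^T*xx, slvec1
     receives bb plus those contributions), then runs a local symmetric sweep
     with the diagonal block mat->A */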
2507:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2508:   if (bs > 1)
2509:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2511:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2512:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2513:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2514:       its--;
2515:     }

2517:     VecDuplicate(bb,&bb1);
2518:     while (its--){
2519: 
2520:       /* lower triangular part: slvec0b = - B^T*xx */
2521:       (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);
2522: 
2523:       /* copy xx into slvec0a */
2524:       VecGetArray(mat->slvec0,&ptr);
2525:       VecGetArray(xx,&x);
2526:       PetscMemcpy(ptr,x,bs*mbs*sizeof(PetscScalar));
2527:       VecRestoreArray(mat->slvec0,&ptr);

2529:       VecScale(mat->slvec0,-1.0);

2531:       /* copy bb into slvec1a */
2532:       VecGetArray(mat->slvec1,&ptr);
2533:       VecGetArray(bb,&b);
2534:       PetscMemcpy(ptr,b,bs*mbs*sizeof(PetscScalar));
2535:       VecRestoreArray(mat->slvec1,&ptr);

2537:       /* set slvec1b = 0 */
2538:       VecSet(mat->slvec1b,zero);

2540:       VecScatterBegin(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);
2541:       VecRestoreArray(xx,&x);
2542:       VecRestoreArray(bb,&b);
2543:       VecScatterEnd(mat->sMvctx,mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD);

2545:       /* upper triangular part: bb1 = bb1 - B*x */
2546:       (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);
2547: 
2548:       /* local diagonal sweep */
2549:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2550:     }
2551:     VecDestroy(bb1);
2552:   } else {
2553:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2554:   }
2555:   return(0);
2556: }

2560: PetscErrorCode MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
2561: {
2562:   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2564:   Vec            lvec1,bb1;
2565: 
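  /* Variant of MatRelax_MPISBAIJ above using the two-scatter path: a forward
     scatter of xx into mat->lvec feeds the B*x term, while a reverse scatter
     of lvec1 = -B^T*xx accumulates the lower-triangle contribution into bb1 */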
2567:   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %D and local its %D both positive",its,lits);
2568:   if (matin->rmap.bs > 1)
2569:     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");

2571:   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2572:     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2573:       (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);
2574:       its--;
2575:     }

2577:     VecDuplicate(mat->lvec,&lvec1);
2578:     VecDuplicate(bb,&bb1);
2579:     while (its--){
2580:       VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2581: 
2582:       /* lower diagonal part: bb1 = bb - B^T*xx */
2583:       (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);
2584:       VecScale(lvec1,-1.0);

2586:       VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);
2587:       VecCopy(bb,bb1);
2588:       VecScatterBegin(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);

2590:       /* upper diagonal part: bb1 = bb1 - B*x */
2591:       VecScale(mat->lvec,-1.0);
2592:       (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);

2594:       VecScatterEnd(mat->Mvctx,lvec1,bb1,ADD_VALUES,SCATTER_REVERSE);
2595: 
2596:       /* diagonal sweep */
2597:       (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);
2598:     }
2599:     VecDestroy(lvec1);
2600:     VecDestroy(bb1);
2601:   } else {
2602:     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2603:   }
2604:   return(0);
2605: }