Actual source code: mpimatmatmult.c
#define PETSCMAT_DLL
/*
   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
          C = A * B
*/
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/utils/freespace.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"
#include "src/mat/impls/dense/mpi/mpidense.h"
PetscErrorCode MatMatMult_MPIAIJ_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C);CHKERRQ(ierr); /* numeric product is computed as well */
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = MatMatMultNumeric_MPIAIJ_MPIAIJ(A,B,*C);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  return(0);
}
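/*
   Editor's note: a minimal usage sketch (not part of the original file), assuming A and B are
   assembled MPIAIJ matrices on the same communicator with compatible sizes:

     Mat A,B,C;
     ...
     MatMatMult(A,B,MAT_INITIAL_MATRIX,2.0,&C);  // dispatches here: symbolic + numeric product
     // ... change numerical values of A and/or B, keeping their nonzero patterns ...
     MatMatMult(A,B,MAT_REUSE_MATRIX,2.0,&C);    // reuses the symbolic data cached on C
     MatDestroy(C);

   The fill value (2.0 is an arbitrary example) is only used by the symbolic phase.
*/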
PetscErrorCode PetscContainerDestroy_Mat_MatMatMultMPI(void *ptr)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult=(Mat_MatMatMultMPI*)ptr;

  ierr = PetscFree(mult->startsj);CHKERRQ(ierr);
  ierr = PetscFree(mult->bufa);CHKERRQ(ierr);
  if (mult->isrowa){ierr = ISDestroy(mult->isrowa);CHKERRQ(ierr);}
  if (mult->isrowb){ierr = ISDestroy(mult->isrowb);CHKERRQ(ierr);}
  if (mult->iscolb){ierr = ISDestroy(mult->iscolb);CHKERRQ(ierr);}
  if (mult->C_seq){ierr = MatDestroy(mult->C_seq);CHKERRQ(ierr);}
  if (mult->A_loc){ierr = MatDestroy(mult->A_loc);CHKERRQ(ierr);}
  if (mult->B_seq){ierr = MatDestroy(mult->B_seq);CHKERRQ(ierr);}
  if (mult->B_loc){ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);}
  if (mult->B_oth){ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);}
  ierr = PetscFree(mult->abi);CHKERRQ(ierr);
  ierr = PetscFree(mult->abj);CHKERRQ(ierr);
  ierr = PetscFree(mult);CHKERRQ(ierr);
  return(0);
}
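/*
   The two routines below are installed on the product matrix C by MatMatMultSymbolic_MPIAIJ_MPIAIJ()
   in place of the standard MPIAIJ destroy/duplicate methods: the destroy wrapper restores the original
   method and makes sure the attached "Mat_MatMatMultMPI" container is freed, while the duplicate
   wrapper produces a plain MPIAIJ copy that does not carry the container.
*/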
EXTERN PetscErrorCode MatDestroy_AIJ(Mat);
PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(Mat A)
{
  PetscErrorCode    ierr;
  PetscContainer    container;
  Mat_MatMatMultMPI *mult=PETSC_NULL;

  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  A->ops->destroy = mult->MatDestroy;
  ierr = PetscObjectCompose((PetscObject)A,"Mat_MatMatMultMPI",0);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
  return(0);
}
PetscErrorCode MatDuplicate_MPIAIJ_MatMatMult(Mat A,MatDuplicateOption op,Mat *M)
{
  PetscErrorCode    ierr;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  ierr = PetscObjectQuery((PetscObject)A,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }
  /* Note: the container is not duplicated, because that would require deep copying of
     several large data sets (see PetscContainerDestroy_Mat_MatMatMultMPI()).
     These data sets are only used for repeated calls of MatMatMultNumeric().
     *M is unlikely to be used in this way, so we create *M in plain MPIAIJ format. */
  ierr = (*mult->MatDuplicate)(A,op,M);CHKERRQ(ierr);
  (*M)->ops->destroy   = mult->MatDestroy;   /* = MatDestroy_MPIAIJ; *M does not duplicate A's container! */
  (*M)->ops->duplicate = mult->MatDuplicate; /* = MatDuplicate_MPIAIJ */
  return(0);
}
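/*
   Outline of MatMatMultSymbolic_MPIAIJ_MPIAIJ() below (editor's summary): each process
     1) gathers the rows of B matching the nonzero columns of its part of A into a sequential
        matrix B_seq (MatGetBrowsOfAcols),
     2) condenses its own rows of A into a sequential matrix A_loc (MatGetLocalMatCondensed),
     3) forms the sequential product C_seq = A_loc * B_seq, and
     4) merges the per-process C_seq into the parallel C (MatMerge).
   The intermediate objects are cached in a PetscContainer attached to C so that
   MatMatMultNumeric_MPIAIJ_MPIAIJ() can reuse them.
*/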
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode    ierr;
  PetscInt          start,end;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  if (A->cmap.rstart != B->rmap.rstart || A->cmap.rend != B->rmap.rend){
    SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap.rstart,A->cmap.rend,B->rmap.rstart,B->rmap.rend);
  }
  ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);

  /* create a seq matrix B_seq = submatrix of B consisting of the rows of B that correspond to the nonzero columns of the local A */
  ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&mult->isrowb,&mult->iscolb,&mult->brstart,&mult->B_seq);CHKERRQ(ierr);

  /* create a seq matrix A_loc = submatrix of A consisting of all local rows of A */
  start = A->rmap.rstart; end = A->rmap.rend;
  ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&mult->isrowa);CHKERRQ(ierr);
  ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,&mult->isrowa,&mult->isrowb,&mult->A_loc);CHKERRQ(ierr);

  /* compute C_seq = A_loc * B_seq */
  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_INITIAL_MATRIX,fill,&mult->C_seq);CHKERRQ(ierr);

  /* create mpi matrix C by concatenating the local C_seq matrices */
  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr); /* prevent C_seq from being destroyed by MatMerge() */
  ierr = MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_INITIAL_MATRIX,C);CHKERRQ(ierr);

  /* attach the supporting struct to C for reuse of the symbolic C */
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,mult);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)(*C),"Mat_MatMatMultMPI",(PetscObject)container);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_Mat_MatMatMultMPI);CHKERRQ(ierr);
  mult->MatDestroy   = (*C)->ops->destroy;
  mult->MatDuplicate = (*C)->ops->duplicate;

  (*C)->ops->destroy   = MatDestroy_MPIAIJ_MatMatMult;
  (*C)->ops->duplicate = MatDuplicate_MPIAIJ_MatMatMult;
  return(0);
}
/* This routine is called ONLY in the case of reusing a previously computed symbolic C */
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat B,Mat C)
{
  PetscErrorCode    ierr;
  Mat               *seq;
  Mat_MatMatMultMPI *mult;
  PetscContainer    container;

  ierr = PetscObjectQuery((PetscObject)C,"Mat_MatMatMultMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
  }

  seq = &mult->B_seq;
  ierr = MatGetSubMatrices(B,1,&mult->isrowb,&mult->iscolb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->B_seq = *seq;

  seq = &mult->A_loc;
  ierr = MatGetSubMatrices(A,1,&mult->isrowa,&mult->isrowb,MAT_REUSE_MATRIX,&seq);CHKERRQ(ierr);
  mult->A_loc = *seq;

  ierr = MatMatMult_SeqAIJ_SeqAIJ(mult->A_loc,mult->B_seq,MAT_REUSE_MATRIX,0.0,&mult->C_seq);CHKERRQ(ierr);

  ierr = PetscObjectReference((PetscObject)mult->C_seq);CHKERRQ(ierr);
  ierr = MatMerge(A->comm,mult->C_seq,B->cmap.n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
  return(0);
}
PetscErrorCode MatMatMult_MPIAIJ_MPIDense(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;

  if (scall == MAT_INITIAL_MATRIX){
    ierr = MatMatMultSymbolic_MPIAIJ_MPIDense(A,B,fill,C);CHKERRQ(ierr);
  }
  ierr = MatMatMultNumeric_MPIAIJ_MPIDense(A,B,*C);CHKERRQ(ierr);
  return(0);
}
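/*
   Context cached on the dense product C (editor's note): workB holds the off-processor rows of B
   needed for the local product, while rvalues/svalues and rwaits/swaits are the receive/send
   buffers and MPI requests reused by MatMPIDenseScatter() on every numeric multiply.
*/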
typedef struct {
  Mat         workB;
  PetscScalar *rvalues,*svalues;
  MPI_Request *rwaits,*swaits;
} MPIAIJ_MPIDense;
PetscErrorCode MPIAIJ_MPIDenseDestroy(void *ctx)
{
  MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*) ctx;
  PetscErrorCode  ierr;

  if (contents->workB) {ierr = MatDestroy(contents->workB);CHKERRQ(ierr);}
  ierr = PetscFree4(contents->rvalues,contents->svalues,contents->rwaits,contents->swaits);CHKERRQ(ierr);
  ierr = PetscFree(contents);CHKERRQ(ierr);
  return(0);
}
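/*
   Editor's summary of the routine below: it first creates the parallel dense C via
   MatMatMultSymbolic_MPIDense_MPIDense(), then sizes workB and the message buffers from the
   VecScatter (aij->Mvctx) that the MPIAIJ matrix already uses for its off-diagonal block;
   that communication pattern identifies exactly the rows of B each process needs.
*/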
PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat *C)
{
  PetscErrorCode         ierr;
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*) A->data;
  PetscInt               nz   = aij->B->cmap.n;
  PetscContainer         cont;
  MPIAIJ_MPIDense        *contents;
  VecScatter             ctx  = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;

  ierr = MatMatMultSymbolic_MPIDense_MPIDense(A,B,0.0,C);CHKERRQ(ierr);

  ierr = PetscContainerCreate(A->comm,&cont);CHKERRQ(ierr);
  ierr = PetscNew(MPIAIJ_MPIDense,&contents);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(cont,contents);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(cont,MPIAIJ_MPIDenseDestroy);CHKERRQ(ierr);

  /* Create work matrix used to store the off-processor rows of B needed for the local product */
  ierr = MatCreateSeqDense(PETSC_COMM_SELF,nz,B->cmap.N,PETSC_NULL,&contents->workB);CHKERRQ(ierr);

  /* Create work arrays needed */
  ierr = PetscMalloc4(B->cmap.N*from->starts[from->n],PetscScalar,&contents->rvalues,
                      B->cmap.N*to->starts[to->n],PetscScalar,&contents->svalues,
                      from->n,MPI_Request,&contents->rwaits,
                      to->n,MPI_Request,&contents->swaits);CHKERRQ(ierr);

  ierr = PetscObjectCompose((PetscObject)(*C),"workB",(PetscObject)cont);CHKERRQ(ierr);
  ierr = PetscContainerDestroy(cont);CHKERRQ(ierr);
  return(0);
}
/*
    Performs an efficient scatter on the rows of B needed by this process; this is
    a modification of the VecScatterBegin_() routines.
*/
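/*
   Editor's note on the packing below: the dense arrays of B and workB are stored column by column,
   so a needed row is not contiguous in memory. The send loop therefore gathers
   b[row + nrowsB*k] for k = 0,...,ncols-1 into a contiguous message, and the receive loop
   scatters each incoming message back into w[row + nrows*k].
*/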
PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,Mat C,Mat *outworkB)
{
  Mat_MPIAIJ             *aij = (Mat_MPIAIJ*)A->data;
  PetscErrorCode         ierr;
  PetscScalar            *b,*w,*svalues,*rvalues;
  VecScatter             ctx  = aij->Mvctx;
  VecScatter_MPI_General *from = (VecScatter_MPI_General*) ctx->fromdata;
  VecScatter_MPI_General *to   = (VecScatter_MPI_General*) ctx->todata;
  PetscInt               i,j,k;
  PetscMPIInt            *sindices,*sstarts,*sprocs,*rindices,*rstarts,*rprocs,nrecvs;
  MPI_Request            *swaits,*rwaits;
  MPI_Comm               comm = A->comm;
  PetscMPIInt            tag = ctx->tag,ncols = B->cmap.N,nrows = aij->B->cmap.n,imdex,nrowsB = B->rmap.n;
  MPI_Status             status;
  MPIAIJ_MPIDense        *contents;
  PetscContainer         cont;
  Mat                    workB;

  ierr = PetscObjectQuery((PetscObject)C,"workB",(PetscObject*)&cont);CHKERRQ(ierr);
  ierr = PetscContainerGetPointer(cont,(void**)&contents);CHKERRQ(ierr);

  workB = *outworkB = contents->workB;
  if (nrows != workB->rmap.n) SETERRQ2(PETSC_ERR_PLIB,"Number of rows of workB %D not equal to number of columns of aij->B %D",workB->rmap.n,nrows);
  sindices = to->indices;
  sstarts  = to->starts;
  sprocs   = to->procs;
  swaits   = contents->swaits;
  svalues  = contents->svalues;

  rindices = from->indices;
  rstarts  = from->starts;
  rprocs   = from->procs;
  rwaits   = contents->rwaits;
  rvalues  = contents->rvalues;

  ierr = MatGetArray(B,&b);CHKERRQ(ierr);
  ierr = MatGetArray(workB,&w);CHKERRQ(ierr);

  for (i=0; i<from->n; i++) {
    ierr = MPI_Irecv(rvalues+ncols*rstarts[i],ncols*(rstarts[i+1]-rstarts[i]),MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
  }

  for (i=0; i<to->n; i++) {
    /* pack a message at a time */
    CHKMEMQ;
    for (j=0; j<sstarts[i+1]-sstarts[i]; j++){
      for (k=0; k<ncols; k++) {
        svalues[ncols*(sstarts[i] + j) + k] = b[sindices[sstarts[i]+j] + nrowsB*k];
      }
    }
    CHKMEMQ;
    ierr = MPI_Isend(svalues+ncols*sstarts[i],ncols*(sstarts[i+1]-sstarts[i]),MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
  }

  nrecvs = from->n;
  while (nrecvs) {
    ierr = MPI_Waitany(from->n,rwaits,&imdex,&status);CHKERRQ(ierr);
    nrecvs--;
    /* unpack a message at a time */
    CHKMEMQ;
    for (j=0; j<rstarts[imdex+1]-rstarts[imdex]; j++){
      for (k=0; k<ncols; k++) {
        w[rindices[rstarts[imdex]+j] + nrows*k] = rvalues[ncols*(rstarts[imdex] + j) + k];
      }
    }
    CHKMEMQ;
  }
  if (to->n) {ierr = MPI_Waitall(to->n,swaits,to->sstatus);CHKERRQ(ierr);}

  ierr = MatRestoreArray(B,&b);CHKERRQ(ierr);
  ierr = MatRestoreArray(workB,&w);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(workB,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return(0);
}
PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij    = (Mat_MPIAIJ*)A->data;
  Mat_MPIDense   *bdense = (Mat_MPIDense*)B->data;
  Mat_MPIDense   *cdense = (Mat_MPIDense*)C->data;
  Mat            workB;

  /* diagonal block of A times all local rows of B */
  ierr = MatMatMultNumeric_SeqAIJ_SeqDense(aij->A,bdense->A,cdense->A);CHKERRQ(ierr);

  /* get off-processor parts of B needed to complete the product */
  ierr = MatMPIDenseScatter(A,B,C,&workB);CHKERRQ(ierr);

  /* off-diagonal block of A times nonlocal rows of B */
  ierr = MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  return(0);
}
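/*
   Editor's note: a minimal sketch of the MPIAIJ * MPIDense path (not part of the original file),
   assuming A is MPIAIJ and B is MPIDense with compatible layouts; the fill value (1.0 here) is
   shown only as a placeholder since it does not affect the dense result:

     Mat A,B,C;
     ...
     MatMatMult(A,B,MAT_INITIAL_MATRIX,1.0,&C);  // C is created as MPIDense
     MatMatMult(A,B,MAT_REUSE_MATRIX,1.0,&C);    // reuses workB and the buffers cached on C
     MatDestroy(C);
*/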