Actual source code: ml.c
#define PETSCKSP_DLL

/*
   Provides an interface to the ML 4.0 smoothed aggregation algebraic multigrid preconditioner.
*/
#include "private/pcimpl.h"
#include "src/ksp/pc/impls/mg/mgimpl.h"
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include <math.h>
#include "ml_config.h"
#include "ml_include.h"

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;     /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in aij format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal    Threshold,DampingFactor;
  PetscTruth   SpectralNormScheme_Anorm;
  PetscMPIInt  size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

/* Forward declarations (reconstructed from the definitions below) */
extern PetscErrorCode PetscContainerDestroy_PC_ML(void*);
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode MatDestroy_Shell(Mat);
extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double p[],void *ML_data);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  MatReuse       reuse = MAT_INITIAL_MATRIX;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      PC_ML          *pc_ml_new = PETSC_NULL;
      PetscContainer container_new;
      PetscNew(PC_ML,&pc_ml_new);
      PetscLogObjectMemory(pc,sizeof(PC_ML));
      PetscContainerCreate(PETSC_COMM_SELF,&container_new);
      PetscContainerSetPointer(container_new,pc_ml_new);
      PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);
      PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);

      PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));
      PetscContainerDestroy(container);
      pc_ml = pc_ml_new;
    }
  }

  /* set up the special features of PCML */
  /*-------------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  MPI_Comm_size(A->comm,&size);
  pc_ml->size = size;
  if (size > 1){
    if (reuse) Aloc = PetscMLdata->Aloc;
    MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);
  } else {
    Aloc = A;
  }

  /* create and initialize the struct 'PetscMLdata' */
  if (!reuse){
    PetscNew(FineGridCtx,&PetscMLdata);
    pc_ml->PetscMLdata = PetscMLdata;
    PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);
    VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);
    VecSetType(PetscMLdata->x,VECSEQ);

    VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);
    VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);
    VecSetType(PetscMLdata->y,VECSEQ);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix; it determines Nlevels. */
  MatGetSize(Aloc,&m,&nlocal_allcols);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %D must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %D must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  if (!pc->setupcalled){
    PCMGSetLevels(pc,Nlevels,PETSC_NULL);
    PCSetFromOptions_MG(pc); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);
    pc_ml->gridctx = gridctx;
  }
  fine_level = Nlevels - 1;

  /* Wrap the ML matrices as PETSc matrices at the coarsened grids.
     Note: level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* the ML matrix A changes its sparsity pattern even though the PETSc A does not, thus gridctx[level].A must be recreated! */
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        MatDestroy(gridctx[level].A);
      }
      MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      VecCreate(gridctx[level].A->comm,&gridctx[level].x);
      VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].x,VECMPI);
      PCMGSetX(pc,level,gridctx[level].x);

      VecCreate(gridctx[level].A->comm,&gridctx[level].b);
      VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level].b,VECMPI);
      PCMGSetRhs(pc,level,gridctx[level].b);

      VecCreate(gridctx[level1].A->comm,&gridctx[level1].r);
      VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);
      VecSetType(gridctx[level1].r,VECMPI);
      PCMGSetR(pc,level1,gridctx[level1].r);

      if (level == 0){
        PCMGGetCoarseSolve(pc,&gridctx[level].ksp);
      } else {
        PCMGGetSmoother(pc,level,&gridctx[level].ksp);
      }
    }
    PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);
  }

  /* set the interpolation/restriction operators and the operators at each level */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    PCMGSetInterpolation(pc,level1,gridctx[level].P);
    PCMGSetRestriction(pc,level1,gridctx[level].R);
    if (level > 0){
      PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);
    }
    KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);
  }
  PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);
  KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);

  /* now call PCSetUp_MG() */
  /*-----------------------*/
  (*pc_ml->PCSetUp)(pc);
  return(0);
}
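
/*
   Level-numbering note (an illustrative sketch added for clarity, not part of the
   original source): ML builds its hierarchy with ML_INCREASING, so ML level 0 is
   the finest and ML level Nlevels-1 the coarsest, while PCMG numbers level 0 as
   the coarsest and Nlevels-1 as the finest. The wrapping loops in PCSetUp_ML()
   walk the two orderings in opposite directions; the hypothetical helper below
   states the mapping they implement.
*/
#if 0
static PetscInt PCML_PetscLevelFromML(PetscInt Nlevels,PetscInt mllevel)
{
  /* ML level 1 -> PETSc level Nlevels-2, ..., ML level Nlevels-1 -> PETSc level 0 */
  return Nlevels - 1 - mllevel;
}
#endif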

PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  if (pc_ml->size > 1){MatDestroy(pc_ml->PetscMLdata->Aloc);}
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  PetscFree(pc_ml->PetscMLdata->pwork);
  if (pc_ml->PetscMLdata->x){VecDestroy(pc_ml->PetscMLdata->x);}
  if (pc_ml->PetscMLdata->y){VecDestroy(pc_ml->PetscMLdata->y);}
  PetscFree(pc_ml->PetscMLdata);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){MatDestroy(pc_ml->gridctx[level].A);}
    if (pc_ml->gridctx[level].P){MatDestroy(pc_ml->gridctx[level].P);}
    if (pc_ml->gridctx[level].R){MatDestroy(pc_ml->gridctx[level].R);}
    if (pc_ml->gridctx[level].x){VecDestroy(pc_ml->gridctx[level].x);}
    if (pc_ml->gridctx[level].b){VecDestroy(pc_ml->gridctx[level].b);}
    if (pc_ml->gridctx[level+1].r){VecDestroy(pc_ml->gridctx[level+1].r);}
  }
  PetscFree(pc_ml->gridctx);
  PetscFree(pc_ml);
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
                  that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  PetscContainerDestroy(container);

  /* detach PC_ML from pc and dereference the container */
  PetscObjectCompose((PetscObject)pc,"PC_ML",0);
  (*pc->ops->destroy)(pc);
  return(0);
}

PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,m,PrintLevel;
  PetscTruth     flg;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  PCMGType       mgtype;

  PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);
  if (container) {
    PetscContainerGetPointer(container,(void **)&pc_ml);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  PetscOptionsHead("Multigrid options (inherited)");
  PetscOptionsInt("-pc_mg_cycles","1 for V-cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);
  PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);
  PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);
  PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);
  PetscOptionsTail();

  /* ML options */
  PetscOptionsHead("ML options");
  /* set defaults */
  PrintLevel = 0;
  indx       = 0;
  PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);
  ML_Set_PrintLevel(PrintLevel);
  PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);
  PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);
  PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL); /* ??? */
  pc_ml->CoarsenScheme = indx;
  PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);
  PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);
  PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);
  PetscOptionsTail();
  return(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
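
/*
   Usage sketch (added for illustration, not part of the original source; the
   variable names ksp, pc, A, b, x are placeholders): PCML is selected like any
   other PC type, and the options listed above are then read during
   KSPSetFromOptions().

     ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,"ml");CHKERRQ(ierr);
     ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
     ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

   or, equivalently, at the command line:

     -pc_type ml -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS
*/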

PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PetscContainer container;

  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  PCSetType(pc,PCMG); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  PetscNew(PC_ML,&pc_ml);
  PetscLogObjectMemory(pc,sizeof(PC_ML));
  PetscContainerCreate(PETSC_COMM_SELF,&container);
  PetscContainerSetPointer(container,pc_ml);
  PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);
  PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1; /* ??? */
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* override the PCMG function pointers with the PCML versions */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  return(0);
}

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  Mat         Aloc;
  Mat_SeqAIJ  *a;
  PetscInt    m,i,j,k=0,row,*aj;
  PetscScalar *aa;
  FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  MatGetSize(Aloc,&m,PETSC_NULL);

  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0); /* ML convention: return 0 when there is not enough space */
    if ( (row >= 0) && (row <= (m-1)) ) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat         A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt size;
  PetscScalar *pwork=ml->pwork;
  PetscInt    i;

  MPI_Comm_size(A->comm,&size);
  if (size == 1){
    VecPlaceArray(ml->x,p);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    VecPlaceArray(ml->x,pwork);
  }
  VecPlaceArray(ml->y,ap);
  MatMult(Aloc,ml->x,ml->y);
  VecResetArray(ml->x);
  VecResetArray(ml->y);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  FineGridCtx *ml=(FineGridCtx*)ML_data;
  Mat         A=ml->A;
  Mat_MPIAIJ  *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt size;
  PetscInt    i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar *array;

  MPI_Comm_size(A->comm,&size);
  if (size == 1) return 0;

  VecPlaceArray(ml->y,p);
  VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);
  VecResetArray(ml->y);
  VecGetArray(a->lvec,&array);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  VecRestoreArray(a->lvec,&array);
  return 0;
}
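
/*
   Layout note (added for illustration, not part of the original source): on return
   from PetscML_comm() the array p[] holds the owned vector entries followed by the
   scattered ghost values,

     p = [ owned: 0 .. in_length-1 | ghost: in_length .. out_length-1 ]

   with in_length = A->rmap.n and out_length = Aloc->cmap.n. This matches the
   column ordering of the merged local matrix built by MatConvert_MPIAIJ_ML(), so
   PetscML_matvec() can apply Aloc directly to the filled-in pwork array.
*/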

PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  return(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  MatShellGetContext(A,(void **)&shell);
  VecGetArray(x,&xarray);
  VecGetArray(y,&yarray);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  VecRestoreArray(x,&xarray);
  VecRestoreArray(y,&yarray);
  VecAXPY(y,1.0,w); /* y = A*x + w */
  return(0);
}

/* newtype is ignored because "ml" is not listed under PETSc MatType yet */
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %D != an: %D",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    PetscMalloc((1+am)*sizeof(PetscInt),&ci);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);
    PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k] = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %D != ci[am]: %D",k,ci[am]);

    /* put together the new matrix */
    an = mpimat->A->cmap.n+mpimat->B->cmap.n;
    MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  return(0);
}
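
/*
   Worked illustration (added for clarity, not part of the original source): MPIAIJ
   stores the local rows as a square "diagonal" block A (owned columns) plus an
   "off-diagonal" block B whose ghost columns are compactly renumbered from 0.
   The merged Aloc built above therefore uses the column map

     column j of the diagonal block     ->  column j      of Aloc   (0 <= j < an)
     column j of the off-diagonal block ->  column an + j of Aloc

   which is exactly the owned-then-ghost ordering that PetscML_comm() produces.
*/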

PetscErrorCode MatDestroy_ML(Mat A)
{
  Mat_MLShell *shell;

  MatShellGetContext(A,(void **)&shell);
  VecDestroy(shell->y);
  PetscFree(shell);
  MatDestroy_Shell(A);
  PetscObjectChangeTypeName((PetscObject)A,0);
  return(0);
}

PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = matdata->rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);
    }
    return(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  MatCreate(PETSC_COMM_SELF,newmat);
  MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(*newmat,MATSEQAIJ);

  PetscMalloc((m+1)*sizeof(PetscInt),&nnz);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; /* +1 for the diagonal entry, stored separately in MSR */
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  MatSeqAIJSetPreallocation(*newmat,0,nnz);
  MatSetOption(*newmat,MAT_COLUMNS_SORTED);

  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    PetscSortIntWithScalarArray(nnz[i],aj,aa);
    MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);

  PetscFree(aj);
  PetscFree(nnz);
  return(0);
}
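
/*
   MSR refresher (an illustrative example added for clarity, not part of the
   original source): in ML's MSR storage ml_cols[0..m] are row pointers into the
   off-diagonal section, and ml_vals[i] for i < m holds the diagonal of row i.
   For the 2x2 matrix [2 1; 0 3] with m = 2 this gives

     ml_cols = {3, 4, 4, 1}    off-diagonals of row i sit at ml_cols[i] .. ml_cols[i+1]-1
     ml_vals = {2, 3, -, 1}    ml_vals[0..1] = diagonal, ml_vals[3] = entry (0,1)

   so row i carries 1 + (ml_cols[i+1] - ml_cols[i]) nonzeros, matching the
   preallocation loop above.
*/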

PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscInt    m,n;
  ML_Comm     *MLcomm;
  Mat_MLShell *shellctx;

  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    return(0);
  }

  if (reuse){
    MatShellGetContext(*newmat,(void **)&shellctx);
    shellctx->mlmat = mlmat;
    return(0);
  }

  MLcomm = mlmat->comm;
  PetscNew(Mat_MLShell,&shellctx);
  MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);
  MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);
  MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  VecCreate(PETSC_COMM_WORLD,&shellctx->y);
  VecSetSizes(shellctx->y,m,PETSC_DECIDE);
  VecSetFromOptions(shellctx->y);
  (*newmat)->ops->destroy = MatDestroy_ML;
  return(0);
}
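
/*
   Design note (added for clarity, not part of the original source): only MATOP_MULT
   and MATOP_MULT_ADD are installed on the shell because PCMG applies the wrapped
   restriction and interpolation operators exclusively through MatMult() and
   MatMultAdd(); any other operation requested on these wrappers would raise the
   usual "no support" error from MATSHELL.
*/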

PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %D must equal n %D",m,n);

  MatCreate(mlmat->comm->USR_comm,&A);
  MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);
  MatSetType(A,MATMPIAIJ);
  PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);
  aa = (PetscScalar*)(aj + nz_max);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);
  }
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
  *newmat = A;

  PetscFree3(nnzA,nnzB,nnz);
  PetscFree(aj);
  return(0);
}