/* subcomm.c */

  1: #define PETSC_DLL
  2: /*
  3:      Provides utility routines for split MPI communicator.
  4: */
 #include "petsc.h"
 #include "petscsys.h"

 10: PetscErrorCode  PetscSubcommDestroy(PetscSubcomm psubcomm)
 11: {

 15:   PetscFree(psubcomm);
 16:   return(0);
 17: }

 21: /*@C
 22:   PetscSubcommCreate - Create a PetscSubcomm context.

 24:    Collective on MPI_Comm

 26:    Input Parameter:
 27: +  comm - MPI communicator
 28: -  nsubcomm - the number of subcommunicators to be created

 30:    Output Parameter:
 31: .  psubcomm - location to store the PetscSubcomm context


 34:    Notes:
   To avoid data scattering from subcomm back to the original comm, processes are
   assigned to subcommunicators round-robin: process rank joins subcommunicator
   rank %% nsubcomm, so each subcommunicator's members are strided through comm.
 37:    Example: size=4, nsubcomm=(*psubcomm)->n=3
 38:      comm=(*psubcomm)->parent:
 39:       rank:     [0]  [1]  [2]  [3]
 40:       color:     0    1    2    0

 42:      subcomm=(*psubcomm)->comm:
 43:       subrank:  [0]  [0]  [0]  [1]    

 45:      dupcomm=(*psubcomm)->dupparent:
 46:       duprank:  [0]  [2]  [3]  [1]

 48:      Here, subcomm[color = 0] has subsize=2, owns process [0] and [3]
 49:            subcomm[color = 1] has subsize=1, owns process [1]
 50:            subcomm[color = 2] has subsize=1, owns process [2]
 51:            dupcomm has same number of processes as comm, and its duprank maps
 52:            processes in subcomm contiguously into a 1d array:
 53:             duprank: [0] [1]      [2]         [3]
 54:             rank:    [0] [3]      [1]         [2]
 55:                     subcomm[0] subcomm[1]  subcomm[2]

 57:    Level: advanced

 59: .keywords: communicator, create

 61: .seealso: PetscSubcommDestroy()
 62: @*/
 63: PetscErrorCode  PetscSubcommCreate(MPI_Comm comm,PetscInt nsubcomm,PetscSubcomm *psubcomm)
 64: {
 66:   PetscMPIInt    rank,size,*subsize,duprank,subrank;
 67:   PetscInt       np_subcomm,nleftover,i,j,color;
 68:   MPI_Comm       subcomm=0,dupcomm=0;
 69:   PetscSubcomm   psubcomm_tmp;

 72:   MPI_Comm_rank(comm,&rank);
 73:   MPI_Comm_size(comm,&size);
 74:   if (nsubcomm < 1 || nsubcomm > size) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE, "Num of subcommunicators %D cannot be < 1 or > input comm size %D",nsubcomm,size);

 76:   /* get size of each subcommunicator */
 77:   PetscMalloc((1+nsubcomm)*sizeof(PetscMPIInt),&subsize);
 78:   np_subcomm = size/nsubcomm;
 79:   nleftover  = size - nsubcomm*np_subcomm;
 80:   for (i=0; i<nsubcomm; i++){
 81:     subsize[i] = np_subcomm;
 82:     if (i<nleftover) subsize[i]++;
 83:   }

 85:   /* find color for this proc */
 86:   color   = rank%nsubcomm;
 87:   subrank = rank/nsubcomm;

 89:   MPI_Comm_split(comm,color,subrank,&subcomm);

 91:   j = 0; duprank = 0;
 92:   for (i=0; i<nsubcomm; i++){
 93:     if (j == color){
 94:       duprank += subrank;
 95:       break;
 96:     }
 97:     duprank += subsize[i]; j++;
 98:   }
 99:   PetscFree(subsize);
100: 
101:   /* create dupcomm with same size as comm, but its rank, duprank, maps subcomm's contiguously into dupcomm */
102:   MPI_Comm_split(comm,0,duprank,&dupcomm);
103: 
104:   PetscNew(struct _n_PetscSubcomm,&psubcomm_tmp);
105:   psubcomm_tmp->parent    = comm;
106:   psubcomm_tmp->dupparent = dupcomm;
107:   psubcomm_tmp->comm      = subcomm;
108:   psubcomm_tmp->n         = nsubcomm;
109:   psubcomm_tmp->color     = color;
110:   *psubcomm = psubcomm_tmp;
111:   return(0);
112: }