Actual source code: shvec.c
#define PETSCVEC_DLL
/*
   This file contains routines for parallel vector operations that use shared memory.
*/
#include "src/vec/vec/impls/mpi/pvecimpl.h"

/*
   Could not get the include files to work properly on the SGI with
   the C++ compiler.
*/
#if defined(PETSC_USE_SHARED_MEMORY) && !defined(__cplusplus)

EXTERN PetscErrorCode PetscSharedMalloc(MPI_Comm,PetscInt,PetscInt,void**);
PetscErrorCode VecDuplicate_Shared(Vec win,Vec *v)
{
  Vec_MPI     *w = (Vec_MPI*)win->data;
  PetscScalar *array;

  /* first processor allocates the entire array and sends its address to the others */
  PetscSharedMalloc(win->comm,win->map.n*sizeof(PetscScalar),win->map.N*sizeof(PetscScalar),(void**)&array);

  VecCreate(win->comm,v);
  VecSetSizes(*v,win->map.n,win->map.N);
  VecCreate_MPI_Private(*v,w->nghost,array,win->map);

  /* New vector should inherit stashing property of parent */
  (*v)->stash.donotstash   = win->stash.donotstash;
  (*v)->stash.ignorenegidx = win->stash.ignorenegidx;

  PetscOListDuplicate(win->olist,&(*v)->olist);
  PetscFListDuplicate(win->qlist,&(*v)->qlist);

  if (win->mapping) {
    PetscObjectReference((PetscObject)win->mapping);
    (*v)->mapping = win->mapping;
  }
  if (win->bmapping) {
    PetscObjectReference((PetscObject)win->bmapping);
    (*v)->bmapping = win->bmapping;
  }
  (*v)->ops->duplicate = VecDuplicate_Shared;
  (*v)->map.bs         = win->map.bs;
  (*v)->bstash.bs      = win->bstash.bs;
  return(0);
}
PetscErrorCode VecCreate_Shared(Vec vv)
{
  PetscScalar *array;

  PetscSplitOwnership(vv->comm,&vv->map.n,&vv->map.N);
  PetscSharedMalloc(vv->comm,vv->map.n*sizeof(PetscScalar),vv->map.N*sizeof(PetscScalar),(void**)&array);

  VecCreate_MPI_Private(vv,0,array,PETSC_NULL);
  vv->ops->duplicate = VecDuplicate_Shared;
  return(0);
}
/* ----------------------------------------------------------------------------------------
     Code to manage shared memory allocation under the SGI with MPI

   We associate with each communicator a shared memory "arena" from which memory may be allocated.
*/
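/*
   Overview of the flow implemented below (descriptive only, not part of the public API):

     PetscSharedInitialize(comm)           - attaches an SGI "us" arena (usinit()) to the
                                             communicator as an MPI attribute keyed by
                                             Petsc_Shared_keyval
     PetscSharedMalloc(comm,llen,len,&ptr) - rank 0 usmalloc()s len bytes from the arena
                                             and broadcasts the base address; every rank
                                             gets back a pointer offset by the prefix sum
                                             of the local lengths llen
*/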
#include "petscsys.h"
#include "petscfix.h"
#if defined(PETSC_HAVE_PWD_H)
#include <pwd.h>
#endif
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#if defined(PETSC_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif
#if defined(PETSC_HAVE_SYS_PARAM_H)
#include <sys/param.h>
#endif
#if defined(PETSC_HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#include <fcntl.h>
#include <time.h>
#if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
#include <sys/systeminfo.h>
#endif
#include "petscfix.h"
static PetscMPIInt Petsc_Shared_keyval = MPI_KEYVAL_INVALID;
static PetscInt    Petsc_Shared_size   = 100000000;
/*
   Private routine to delete internal storage when a communicator is freed.
   This is called by MPI, not by users.

   The binding for the first argument changed from MPI 1.0 to 1.1; in 1.0
   it was MPI_Comm *comm.
*/
static PetscErrorCode Petsc_DeleteShared(MPI_Comm comm,PetscInt keyval,void* attr_val,void* extra_state)
{
  PetscFree(attr_val);
  PetscFunctionReturn(MPI_SUCCESS);
}
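/*
   Petsc_DeleteShared() is registered via MPI_Keyval_create() in PetscSharedInitialize()
   below as the delete callback for the arena attribute, so the PetscMalloc()ed holder
   for the arena pointer is released when the communicator is freed.
*/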
PetscErrorCode PetscSharedMemorySetSize(PetscInt s)
{
  Petsc_Shared_size = s;
  return(0);
}

#include "petscfix.h"
#include <ulocks.h>
PetscErrorCode PetscSharedInitialize(MPI_Comm comm)
{
  PetscMPIInt rank,flag;
  char        filename[PETSC_MAX_PATH_LEN];
  usptr_t     **arena;

  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    /*
       The calling sequence of the 2nd argument to this function changed
       between MPI Standard 1.0 and revision 1.1. Here we match the new
       standard; if you are using an MPI implementation that follows the
       older version, you will get a compiler warning about the next line.
       It is only a warning and should do no harm.
    */
    MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DeleteShared,&Petsc_Shared_keyval,0);
  }

  MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
  if (!flag) {
    /* This communicator does not yet have a shared memory arena */
    PetscMalloc(sizeof(usptr_t*),&arena);

    MPI_Comm_rank(comm,&rank);
    if (!rank) {
      PetscStrcpy(filename,"/tmp/PETScArenaXXXXXX");
#ifdef PETSC_HAVE_MKSTEMP
      if (mkstemp(filename) < 0) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#else
      if (!mktemp(filename)) {
        SETERRQ1(PETSC_ERR_FILE_OPEN,"Unable to open temporary file %s",filename);
      }
#endif
    }
    MPI_Bcast(filename,PETSC_MAX_PATH_LEN,MPI_CHAR,0,comm);
    PetscOptionsGetInt(PETSC_NULL,"-shared_size",&Petsc_Shared_size,&flag);
    usconfig(CONF_INITSIZE,Petsc_Shared_size);
    *arena = usinit(filename);
    MPI_Attr_put(comm,Petsc_Shared_keyval,arena);
  }

  return(0);
}
PetscErrorCode PetscSharedMalloc(MPI_Comm comm,PetscInt llen,PetscInt len,void **result)
{
  char        *value;
  PetscInt    shift;
  PetscMPIInt rank,flag;
  usptr_t     **arena;

  *result = 0;
  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    PetscSharedInitialize(comm);
  }
  MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
  if (!flag) {
    PetscSharedInitialize(comm);
    MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
    if (!flag) SETERRQ(PETSC_ERR_LIB,"Unable to initialize shared memory");
  }
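  /* Compute this rank's byte offset into the shared block as the exclusive prefix sum
     of the local lengths (MPI_Scan is inclusive, so llen is subtracted back out below).
     Passing a PetscInt with MPI_INT assumes PetscInt is a plain int here. */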
  MPI_Scan(&llen,&shift,1,MPI_INT,MPI_SUM,comm);
  shift -= llen;
  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    value = (char*)usmalloc((size_t)len,*arena);
    if (!value) {
      (*PetscErrorPrintf)("Unable to allocate shared memory location\n");
      (*PetscErrorPrintf)("Run with option -shared_size <size> \n");
      (*PetscErrorPrintf)("with size > %d \n",(int)(1.2*(Petsc_Shared_size+len)));
      SETERRQ(PETSC_ERR_LIB,"Unable to malloc shared memory");
    }
  }
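  /* Rank 0 broadcasts the base address of the shared block (8 bytes, which assumes
     64-bit pointers); every rank then advances it by its own byte offset. */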
  MPI_Bcast(&value,8,MPI_BYTE,0,comm);
  value   += shift;
  *result  = (void*)value;
  return(0);
}
#else

PetscErrorCode VecCreate_Shared(Vec vv)
{
  PetscMPIInt size;

  MPI_Comm_size(vv->comm,&size);
  if (size > 1) {
    SETERRQ(PETSC_ERR_SUP_SYS,"Not supported for shared memory vector objects on this machine");
  }
  VecCreate_Seq(vv);
  return(0);
}

#endif
/*@
   VecCreateShared - Creates a parallel vector that uses shared memory.

   Input Parameters:
.  comm - the MPI communicator to use
.  n - local vector length (or PETSC_DECIDE to have it calculated if N is given)
.  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)

   Output Parameter:
.  v - the vector

   Collective on MPI_Comm

   Notes:
   Currently VecCreateShared() is available only on the SGI; otherwise,
   this routine is the same as VecCreateMPI().

   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector.

   Level: advanced

   Concepts: vectors^creating with shared memory

.seealso: VecCreateSeq(), VecCreate(), VecCreateMPI(), VecDuplicate(), VecDuplicateVecs(),
          VecCreateGhost(), VecCreateMPIWithArray(), VecCreateGhostWithArray()

@*/
PetscErrorCode VecCreateShared(MPI_Comm comm,PetscInt n,PetscInt N,Vec *v)
{
  VecCreate(comm,v);
  VecSetSizes(*v,n,N);
  VecSetType(*v,VECSHARED);
  return(0);
}
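/*
   Example usage (a minimal sketch; it assumes the VecSet()/VecDestroy() calling
   sequences of this PETSc generation and standard CHKERRQ error checking):

     Vec            x;
     PetscErrorCode ierr;

     ierr = VecCreateShared(PETSC_COMM_WORLD,PETSC_DECIDE,100,&x);CHKERRQ(ierr);
     ierr = VecSet(x,1.0);CHKERRQ(ierr);
     ierr = VecDestroy(x);CHKERRQ(ierr);
*/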