mpi.h
1 /*
2  This is a special set of bindings for uni-processor use of MPI by the PETSc library.
3 
4  NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.
5 
6  For example,
7  * Does not implement send to self.
8  * Does not implement attributes correctly.
9 */
10 
11 /*
12  The following info is a response to one of the petsc-maint questions
13  regarding MPIUNI.
14 
15  MPIUNI was developed with the aim of getting PETSc compiled and
16  usable in the absence of a full MPI implementation. With this, we
17  were able to provide PETSc on Windows and Windows64 even before any MPI
18  implementation was available on these platforms. [Or with certain
19  compilers, such as Borland, that do not have a usable MPI
20  implementation.]
21 
22  However, providing a sequential, standards-compliant MPI
23  implementation is *not* the goal of MPIUNI. The development strategy
24  was to make just enough changes so that the PETSc sources and examples
25  compile without errors and run in uni-processor mode. This is
26  the reason the individual functions are not documented.
27 
28  PETSc uses MPIUNI primarily from C. However, a minimal Fortran
29  interface is also provided - to get PETSc Fortran examples with a
30  few MPI calls working.
31 
32  One of the optimizations in MPIUNI is to avoid function call
33  overhead when possible. Hence most of the C functions are
34  implemented as macros. However, the function calls cannot be avoided
35  for Fortran usage.
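    As an illustration (a sketch, not code taken from mpi.c): a C call such
    as MPI_Barrier(comm) disappears at compile time into the do-nothing
    macro defined later in this file, whereas the Fortran binding has to be
    a real symbol; the wrapper below is hypothetical, the actual names and
    name mangling live in mpi.c.

      // C side: expands in place, no call instruction is generated
      #define MPI_Barrier(comm) \
        (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)

      // Fortran side (hypothetical wrapper, single trailing underscore assumed)
      void mpi_barrier_(int *comm,int *ierr)
      {
        *ierr = MPI_SUCCESS;
      }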
36 
37  Most PETSc objects have both sequential and parallel
38  implementations, which are separate. For example, there are two
39  sparse matrix storage formats: SeqAIJ and MPIAIJ. Some MPI
40  routines are used in the Seq part, but most of them are used in the
41  MPI part. The send/receive calls can be found mostly in the MPI
42  part.
43 
44  When MPIUNI is used, only the Seq versions of the PETSc objects are
45  used, even though the MPI variants of the objects are compiled. Since
46  there are no send/receive calls in the Seq variant, PETSc works fine
47  with MPIUNI in seq mode.
48 
49  The reason some send/receive functions are defined to abort() is to
50  detect sections of code that use send/receive functions and get
51  executed in sequential mode (which shouldn't happen in the case of
52  PETSc).
53 
54  A proper implementation of send/receive would involve writing a
55  function for each of them. Inside each of these functions, we would
56  have to check whether the send is to self or the receive is from
57  self, and then do the buffering accordingly (until the receive is
58  called) - or, if a nonblocking receive is called, do a copy, etc.
59  Handling the buffering aspects might be complicated enough that, in
60  that case, a proper MPI implementation might as well be used. This is
61  the reason send-to-self is not implemented in MPIUNI, and never
62  will be.
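    A sketch of how user code typically copes with this restriction; the
    buffer names, sizes and the two-process exchange pattern are made up
    for this example, and only calls stubbed or implemented by MPIUNI are
    used:

      #include <string.h>

      enum { N = 8 };
      double sendbuf[N],recvbuf[N];
      MPI_Status status;
      int size,rank,tag = 0;

      MPI_Comm_size(MPI_COMM_WORLD,&size);
      MPI_Comm_rank(MPI_COMM_WORLD,&rank);
      if (size == 1) {
        memcpy(recvbuf,sendbuf,N*sizeof(double));  // local copy, no send to self
      } else if (rank == 0) {
        MPI_Send(sendbuf,N,MPI_DOUBLE,1,tag,MPI_COMM_WORLD);
      } else if (rank == 1) {
        MPI_Recv(recvbuf,N,MPI_DOUBLE,0,tag,MPI_COMM_WORLD,&status);
      }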
63 
64  Proper implementations of MPI [e.g. MPICH & OpenMPI] are
65  available for most machines. When these packages are available, it is
66  generally preferable to use one of them instead of MPIUNI - even if
67  the user is using PETSc sequentially.
68 
69  - MPIUNI does not support all MPI functions [or functionality].
70  Hence it might not work with external packages or user code that
71  might have MPI calls in it.
72 
73  - MPIUNI is not a standards-compliant implementation for np=1.
74  For example, if the user code has send/recv to self, then it will
75  abort. [There are similar issues with a number of other MPI features.]
76  However, MPICH & OpenMPI are correct implementations of the MPI
77  standard for np=1.
78 
79  - When user code uses multiple MPI-based packages that have their
80  own *internal* stubs equivalent to MPIUNI in sequential mode,
81  these multiple implementations of MPI for np=1 invariably conflict
82  with each other. The correct thing to do is: make all such
83  packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
84  satisfy this requirement correctly [and hence are the correct choice].
85 
86  - Using MPICH/OpenMPI sequentially should have minimal
87  disadvantages. [For example, these binaries can be run without
88  mpirun/mpiexec as ./executable, without requiring any extra
89  configuration for ssh/rsh/daemons, etc.] This should not be a
90  reason to avoid these packages for sequential use.
91 
92  Instructions for building standalone MPIUNI [e.g. linux/gcc+gfortran]:
93  - extract include/mpiuni/mpi.h,mpif.f, src/sys/mpiuni/mpi.c from PETSc
94  - remove reference to petscconf.h from mpi.h
95  - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
96  - ar cr libmpiuni.a mpi.o
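    A sketch of how the resulting library might then be used; the program,
    file names and paths below are illustrative only, not part of PETSc:

      // hello_uni.c - trivial program built against the extracted mpi.h
      #include <stdio.h>
      #include "mpi.h"

      int main(int argc,char **argv)
      {
        int size,rank;
        MPI_Init(&argc,&argv);
        MPI_Comm_size(MPI_COMM_WORLD,&size);   // always 1 under MPIUNI
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);   // always 0 under MPIUNI
        printf("size %d rank %d\n",size,rank);
        MPI_Finalize();
        return 0;
      }

    compiled and linked, for example, as

      gcc hello_uni.c -I. libmpiuni.a -o hello_uni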
97 
98 */
99 
100 #if !defined(__MPIUNI_H)
101 #define __MPIUNI_H
102 
103 /* Required by abort() in mpi.c & for win64 */
104 //#include "petscconf.h"
105 #include <stdlib.h>
106 
107 #if defined(__cplusplus)
108 extern "C" {
109 #endif
110 
111 /* Require an integer type large enough to hold a pointer */
112 #if !defined(MPIUNI_INTPTR)
113 #define MPIUNI_INTPTR long
114 #endif
115 
116 /*
117 
118  MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
119 from generating warning messages about unused variables while compiling PETSc.
120 */
121 extern void *MPIUNI_TMP;
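/*
    To illustrate the idiom (a sketch, not part of the original header):
    assigning each macro argument to the global MPIUNI_TMP "uses" the
    argument, so the comma expression still evaluates to MPI_SUCCESS while
    the compiler cannot complain that a caller's parameter is unused.

      // hypothetical user routine; if MPI_Barrier(comm) expanded to nothing,
      // 'comm' could be flagged as an unused parameter
      static int do_sync(MPI_Comm comm)
      {
        return MPI_Barrier(comm);  // expands to (MPIUNI_TMP = ...(comm), MPI_SUCCESS)
      }
*/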
122 
123 #define MPI_COMM_WORLD 1
124 #define MPI_COMM_SELF MPI_COMM_WORLD
125 #define MPI_COMM_NULL 0
126 #define MPI_SUCCESS 0
127 #define MPI_IDENT 0
128 #define MPI_CONGRUENT 1
129 #define MPI_SIMILAR 2
130 #define MPI_UNEQUAL 3
131 #define MPI_ANY_SOURCE (-2)
132 #define MPI_KEYVAL_INVALID 0
133 #define MPI_ERR_UNKNOWN 18
134 #define MPI_ERR_INTERN 21
135 #define MPI_ERR_OTHER 1
136 #define MPI_TAG_UB 0
137 #define MPI_ERRORS_RETURN 0
138 #define MPI_UNDEFINED (-32766)
139 
140 /* External types */
141 typedef int MPI_Comm;
142 typedef void *MPI_Request;
143 typedef void *MPI_Group;
144 typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
145 typedef char *MPI_Errhandler;
146 typedef int MPI_Fint;
147 typedef int MPI_File;
148 typedef int MPI_Info;
149 typedef int MPI_Offset;
150 
151 
152 /* In order to handle datatypes, we make them into "sizeof(raw-type)";
153  this allows us to do the MPIUNI_Memcpy calls easily */
154 #define MPI_Datatype int
155 #define MPI_FLOAT sizeof(float)
156 #define MPI_DOUBLE sizeof(double)
157 #define MPI_LONG_DOUBLE sizeof(long double)
158 #define MPI_CHAR sizeof(char)
159 #define MPI_BYTE sizeof(char)
160 #define MPI_INT sizeof(int)
161 #define MPI_LONG sizeof(long)
162 #define MPI_LONG_LONG_INT sizeof(long long)
163 #define MPI_SHORT sizeof(short)
164 #define MPI_UNSIGNED_SHORT sizeof(unsigned short)
165 #define MPI_UNSIGNED sizeof(unsigned)
166 #define MPI_UNSIGNED_CHAR sizeof(unsigned char)
167 #define MPI_UNSIGNED_LONG sizeof(unsigned long)
168 #define MPI_COMPLEX 2*sizeof(float)
169 #define MPI_C_COMPLEX 2*sizeof(float)
170 #define MPI_C_DOUBLE_COMPLEX 2*sizeof(double)
171 #define MPI_FLOAT_INT (sizeof(float) + sizeof(int))
172 #define MPI_DOUBLE_INT (sizeof(double) + sizeof(int))
173 #define MPI_LONG_INT (sizeof(long) + sizeof(int))
174 #define MPI_SHORT_INT (sizeof(short) + sizeof(int))
175 #define MPI_2INT (2* sizeof(int))
176 
177 #if defined(PETSC_USE_REAL___FLOAT128)
178 extern MPI_Datatype MPIU___FLOAT128;
179 #define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? 2*sizeof(double) : datatype)
180 #else
181 #define MPI_sizeof(datatype) (datatype)
182 #endif
183 extern int MPIUNI_Memcpy(void*,const void*,int);
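/*
    A sketch of what the size encoding buys (only names defined in this
    file are used; the buffers are made up): because MPI_DOUBLE is
    literally sizeof(double), a collective such as MPI_Allgather can
    compute its byte count as sendcount*MPI_sizeof(sendtype) and forward
    it straight to MPIUNI_Memcpy.

      double send[4],recv[4];
      // under MPIUNI this expands, roughly, to
      //   MPIUNI_Memcpy(recv,send,4*MPI_sizeof(MPI_DOUBLE))
      // i.e. a plain local copy of 4*sizeof(double) bytes
      MPI_Allgather(send,4,MPI_DOUBLE,recv,4,MPI_DOUBLE,MPI_COMM_WORLD);
*/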
184 
185 
186 #define MPI_REQUEST_NULL ((MPI_Request)0)
187 #define MPI_GROUP_NULL ((MPI_Group)0)
188 #define MPI_INFO_NULL ((MPI_Info)0)
189 #define MPI_BOTTOM (void *)0
190 typedef int MPI_Op;
191 
192 #define MPI_MODE_RDONLY 0
193 #define MPI_MODE_WRONLY 0
194 #define MPI_MODE_CREATE 0
195 
196 #define MPI_SUM 0
197 #define MPI_MAX 0
198 #define MPI_MIN 0
199 #define MPI_ANY_TAG (-1)
200 #define MPI_DATATYPE_NULL 0
201 #define MPI_PACKED 0
202 #define MPI_MAX_ERROR_STRING 2056
203 #define MPI_STATUS_IGNORE (MPI_Status *)1
204 #define MPI_ORDER_FORTRAN 57
205 #define MPI_IN_PLACE (void *) -1
206 
207 /*
208  Prototypes of some functions which are implemented in mpi.c
209 */
210 typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
211 typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
212 typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);
213 
214 /*
215  So that the PETSc MPIUNI can be used with another package that has its
216  own MPIUni, we map the following function names to unique PETSc names. Those functions
217  are defined in mpi.c and put into the libpetscsys.a or libpetsc.a library.
218 
219  Note that this does not work for the MPIUni Fortran symbols which are explicitly in the
220  PETSc libraries unless the flag MPIUNI_AVOID_MPI_NAMESPACE is set.
221 */
222 #define MPI_Abort Petsc_MPI_Abort
223 #define MPI_Attr_get Petsc_MPI_Attr_get
224 #define MPI_Keyval_free Petsc_MPI_Keyval_free
225 #define MPI_Attr_put Petsc_MPI_Attr_put
226 #define MPI_Attr_delete Petsc_MPI_Attr_delete
227 #define MPI_Keyval_create Petsc_MPI_Keyval_create
228 #define MPI_Comm_free Petsc_MPI_Comm_free
229 #define MPI_Comm_dup Petsc_MPI_Comm_dup
230 #define MPI_Comm_create Petsc_MPI_Comm_create
231 #define MPI_Init Petsc_MPI_Init
232 #define MPI_Finalize Petsc_MPI_Finalize
233 #define MPI_Initialized Petsc_MPI_Initialized
234 #define MPI_Finalized Petsc_MPI_Finalized
235 #define MPI_Comm_size Petsc_MPI_Comm_size
236 #define MPI_Comm_rank Petsc_MPI_Comm_rank
237 
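/*
    For illustration only (a sketch of preprocessor behaviour, nothing
    more): with the renaming above, a user-code call such as

      ierr = MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    is seen by the compiler as

      ierr = Petsc_MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    so the symbol in the object file cannot collide with an MPI_Comm_rank
    stub provided by some other package's internal np=1 MPI.
*/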
238 extern int MPI_Abort(MPI_Comm,int);
239 extern int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
240 extern int MPI_Keyval_free(int*);
241 extern int MPI_Attr_put(MPI_Comm,int,void *);
242 extern int MPI_Attr_delete(MPI_Comm,int);
243 extern int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
244 extern int MPI_Comm_free(MPI_Comm*);
245 extern int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
246 extern int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
247 extern int MPI_Init(int *, char ***);
248 extern int MPI_Finalize(void);
249 extern int MPI_Initialized(int*);
250 extern int MPI_Finalized(int*);
251 extern int MPI_Comm_size(MPI_Comm,int*);
252 extern int MPI_Comm_rank(MPI_Comm,int*);
253 
254 #define MPI_Aint MPIUNI_INTPTR
255 /*
256  Routines we have replaced with macros that do nothing;
257  some return error codes, others return success.
258 */
259 
260 #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
261 #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
262 
263 #define MPI_Send(buf,count,datatype,dest,tag,comm) \
264  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
265  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
266  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
267  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
268  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
269  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
270  MPI_Abort(MPI_COMM_WORLD,0))
271 #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
272  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
273  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
274  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
275  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
276  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
277  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
278  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
279  MPI_Abort(MPI_COMM_WORLD,0))
280 #define MPI_Get_count(status, datatype,count) \
281  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
282  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
283  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
284  MPI_Abort(MPI_COMM_WORLD,0))
285 #define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
286  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
287  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
288  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
289  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
290  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
291  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
292  MPI_Abort(MPI_COMM_WORLD,0))
293 #define MPI_Ssend(buf,count, datatype,dest,tag,comm) \
294  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
295  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
296  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
297  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
298  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
299  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
300  MPI_Abort(MPI_COMM_WORLD,0))
301 #define MPI_Rsend(buf,count, datatype,dest,tag,comm) \
302  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
303  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
304  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
305  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
306  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
307  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
308  MPI_Abort(MPI_COMM_WORLD,0))
309 #define MPI_Buffer_attach(buffer,size) \
310  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
311  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
312  MPI_SUCCESS)
313 #define MPI_Buffer_detach(buffer,size)\
314  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
315  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
316  MPI_SUCCESS)
317 #define MPI_Ibsend(buf,count, datatype,dest,tag,comm,request) \
318  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
319  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
320  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
321  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
322  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
323  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
324  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
325  MPI_Abort(MPI_COMM_WORLD,0))
326 #define MPI_Issend(buf,count, datatype,dest,tag,comm,request) \
327  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
328  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
329  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
330  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
331  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
332  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
333  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
334  MPI_Abort(MPI_COMM_WORLD,0))
335 #define MPI_Irsend(buf,count, datatype,dest,tag,comm,request) \
336  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
337  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
338  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
339  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
340  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
341  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
342  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
343  MPI_Abort(MPI_COMM_WORLD,0))
344 #define MPI_Irecv(buf,count, datatype,source,tag,comm,request) \
345  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
346  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
347  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
348  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
349  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
350  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
351  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
352  MPI_Abort(MPI_COMM_WORLD,0))
353 #define MPI_Isend(buf,count, datatype,dest,tag,comm,request) \
354  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
355  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
356  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
357  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
358  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
359  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
360  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
361  MPI_Abort(MPI_COMM_WORLD,0))
362 #define MPI_Wait(request,status) \
363  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
364  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
365  MPI_SUCCESS)
366 #define MPI_Test(request,flag,status) \
367  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
368  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
369  *(flag) = 0, \
370  MPI_SUCCESS)
371 #define MPI_Request_free(request) \
372  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
373  MPI_SUCCESS)
374 #define MPI_Waitany(a,b,c,d) \
375  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
376  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
377  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
378  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),(*c = 0), \
379  MPI_SUCCESS)
380 #define MPI_Testany(a,b,c,d,e) \
381  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
382  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
383  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
384  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
385  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
386  MPI_SUCCESS)
387 #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
388  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
389  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
390  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
391  MPI_SUCCESS)
392 #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
393  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
394  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
395  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
396  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
397  MPI_SUCCESS)
398 #define MPI_Waitsome(incount,array_of_requests,outcount,\
399  array_of_indices,array_of_statuses) \
400  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
401  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
402  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
403  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
404  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
405  MPI_SUCCESS)
406 #define MPI_Comm_group(comm,group) \
407  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
408  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
409  MPI_SUCCESS)
410 #define MPI_Group_incl(group,n,ranks,newgroup) \
411  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
412  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
413  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
414  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
415  MPI_SUCCESS)
416 #define MPI_Testsome(incount,array_of_requests,outcount,\
417  array_of_indices,array_of_statuses) MPI_SUCCESS
418 #define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0, MPI_SUCCESS)
419 #define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
420 #define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
421 #define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
422 #define MPI_Send_init(buf,count, datatype,dest,tag,comm,request) \
423  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
424  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
425  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
426  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
427  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
428  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
429  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
430  MPI_SUCCESS)
431 #define MPI_Bsend_init(buf,count, datatype,dest,tag,comm,request) \
432  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
433  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
434  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
435  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
436  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
437  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
438  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
439  MPI_SUCCESS)
440 #define MPI_Ssend_init(buf,count, datatype,dest,tag,comm,request) \
441  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
442  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
443  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
444  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
445  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
446  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
447  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
448  MPI_SUCCESS)
449 #define MPI_Bsend_init(buf,count, datatype,dest,tag,comm,request) \
450  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
451  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
452  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
453  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
454  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
455  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
456  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
457  MPI_SUCCESS)
458 #define MPI_Rsend_init(buf,count, datatype,dest,tag,comm,request) \
459  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
460  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
461  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
462  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
463  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
464  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
465  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
466  MPI_SUCCESS)
467 #define MPI_Recv_init(buf,count, datatype,source,tag,comm,request) \
468  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
469  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
470  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
471  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
472  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
473  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
474  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
475  MPI_SUCCESS)
476 #define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
477 #define MPI_Startall(count,array_of_requests) \
478  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
479  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
480  MPI_SUCCESS)
481 #define MPI_Op_create(function,commute,op) \
482  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
483  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
484  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
485  MPI_SUCCESS)
486 #define MPI_Op_free(op) \
487  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
488  MPI_SUCCESS)
489  /* Need to determine sizeof "sendtype" */
490 #define MPI_Sendrecv(sendbuf,sendcount, sendtype,\
491  dest,sendtag,recvbuf,recvcount,\
492  recvtype,source,recvtag,\
493  comm,status) \
494  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * MPI_sizeof(sendtype))
495 #define MPI_Sendrecv_replace(buf,count, datatype,dest,sendtag,\
496  source,recvtag,comm,status) MPI_SUCCESS
497 #define MPI_Type_contiguous(count, oldtype,newtype) \
498  (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
499 #define MPI_Type_vector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
500 #define MPI_Type_hvector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
501 #define MPI_Type_indexed(count,array_of_blocklengths,\
502  array_of_displacements, oldtype,\
503  newtype) MPI_SUCCESS
504 #define MPI_Type_hindexed(count,array_of_blocklengths,\
505  array_of_displacements, oldtype,\
506  newtype) MPI_SUCCESS
507 #define MPI_Type_struct(count,array_of_blocklengths,\
508  array_of_displacements,\
509  array_of_types, newtype) MPI_SUCCESS
510 #define MPI_Address(location,address) \
511  (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
512 #define MPI_Type_extent(datatype,extent) *(extent) = datatype
513 #define MPI_Type_size(datatype,size) *(size) = datatype
514 #define MPI_Type_lb(datatype,displacement) \
515  MPI_Abort(MPI_COMM_WORLD,0)
516 #define MPI_Type_ub(datatype,displacement) \
517  MPI_Abort(MPI_COMM_WORLD,0)
518 #define MPI_Type_commit(datatype) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
519  MPI_SUCCESS)
520 #define MPI_Type_free(datatype) MPI_SUCCESS
521 #define MPI_Get_elements(status, datatype,count) \
522  MPI_Abort(MPI_COMM_WORLD,0)
523 #define MPI_Pack(inbuf,incount, datatype,outbuf,\
524  outsize,position, comm) \
525  MPI_Abort(MPI_COMM_WORLD,0)
526 #define MPI_Unpack(inbuf,insize,position,outbuf,\
527  outcount, datatype,comm) \
528  MPI_Abort(MPI_COMM_WORLD,0)
529 #define MPI_Pack_size(incount, datatype,comm,size) \
530  MPI_Abort(MPI_COMM_WORLD,0)
531 #define MPI_Barrier(comm) \
532  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
533  MPI_SUCCESS)
534 #define MPI_Bcast(buffer,count,datatype,root,comm) \
535  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
536  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
537  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
538  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
539  MPI_SUCCESS)
540 #define MPI_Gather(sendbuf,sendcount, sendtype,\
541  recvbuf,recvcount, recvtype,\
542  root,comm) \
543  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
544  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
545  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
546  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
547  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
548  MPI_SUCCESS)
549 #define MPI_Gatherv(sendbuf,sendcount, sendtype,\
550  recvbuf,recvcounts,displs,\
551  recvtype,root,comm) \
552  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
553  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
554  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
555  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
556  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
557  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
558  MPI_SUCCESS)
559 #define MPI_Scatter(sendbuf,sendcount, sendtype,\
560  recvbuf,recvcount, recvtype,\
561  root,comm) \
562  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
563  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
564  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
565  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
566  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
567  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
568  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
569  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_Abort(MPI_COMM_WORLD,0))
570 #define MPI_Scatterv(sendbuf,sendcounts,displs,\
571  sendtype, recvbuf,recvcount,\
572  recvtype,root,comm) \
573  (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)),\
574  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
575  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
576  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
577  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
578  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
579  MPI_SUCCESS)
580 #define MPI_Allgather(sendbuf,sendcount, sendtype,\
581  recvbuf,recvcount, recvtype,comm) \
582  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
583  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
584  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
585  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
586  MPI_SUCCESS)
587 #define MPI_Allgatherv(sendbuf,sendcount, sendtype,\
588  recvbuf,recvcounts,displs,recvtype,comm) \
589  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
590  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
591  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
592  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
593  (sendbuf != MPI_IN_PLACE) ? MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)*MPI_sizeof(sendtype)) : 0, \
594  MPI_SUCCESS)
595 #define MPI_Alltoall(sendbuf,sendcount, sendtype,\
596  recvbuf,recvcount, recvtype,comm) \
597  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
598  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
599  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
600  MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
601  MPI_SUCCESS)
602 #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
603  sendtype, recvbuf,recvcounts,\
604  rdispls, recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
605 #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
606  sendtypes, recvbuf,recvcounts,\
607  rdispls, recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
608 #define MPI_Reduce(sendbuf, recvbuf,count,\
609  datatype,op,root,comm) \
610  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
611  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
612 #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
613  ((sendbuf != MPI_IN_PLACE) ? MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)) : 0, \
614  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
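/*
    A sketch of what the two MPI_Allreduce call forms reduce to on one
    process (only names from this file are used; the buffers are made up):
    with distinct buffers the macro degenerates to a memcpy, and with
    MPI_IN_PLACE it is a pure no-op, since the "reduction" of a single
    contribution is the contribution itself.

      double local[2] = {1.0,2.0},global[2];
      // copies 2*sizeof(double) bytes from local to global
      MPI_Allreduce(local,global,2,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
      // MPI_IN_PLACE: no copy is performed, local already holds the result
      MPI_Allreduce(MPI_IN_PLACE,local,2,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);
*/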
615 #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
616  (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
617  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
618 #define MPI_Exscan(sendbuf, recvbuf,count,datatype,op,comm) MPI_SUCCESS
619 #define MPI_Reduce_scatter(sendbuf, recvbuf,recvcounts,\
620  datatype,op,comm) \
621  MPI_Abort(MPI_COMM_WORLD,0)
622 #define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
623 #define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
624 #define MPI_Group_translate_ranks(group1,n,ranks1,\
625  group2,ranks2) MPI_Abort(MPI_COMM_WORLD,0)
626 #define MPI_Group_compare(group1,group2,result) \
627  (*(result)=1,MPI_SUCCESS)
628 #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
629 #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
630 #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
631 #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
632 #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
633 #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
634 #define MPI_Group_free(group) \
635  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
636  MPI_SUCCESS)
637 #define MPI_Comm_compare(comm1,comm2,result) \
638  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
639  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
640  *(result)=MPI_IDENT,\
641  MPI_SUCCESS)
642 #define MPI_Comm_split(comm,color,key,newcomm) MPI_Comm_dup(comm,newcomm)
643 #define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
644 #define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
645 #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
646 #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
647  remote_leader,tag,newintercomm) MPI_SUCCESS
648 #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
649 
650 #define MPI_Topo_test(comm,status) MPI_SUCCESS
651 #define MPI_Cart_create(comm_old,ndims,dims,periods,\
652  reorder,comm_cart) MPI_SUCCESS
653 #define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
654 #define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
655 #define MPI_Graphdims_Get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
656 #define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
657 #define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
658 #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
659  MPI_Abort(MPI_COMM_WORLD,0)
660 #define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
661 #define MPI_Cart_coords(comm,rank,maxdims,coords) \
662  MPI_Abort(MPI_COMM_WORLD,0)
663 #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
664  MPI_Abort(MPI_COMM_WORLD,0)
665 #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
666  MPI_Abort(MPI_COMM_WORLD,0)
667 #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
668  MPI_Abort(MPI_COMM_WORLD,0)
669 #define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
670 #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
671 #define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
672 #define MPI_Get_processor_name(name,result_len) \
673  (MPIUNI_Memcpy(name,"localhost",9*sizeof(char)),name[9] = 0,*(result_len) = 9,MPI_SUCCESS)
674 #define MPI_Errhandler_create(function,errhandler) (*(errhandler) = (MPI_Errhandler) 0, MPI_SUCCESS)
675 #define MPI_Errhandler_set(comm,errhandler) \
676  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
677  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
678  MPI_SUCCESS)
679 #define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
680 #define MPI_Errhandler_free(errhandler) MPI_SUCCESS
681 #define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
682 #define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
683 #define MPI_Wtick() 1.0
684 #define MPI_Wtime() 0.0
685 #define MPI_Pcontrol(level) MPI_SUCCESS
686 
687 #define MPI_NULL_COPY_FN 0
688 #define MPI_NULL_DELETE_FN 0
689 
690  /* MPI-IO additions */
691 
692 #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
693  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm), \
694  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename), \
695  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode), \
696  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
697  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
698  MPI_Abort(MPI_COMM_WORLD,0))
699 
700 #define MPI_File_close(mpi_fh) \
701  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
702  MPI_Abort(MPI_COMM_WORLD,0))
703 
704 #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
705  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
706  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp), \
707  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype), \
708  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype), \
709  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep), \
710  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
711  MPI_Abort(MPI_COMM_WORLD,0))
712 
713 #define MPI_Type_get_extent(datatype,lb,extent) \
714  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
715  *(lb) = 0, *(extent) = datatype,0)
716 
717 #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
718  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
719  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
720  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
721  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
722  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
723  MPI_Abort(MPI_COMM_WORLD,0))
724 
725 #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
726  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
727  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
728  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
729  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
730  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
731  MPI_Abort(MPI_COMM_WORLD,0))
732 
733  /* called from PetscInitialize() - so return success */
734 #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
735  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name), \
736  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn), \
737  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn), \
738  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn), \
739  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state), \
740  MPI_SUCCESS)
741 
742 #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
743  (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims), \
744  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes), \
745  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes), \
746  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts), \
747  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order), \
748  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype), \
749  MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype), \
750  MPI_Abort(MPI_COMM_WORLD,0))
751 
752 #if defined(__cplusplus)
753 }
754 #endif
755 #endif
756 