! -*- fortran -*-
-! Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+! Copyright (c) 2010. The SimGrid Team.
! All rights reserved.
! This program is free software; you can redistribute it and/or modify it
! SMPI's Fortran 77 include file
-      integer MPI_THREAD_SINGLE, MPI_THREAD_FUNNELED, MPI_THREAD_SERIALIZED, MPI_THREAD_MULTIPLE
+      integer MPI_THREAD_SINGLE, MPI_THREAD_FUNNELED,
+     > MPI_THREAD_SERIALIZED, MPI_THREAD_MULTIPLE
      parameter(MPI_THREAD_SINGLE=0)
      parameter(MPI_THREAD_FUNNELED=1)
      parameter(MPI_THREAD_SERIALIZED=2)
+! MPI_THREAD_MULTIPLE was declared but never given a value; 3 completes
+! the ascending thread-support levels 0..3.
+      parameter(MPI_THREAD_MULTIPLE=3)
      parameter(MPI_UNDEFINED=-1)
      integer MPI_SUCCESS, MPI_ERR_COMM, MPI_ERR_ARG, MPI_ERR_TYPE,
-     > MPI_ERR_REQUEST, MPI_ERR_INTERN, MPI_ERR_COUNT, MPI_ERR_RANK,
+     > MPI_ERR_REQUEST, MPI_ERR_INTERN, MPI_ERR_COUNT,
+     > MPI_ERR_RANK,
     > MPI_ERR_TAG, MPI_ERR_TRUNCATE, MPI_ERR_GROUP, MPI_ERR_OP,
     > MPI_IDENT, MPI_SIMILAR, MPI_UNEQUAL, MPI_CONGRUENT,
     > MPI_WTIME_IS_GLOBAL
+! NOTE(review): only MPI_CONGRUENT and MPI_WTIME_IS_GLOBAL receive values
+! here; the MPI_SUCCESS/MPI_ERR_* and MPI_IDENT/SIMILAR/UNEQUAL names have
+! no parameter statements in this view -- confirm they are defined
+! elsewhere or add them to match the C-side values.
      parameter(MPI_CONGRUENT=3)
      parameter(MPI_WTIME_IS_GLOBAL=1)
+! These should be ordered as in smpi_f77.c
+! Communicators cross the f77 boundary as plain integers: MPI_COMM_WORLD
+! is lookup slot 0 (first registration in mpi_init__), MPI_COMM_SELF is
+! the special value -2 hardwired in get_comm(), and -1 is the null handle.
+      integer MPI_COMM_NULL, MPI_COMM_WORLD, MPI_COMM_SELF
+      parameter(MPI_COMM_NULL=-1)
+      parameter(MPI_COMM_SELF=-2)
+      parameter(MPI_COMM_WORLD=0)
+
! This should be equal to the number of int fields in MPI_Status
      integer MPI_STATUS_SIZE
      parameter(MPI_STATUS_SIZE=4)
      integer MPI_STATUS_IGNORE(MPI_STATUS_SIZE)
      common/smpi/ MPI_STATUS_IGNORE
-      parameter(MPI_DATATYPE_NULL=0)
-      external MPI_BYTE, MPI_CHARACTER, MPI_LOGICAL, MPI_INTEGER, MPI_INTEGER1,
-     > MPI_INTEGER2, MPI_INTEGER4, MPI_INTEGER8, MPI_REAL, MPI_REAL4,
-     > MPI_REAL8, MPI_DOUBLE_PRECISION, MPI_COMPLEX,
-     > MPI_DOUBLE_COMPLEX, MPI_2INTEGER, MPI_LOGICAL1, MPI_LOGICAL2,
-     > MPI_LOGICAL4, MPI_LOGICAL8
+! Requests are integer handles as well; -1 is the null request.
+      integer MPI_REQUEST_NULL
+      parameter(MPI_REQUEST_NULL=-1)
+
+! These should be ordered as in smpi_f77.c
+! Datatype handles are 0-based indices into datatype_lookup, assigned in
+! the exact order mpi_init__ registers them; -1 is the null datatype
+! (get_datatype maps any negative value to MPI_DATATYPE_NULL).
+      integer MPI_DATATYPE_NULL, MPI_BYTE, MPI_CHARACTER, MPI_LOGICAL,
+     > MPI_INTEGER, MPI_INTEGER1, MPI_INTEGER2, MPI_INTEGER4,
+     > MPI_INTEGER8, MPI_REAL, MPI_REAL4, MPI_REAL8,
+     > MPI_DOUBLE_PRECISION, MPI_COMPLEX, MPI_DOUBLE_COMPLEX,
+     > MPI_2INTEGER, MPI_LOGICAL1, MPI_LOGICAL2, MPI_LOGICAL4,
+     > MPI_LOGICAL8
+      parameter(MPI_DATATYPE_NULL=-1)
+      parameter(MPI_BYTE=0)
+      parameter(MPI_CHARACTER=1)
+      parameter(MPI_LOGICAL=2)
+      parameter(MPI_INTEGER=3)
+      parameter(MPI_INTEGER1=4)
+      parameter(MPI_INTEGER2=5)
+      parameter(MPI_INTEGER4=6)
+      parameter(MPI_INTEGER8=7)
+      parameter(MPI_REAL=8)
+      parameter(MPI_REAL4=9)
+      parameter(MPI_REAL8=10)
+      parameter(MPI_DOUBLE_PRECISION=11)
+      parameter(MPI_COMPLEX=12)
+      parameter(MPI_DOUBLE_COMPLEX=13)
+      parameter(MPI_2INTEGER=14)
+      parameter(MPI_LOGICAL1=15)
+      parameter(MPI_LOGICAL2=16)
+      parameter(MPI_LOGICAL4=17)
+      parameter(MPI_LOGICAL8=18)
-      external MPI_COMM_WORLD, MPI_COMM_SELF
+! These should be ordered as in smpi_f77.c
+! Reduction-operator handles: 0-based indices into op_lookup, in the order
+! mpi_init__ registers them; -1 is the null op (get_op maps negatives to
+! MPI_OP_NULL).
+      integer MPI_OP_NULL,MPI_MAX, MPI_MIN, MPI_MAXLOC, MPI_MINLOC,
+     > MPI_SUM, MPI_PROD, MPI_LAND, MPI_LOR, MPI_LXOR, MPI_BAND,
+     > MPI_BOR, MPI_BXOR
+      parameter(MPI_OP_NULL=-1)
+      parameter(MPI_MAX=0)
+      parameter(MPI_MIN=1)
+      parameter(MPI_MAXLOC=2)
+      parameter(MPI_MINLOC=3)
+      parameter(MPI_SUM=4)
+      parameter(MPI_PROD=5)
+      parameter(MPI_LAND=6)
+      parameter(MPI_LOR=7)
+      parameter(MPI_LXOR=8)
+      parameter(MPI_BAND=9)
+      parameter(MPI_BOR=10)
+      parameter(MPI_BXOR=11)
-      external MPI_INIT, MPI_FINALIZE, MPI_COMM_RANK, MPI_COMM_SIZE
+! Subroutines provided by the SMPI f77 wrappers.
+! Fix: the list item MPI_COMM_SPLIT was missing its trailing comma before
+! the next continuation line, which made the statement invalid Fortran.
+      external MPI_INIT, MPI_FINALIZE, MPI_ABORT,
+     > MPI_COMM_RANK, MPI_COMM_SIZE, MPI_COMM_DUP, MPI_COMM_SPLIT,
+     > MPI_SEND_INIT, MPI_ISEND, MPI_SEND,
+     > MPI_RECV_INIT, MPI_IRECV, MPI_RECV,
+     > MPI_START, MPI_STARTALL,
+     > MPI_WAIT, MPI_WAITANY, MPI_WAITALL,
+     > MPI_BCAST, MPI_BARRIER, MPI_REDUCE, MPI_ALLREDUCE,
+     > MPI_SCATTER, MPI_GATHER, MPI_ALLGATHER, MPI_SCAN,
+     > MPI_ALLTOALL
      external MPI_WTIME
      double precision MPI_WTIME
struct s_smpi_process_data;
typedef struct s_smpi_process_data *smpi_process_data_t;
+// Request flag bits.  They are now distinct powers of two so several flags
+// can be OR-ed together and tested independently; the old values made
+// PERSISTENT and SEND (both 0x0) indistinguishable from "no flag set".
-#define PERSISTENT 0x0
-#define NON_PERSISTENT 0x1
-#define SEND 0x0
-#define RECV 0x2
+#define PERSISTENT 0x1
+#define NON_PERSISTENT 0x2
+#define SEND 0x4
+#define RECV 0x8
typedef struct s_smpi_mpi_request {
void *buf;
// f77 wrappers
+// Fortran-callable entry points (lowercase name + "__" mangling).  MPI
+// handles cross the language boundary as plain integers (translated by the
+// lookup tables in smpi_f77.c); every routine reports status via *ierr.
void mpi_init__(int*);
void mpi_finalize__(int*);
-void mpi_comm_rank__(MPI_Comm** comm, int* rank, int* ierr);
-void mpi_comm_size__(MPI_Comm** comm, int* size, int* ierr);
+void mpi_abort__(int* comm, int* errorcode, int* ierr);
+void mpi_comm_rank__(int* comm, int* rank, int* ierr);
+void mpi_comm_size__(int* comm, int* size, int* ierr);
double mpi_wtime__(void);
-void mpi_send__(void* buf, int* count, MPI_Datatype** datatype, int* dst,
-                int* tag, MPI_Comm** comm, int* ierr);
-void mpi_recv__(void* buf, int* count, MPI_Datatype** datatype, int* src,
-                int* tag, MPI_Comm** comm, MPI_Status* status, int* ierr);
+void mpi_comm_dup__(int* comm, int* newcomm, int* ierr);
+void mpi_comm_split__(int* comm, int* color, int* key, int* comm_out, int* ierr);
+
+// Point-to-point communication and request completion.
+void mpi_send_init__(void *buf, int* count, int* datatype, int* dst, int* tag,
+                     int* comm, int* request, int* ierr);
+void mpi_isend__(void *buf, int* count, int* datatype, int* dst,
+                 int* tag, int* comm, int* request, int* ierr);
+void mpi_send__(void* buf, int* count, int* datatype, int* dst,
+                int* tag, int* comm, int* ierr);
+void mpi_recv_init__(void *buf, int* count, int* datatype, int* src, int* tag,
+                     int* comm, int* request, int* ierr);
+void mpi_irecv__(void *buf, int* count, int* datatype, int* src, int* tag,
+                 int* comm, int* request, int* ierr);
+void mpi_recv__(void* buf, int* count, int* datatype, int* src,
+                int* tag, int* comm, MPI_Status* status, int* ierr);
+void mpi_start__(int* request, int* ierr);
+void mpi_startall__(int* count, int* requests, int* ierr);
+void mpi_wait__(int* request, MPI_Status* status, int* ierr);
+void mpi_waitany__(int* count, int* requests, int* index, MPI_Status* status, int* ierr);
+void mpi_waitall__(int* count, int* requests, MPI_Status* status, int* ierr);
+
+// Collective operations.
+void mpi_barrier__(int* comm, int* ierr);
+void mpi_bcast__(void* buf, int* count, int* datatype, int* root, int* comm, int* ierr);
+void mpi_reduce__(void* sendbuf, void* recvbuf, int* count,
+                  int* datatype, int* op, int* root, int* comm, int* ierr);
+void mpi_allreduce__(void* sendbuf, void* recvbuf, int* count, int* datatype,
+                     int* op, int* comm, int* ierr);
+void mpi_scatter__(void* sendbuf, int* sendcount, int* sendtype,
+                   void* recvbuf, int* recvcount, int* recvtype,
+                   int* root, int* comm, int* ierr);
+void mpi_gather__(void* sendbuf, int* sendcount, int* sendtype,
+                  void* recvbuf, int* recvcount, int* recvtype,
+                  int* root, int* comm, int* ierr);
+void mpi_allgather__(void* sendbuf, int* sendcount, int* sendtype,
+                     void* recvbuf, int* recvcount, int* recvtype,
+                     int* comm, int* ierr);
+void mpi_scan__(void* sendbuf, void* recvbuf, int* count, int* datatype,
+                int* op, int* comm, int* ierr);
+void mpi_alltoall__(void* sendbuf, int* sendcount, int* sendtype,
+                    void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr);
#endif
-/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2010. The SimGrid Team.
* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
* under the terms of the license (GNU LGPL) which comes with this package. */
+#include <limits.h>
+#include <stdio.h>
+
#include "private.h"
+#include "xbt.h"
extern int xargc;
extern char** xargv;
-void* mpi_byte__ = &MPI_BYTE;
-void* mpi_character__ = &MPI_CHAR;
-void* mpi_logical__ = &MPI_INT;
-void* mpi_integer__ = &MPI_INT;
-void* mpi_integer1__ = &MPI_INT8_T;
-void* mpi_integer2__ = &MPI_INT16_T;
-void* mpi_integer4__ = &MPI_INT32_T;
-void* mpi_integer8__ = &MPI_INT64_T;
-void* mpi_real__ = &MPI_FLOAT;
-void* mpi_real4__ = &MPI_FLOAT;
-void* mpi_real8__ = &MPI_DOUBLE;
-void* mpi_double_precision__ = &MPI_DOUBLE;
-void* mpi_complex__ = &MPI_C_FLOAT_COMPLEX;
-void* mpi_double_complex__ = &MPI_C_DOUBLE_COMPLEX;
-void* mpi_2integer__ = &MPI_2INT;
-void* mpi_logical1__ = &MPI_UINT8_T;
-void* mpi_logical2__ = &MPI_UINT16_T;
-void* mpi_logical4__ = &MPI_UINT32_T;
-void* mpi_logical8__ = &MPI_UINT64_T;
-
-void* mpi_comm_world__ = &MPI_COMM_WORLD;
+// Tables translating the integer handles used on the Fortran side into the
+// real MPI handles (the Fortran include file's parameter values must match
+// the registration order in mpi_init__).
+static xbt_dynar_t comm_lookup = NULL;
+static xbt_dict_t request_lookup = NULL;
+static xbt_dynar_t datatype_lookup = NULL;
+static xbt_dynar_t op_lookup = NULL;
+
+// Hex digits needed to print an int, plus the terminating NUL.
+#define KEY_SIZE (sizeof(int) * 2 + 1)
+
+// Register a communicator; its integer handle is its dynar index.
+static int new_comm(MPI_Comm comm) {
+  xbt_dynar_push(comm_lookup, &comm);
+  return (int)xbt_dynar_length(comm_lookup) - 1;
+}
+
+// Translate an integer handle back: -2 is MPI_COMM_SELF (must match the
+// Fortran parameter), non-negative values index comm_lookup, and any other
+// negative value is the null communicator.
+static MPI_Comm get_comm(int comm) {
+  if(comm == -2) {
+    return MPI_COMM_SELF;
+  } else if(comm >= 0) {
+    return *(MPI_Comm*)xbt_dynar_get_ptr(comm_lookup, comm);
+  }
+  return MPI_COMM_NULL;
+}
+
+// Render a request id as the hex dict key; key must be KEY_SIZE bytes.
+static char* get_key(char* key, int id) {
+  snprintf(key, KEY_SIZE, "%x", id);
+  return key;
+}
+
+// Register a request and return its integer handle.  Ids start at INT_MIN,
+// presumably to keep them disjoint from the small non-negative handles of
+// the dynar-based tables -- TODO confirm.
+static int new_request(MPI_Request req) {
+  static int request_id = INT_MIN;
+  char key[KEY_SIZE];
+
+  xbt_dict_set(request_lookup, get_key(key, request_id), req, NULL);
+  return request_id++;
+}
+
+// NOTE(review): entries are never removed from request_lookup and completed
+// requests are not written back after MPI_Wait*, so a handle can keep
+// pointing at a finished request -- verify against the callers.
+static MPI_Request find_request(int req) {
+  char key[KEY_SIZE];
+
+  return (MPI_Request)xbt_dict_get(request_lookup, get_key(key, req));
+}
+
+// Register a datatype; its integer handle is its dynar index.
+static int new_datatype(MPI_Datatype datatype) {
+  xbt_dynar_push(datatype_lookup, &datatype);
+  return (int)xbt_dynar_length(datatype_lookup) - 1;
+}
+
+// Negative handles (e.g. the Fortran MPI_DATATYPE_NULL=-1) map to the
+// null datatype.
+static MPI_Datatype get_datatype(int datatype) {
+  return datatype >= 0
+         ? *(MPI_Datatype*)xbt_dynar_get_ptr(datatype_lookup, datatype)
+         : MPI_DATATYPE_NULL;
+}
+
+// Register a reduction operator; its integer handle is its dynar index.
+static int new_op(MPI_Op op) {
+  xbt_dynar_push(op_lookup, &op);
+  return (int)xbt_dynar_length(op_lookup) - 1;
+}
+
+// Negative handles (e.g. the Fortran MPI_OP_NULL=-1) map to the null op.
+static MPI_Op get_op(int op) {
+  return op >= 0
+         ? *(MPI_Op*)xbt_dynar_get_ptr(op_lookup, op)
+         : MPI_OP_NULL;
+}
void mpi_init__(int* ierr) {
+  // Build the handle-translation tables.  The registration order below
+  // determines each handle's integer value (its dynar index) and must stay
+  // in sync with the parameter values in the Fortran include file.
+  comm_lookup = xbt_dynar_new(sizeof(MPI_Comm), NULL);
+  new_comm(MPI_COMM_WORLD);
+
+  request_lookup = xbt_dict_new();
+
+  // Duplicate entries are intentional: Fortran LOGICAL and INTEGER both map
+  // to MPI_INT, REAL and REAL4 to MPI_FLOAT, REAL8 and DOUBLE PRECISION to
+  // MPI_DOUBLE.
+  datatype_lookup = xbt_dynar_new(sizeof(MPI_Datatype), NULL);
+  new_datatype(MPI_BYTE);
+  new_datatype(MPI_CHAR);
+  new_datatype(MPI_INT);
+  new_datatype(MPI_INT);
+  new_datatype(MPI_INT8_T);
+  new_datatype(MPI_INT16_T);
+  new_datatype(MPI_INT32_T);
+  new_datatype(MPI_INT64_T);
+  new_datatype(MPI_FLOAT);
+  new_datatype(MPI_FLOAT);
+  new_datatype(MPI_DOUBLE);
+  new_datatype(MPI_DOUBLE);
+  new_datatype(MPI_C_FLOAT_COMPLEX);
+  new_datatype(MPI_C_DOUBLE_COMPLEX);
+  new_datatype(MPI_2INT);
+  new_datatype(MPI_UINT8_T);
+  new_datatype(MPI_UINT16_T);
+  new_datatype(MPI_UINT32_T);
+  new_datatype(MPI_UINT64_T);
+
+  op_lookup = xbt_dynar_new(sizeof(MPI_Op), NULL);
+  new_op(MPI_MAX);
+  new_op(MPI_MIN);
+  new_op(MPI_MAXLOC);
+  new_op(MPI_MINLOC);
+  new_op(MPI_SUM);
+  new_op(MPI_PROD);
+  new_op(MPI_LAND);
+  new_op(MPI_LOR);
+  new_op(MPI_LXOR);
+  new_op(MPI_BAND);
+  new_op(MPI_BOR);
+  new_op(MPI_BXOR);
+
  /* smpif2c is responsible for generating a call with the final arguments */
  *ierr = MPI_Init(NULL, NULL);
}
void mpi_finalize__(int* ierr) {
  *ierr = MPI_Finalize();
+  // Tear down the handle tables built in mpi_init__ (reverse order of
+  // creation).  Any integer handle is invalid past this point.
+  xbt_dynar_free(&op_lookup);
+  xbt_dynar_free(&datatype_lookup);
+  xbt_dict_free(&request_lookup);
+  xbt_dynar_free(&comm_lookup);
}
-void mpi_comm_rank__(MPI_Comm** comm, int* rank, int* ierr) {
-  /* Yes, you really get a MPI_Comm** here */
-  *ierr = MPI_Comm_rank(**comm, rank);
+// Abort the job identified by the Fortran communicator handle.
+void mpi_abort__(int* comm, int* errorcode, int* ierr) {
+  *ierr = MPI_Abort(get_comm(*comm), *errorcode);
}
-void mpi_comm_size__(MPI_Comm** comm, int* size, int* ierr) {
-  /* Yes, you really get a MPI_Comm** here */
-  *ierr = MPI_Comm_size(**comm, size);
+// Rank of the calling process in the given communicator.
+void mpi_comm_rank__(int* comm, int* rank, int* ierr) {
+  *ierr = MPI_Comm_rank(get_comm(*comm), rank);
+}
+
+// Number of processes in the given communicator.
+void mpi_comm_size__(int* comm, int* size, int* ierr) {
+  *ierr = MPI_Comm_size(get_comm(*comm), size);
}
double mpi_wtime__(void) {
  return MPI_Wtime();
}
-void mpi_send__(void* buf, int* count, MPI_Datatype** datatype, int* dst,
-                int* tag, MPI_Comm** comm, int* ierr) {
-  *ierr = MPI_Send(buf, *count, **datatype, *dst, *tag, **comm);
+// Duplicate a communicator; on success the new communicator is registered
+// and its integer handle returned through *newcomm.
+void mpi_comm_dup__(int* comm, int* newcomm, int* ierr) {
+  MPI_Comm tmp;
+
+  *ierr = MPI_Comm_dup(get_comm(*comm), &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *newcomm = new_comm(tmp);
+  }
+}
+
+// Split a communicator by color/key; registers the resulting communicator
+// and returns its integer handle through *comm_out.
+void mpi_comm_split__(int* comm, int* color, int* key, int* comm_out, int* ierr) {
+  MPI_Comm tmp;
+
+  *ierr = MPI_Comm_split(get_comm(*comm), *color, *key, &tmp);
+  if(*ierr == MPI_SUCCESS) {
+    *comm_out = new_comm(tmp);
+  }
+}
+
+// Create a persistent send request; its integer handle is returned through
+// *request only when the MPI call succeeds.
+void mpi_send_init__(void *buf, int* count, int* datatype, int* dst, int* tag,
+                     int* comm, int* request, int* ierr) {
+  MPI_Request req;
+
+  *ierr = MPI_Send_init(buf, *count, get_datatype(*datatype), *dst, *tag,
+                        get_comm(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = new_request(req);
+  }
+}
+
+// Non-blocking send; registers the request handle on success.
+void mpi_isend__(void *buf, int* count, int* datatype, int* dst,
+                 int* tag, int* comm, int* request, int* ierr) {
+  MPI_Request req;
+
+  *ierr = MPI_Isend(buf, *count, get_datatype(*datatype), *dst, *tag,
+                    get_comm(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = new_request(req);
+  }
+}
+
+// Blocking send.
+void mpi_send__(void* buf, int* count, int* datatype, int* dst,
+                int* tag, int* comm, int* ierr) {
+  *ierr = MPI_Send(buf, *count, get_datatype(*datatype), *dst, *tag,
+                   get_comm(*comm));
+}
+
+// Create a persistent receive request; the handle is returned through
+// *request only when the MPI call succeeds.
+void mpi_recv_init__(void *buf, int* count, int* datatype, int* src, int* tag,
+                     int* comm, int* request, int* ierr) {
+  MPI_Request req;
+
+  *ierr = MPI_Recv_init(buf, *count, get_datatype(*datatype), *src, *tag,
+                        get_comm(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = new_request(req);
+  }
+}
+
+// Non-blocking receive; registers the request handle on success.
+void mpi_irecv__(void *buf, int* count, int* datatype, int* src, int* tag,
+                 int* comm, int* request, int* ierr) {
+  MPI_Request req;
+
+  *ierr = MPI_Irecv(buf, *count, get_datatype(*datatype), *src, *tag,
+                    get_comm(*comm), &req);
+  if(*ierr == MPI_SUCCESS) {
+    *request = new_request(req);
+  }
+}
+
+// Blocking receive; the Fortran status array maps onto MPI_Status.
+void mpi_recv__(void* buf, int* count, int* datatype, int* src,
+                int* tag, int* comm, MPI_Status* status, int* ierr) {
+  *ierr = MPI_Recv(buf, *count, get_datatype(*datatype), *src, *tag,
+                   get_comm(*comm), status);
+}
+
+// Start a persistent request.  NOTE(review): MPI_Start may update the local
+// copy of the request, which is not written back to request_lookup --
+// confirm this is safe for persistent requests.
+void mpi_start__(int* request, int* ierr) {
+  MPI_Request req = find_request(*request);
+
+  *ierr = MPI_Start(&req);
+}
+
+// Start *count persistent requests, translating each integer handle back
+// to its MPI_Request first.
+void mpi_startall__(int* count, int* requests, int* ierr) {
+  MPI_Request* reqs;
+  int i;
+
+  reqs = xbt_new(MPI_Request, *count);
+  for(i = 0; i < *count; i++) {
+    reqs[i] = find_request(requests[i]);
+  }
+  *ierr = MPI_Startall(*count, reqs);
+  xbt_free(reqs);  /* pair xbt_new with xbt_free, not plain free */
+}
+
+// Wait for one request.  NOTE(review): MPI_Wait sets the local copy to
+// MPI_REQUEST_NULL for non-persistent requests, but the dict entry is not
+// updated or removed, leaving a stale handle -- confirm intended.
+void mpi_wait__(int* request, MPI_Status* status, int* ierr) {
+  MPI_Request req = find_request(*request);
+
+  *ierr = MPI_Wait(&req, status);
+}
+
+// Wait for any one of *count requests; *index receives the completed slot.
+void mpi_waitany__(int* count, int* requests, int* index, MPI_Status* status, int* ierr) {
+  MPI_Request* reqs;
+  int i;
+
+  reqs = xbt_new(MPI_Request, *count);
+  for(i = 0; i < *count; i++) {
+    reqs[i] = find_request(requests[i]);
+  }
+  *ierr = MPI_Waitany(*count, reqs, index, status);
+  // NOTE(review): *index is the C 0-based position; standard MPI Fortran
+  // bindings return it 1-based -- confirm against the smpif2c convention.
+  xbt_free(reqs);  /* pair xbt_new with xbt_free, not plain free */
+}
+
+// Wait for all *count requests; status points at the Fortran status array
+// (one MPI_Status per request).
+void mpi_waitall__(int* count, int* requests, MPI_Status* status, int* ierr) {
+  MPI_Request* reqs;
+  int i;
+
+  reqs = xbt_new(MPI_Request, *count);
+  for(i = 0; i < *count; i++) {
+    reqs[i] = find_request(requests[i]);
+  }
+  *ierr = MPI_Waitall(*count, reqs, status);
+  xbt_free(reqs);  /* pair xbt_new with xbt_free, not plain free */
+}
+
+// Synchronize all processes of the communicator.
+void mpi_barrier__(int* comm, int* ierr) {
+  *ierr = MPI_Barrier(get_comm(*comm));
+}
+
+// Broadcast buf from root to every process of the communicator.
+void mpi_bcast__(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) {
+  *ierr = MPI_Bcast(buf, *count, get_datatype(*datatype), *root, get_comm(*comm));
+}
+
+// Reduce sendbuf from all ranks into recvbuf at root with operator *op.
+void mpi_reduce__(void* sendbuf, void* recvbuf, int* count,
+                  int* datatype, int* op, int* root, int* comm, int* ierr) {
+  *ierr = MPI_Reduce(sendbuf, recvbuf, *count,
+                     get_datatype(*datatype), get_op(*op), *root, get_comm(*comm));
+}
+
+// Like mpi_reduce__, but every rank receives the result.
+void mpi_allreduce__(void* sendbuf, void* recvbuf, int* count, int* datatype,
+                     int* op, int* comm, int* ierr) {
+  *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, get_datatype(*datatype),
+                        get_op(*op), get_comm(*comm));
+}
+
+// Distribute equal chunks of root's sendbuf to every rank.
+void mpi_scatter__(void* sendbuf, int* sendcount, int* sendtype,
+                   void* recvbuf, int* recvcount, int* recvtype,
+                   int* root, int* comm, int* ierr) {
+  *ierr = MPI_Scatter(sendbuf, *sendcount, get_datatype(*sendtype),
+                      recvbuf, *recvcount, get_datatype(*recvtype), *root, get_comm(*comm));
+}
+
+// Gather every rank's sendbuf into recvbuf at root.
+void mpi_gather__(void* sendbuf, int* sendcount, int* sendtype,
+                  void* recvbuf, int* recvcount, int* recvtype,
+                  int* root, int* comm, int* ierr) {
+  *ierr = MPI_Gather(sendbuf, *sendcount, get_datatype(*sendtype),
+                     recvbuf, *recvcount, get_datatype(*recvtype), *root, get_comm(*comm));
+}
+
+// Like mpi_gather__, but every rank receives the gathered data.
+void mpi_allgather__(void* sendbuf, int* sendcount, int* sendtype,
+                     void* recvbuf, int* recvcount, int* recvtype,
+                     int* comm, int* ierr) {
+  *ierr = MPI_Allgather(sendbuf, *sendcount, get_datatype(*sendtype),
+                        recvbuf, *recvcount, get_datatype(*recvtype), get_comm(*comm));
+}
+
+// Inclusive prefix reduction over the ranks of the communicator.
+void mpi_scan__(void* sendbuf, void* recvbuf, int* count, int* datatype,
+                int* op, int* comm, int* ierr) {
+  *ierr = MPI_Scan(sendbuf, recvbuf, *count, get_datatype(*datatype),
+                   get_op(*op), get_comm(*comm));
}
-void mpi_recv__(void* buf, int* count, MPI_Datatype** datatype, int* src,
-                int* tag, MPI_Comm** comm, MPI_Status* status, int* ierr) {
-  *ierr = MPI_Recv(buf, *count, **datatype, *src, *tag, **comm, status);
+// Every rank exchanges a chunk with every other rank.
+void mpi_alltoall__(void* sendbuf, int* sendcount, int* sendtype,
+                    void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) {
+  *ierr = MPI_Alltoall(sendbuf, *sendcount, get_datatype(*sendtype),
+                       recvbuf, *recvcount, get_datatype(*recvtype), get_comm(*comm));
}