return time;
}
+/**
+ * MPI-1 MPI_Address (deprecated in later MPI versions in favor of
+ * MPI_Get_address): stores the address of `location` into `*address`.
+ * Returns MPI_ERR_ARG when `address` is NULL, MPI_SUCCESS otherwise.
+ */
+int MPI_Address(void *location, MPI_Aint *address) {
+  int retval;
+
+  smpi_bench_end(-1, NULL);
+  if(!address) {
+    retval = MPI_ERR_ARG;
+  } else {
+    *address = (MPI_Aint)location;
+    retval = MPI_SUCCESS; // was left uninitialized on the success path (UB)
+  }
+  smpi_bench_begin(-1, NULL);
+  return retval;
+}
+
+/**
+ * MPI_Type_free: release a user-defined datatype handle.
+ * Not implemented yet: any valid handle pointer gets MPI_ERR_TYPE (see the
+ * FIXME), and a NULL handle pointer gets MPI_ERR_ARG.
+ */
+int MPI_Type_free(MPI_Datatype* datatype) {
+  int retval = MPI_ERR_ARG;
+
+  smpi_bench_end(-1, NULL);
+  if(datatype != NULL) {
+    // FIXME: always fail for now
+    retval = MPI_ERR_TYPE;
+  }
+  smpi_bench_begin(-1, NULL);
+  return retval;
+}
+
int MPI_Type_size(MPI_Datatype datatype, size_t* size) {
int retval;
return retval;
}
+/**
+ * MPI_Type_extent: stores the extent of `datatype` into `*extent`.
+ * The lower bound that smpi_datatype_extent computes alongside it is
+ * discarded. Errors: MPI_ERR_TYPE for a null datatype handle, MPI_ERR_ARG
+ * for a NULL output pointer; otherwise the helper's return code.
+ */
+int MPI_Type_extent(MPI_Datatype datatype, MPI_Aint* extent) {
+  int retval;
+  MPI_Aint lb_ignored; // receives the lower bound, which this call drops
+
+  smpi_bench_end(-1, NULL);
+  if(datatype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else if(extent == NULL) {
+    retval = MPI_ERR_ARG;
+  } else {
+    retval = smpi_datatype_extent(datatype, &lb_ignored, extent);
+  }
+  smpi_bench_begin(-1, NULL);
+  return retval;
+}
+
int MPI_Type_lb(MPI_Datatype datatype, MPI_Aint* disp) {
int retval;
return retval;
}
+/**
+ * MPI_Get_count: number of entries received, counted in units of `datatype`
+ * (not bytes), as recorded in `status`.
+ * The datatype argument should match the one used by the receive call that
+ * set the status variable. If the size of the datatype is zero, a count of
+ * zero is stored. If the amount of data in status is not an exact multiple
+ * of the size of datatype (so that count would not be integral),
+ * MPI_UNDEFINED is stored instead (handled by smpi_mpi_get_count).
+ */
+int MPI_Get_count(MPI_Status *status, MPI_Datatype datatype, int *count) {
+  int retval;
+
+  smpi_bench_end(-1, NULL); //FIXME
+  if(datatype == MPI_DATATYPE_NULL) {
+    // guard before smpi_datatype_size, which cannot take a null handle
+    retval = MPI_ERR_TYPE;
+  } else if(status == NULL || count == NULL) {
+    retval = MPI_ERR_ARG;
+  } else if(0 == smpi_datatype_size(datatype)) {
+    // zero-size datatype: the contract above mandates a count of zero, not
+    // an error (the old code wrongly returned MPI_ERR_TYPE here).
+    // Also check that the type is 'committed' once MPI_Type_commit exists
+    // (s.g. 23/03/2010).
+    *count = 0;
+    retval = MPI_SUCCESS;
+  } else {
+    smpi_mpi_get_count(status, datatype, count);
+    retval = MPI_SUCCESS;
+  }
+  smpi_bench_begin(-1, NULL);
+  return retval;
+}
+
+
int MPI_Test(MPI_Request* request, int* flag, MPI_Status* status) {
int retval;
int rank = request && (*request)->comm != MPI_COMM_NULL
return retval;
}
+/**
+ * MPI_Scan: inclusive prefix reduction over the ranks of `comm`, applying
+ * `op` to `count` elements of `datatype`; delegates to smpi_mpi_scan.
+ * Errors: MPI_ERR_COMM, MPI_ERR_TYPE or MPI_ERR_OP for null handles.
+ */
+int MPI_Scan(void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) {
+  int retval = MPI_SUCCESS;
+  // rank is only used to label the benchmarking window; -1 when unknown
+  int rank = (comm == MPI_COMM_NULL) ? -1 : smpi_comm_rank(comm);
+
+  smpi_bench_end(rank, "Scan");
+  if(comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if(datatype == MPI_DATATYPE_NULL) {
+    retval = MPI_ERR_TYPE;
+  } else if(op == MPI_OP_NULL) {
+    retval = MPI_ERR_OP;
+  } else {
+    smpi_mpi_scan(sendbuf, recvbuf, count, datatype, op, comm);
+  }
+  smpi_bench_begin(rank, "Scan");
+  return retval;
+}
+
int MPI_Reduce_scatter(void* sendbuf, void* recvbuf, int* recvcounts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) {
int retval, i, size, count;
int* displs;
smpi_bench_begin(rank, "Alltoallv");
return retval;
}
+
+
+/**
+ * MPI_Get_processor_name: copies the simulated host's name into `name`
+ * (truncated to fit MPI_MAX_PROCESSOR_NAME, always NUL-terminated) and
+ * stores the length of the copied string in `*resultlen`.
+ * Always returns MPI_SUCCESS.
+ */
+int MPI_Get_processor_name( char *name, int *resultlen ) {
+  int retval = MPI_SUCCESS;
+
+  smpi_bench_end(-1, NULL);
+  strncpy(name, SIMIX_host_get_name(SIMIX_host_self()), MPI_MAX_PROCESSOR_NAME - 1);
+  // strncpy does not NUL-terminate when the source is too long, which made
+  // the subsequent strlen(name) undefined behavior; terminate explicitly.
+  name[MPI_MAX_PROCESSOR_NAME - 1] = '\0';
+  // resultlen is the length actually stored in name (old code compared the
+  // untruncated length against the cap, reporting a wrong value on overflow)
+  *resultlen = (int)strlen(name);
+  smpi_bench_begin(-1, NULL);
+  return retval;
+}
+