X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/b113c052994d9c4ffdf4b886b65f733fcb66b556..e9f0018b823e34405847177b25a85d3facc30ae1:/src/smpi/smpi_mpi.c

diff --git a/src/smpi/smpi_mpi.c b/src/smpi/smpi_mpi.c
index 433668d8bd..18ae4f4e3a 100644
--- a/src/smpi/smpi_mpi.c
+++ b/src/smpi/smpi_mpi.c
@@ -1,8 +1,8 @@
-/* Copyright (c) 2007, 2008, 2009, 2010. The SimGrid Team.
+/* Copyright (c) 2007-2014. The SimGrid Team.
  * All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
+ * under the terms of the license (GNU LGPL) which comes with this package. */
 
 #include "private.h"
 #include "simgrid/sg_config.h"
@@ -512,6 +512,13 @@ int MPI_Alltoallv(void *sendbuf, int *sendcounts, int *senddisps,
                     comm);
 }
 
+int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr){
+  return PMPI_Alloc_mem(size, info, baseptr);
+}
+
+int MPI_Free_mem(void *baseptr){
+  return PMPI_Free_mem(baseptr);
+}
 
 int MPI_Get_processor_name(char *name, int *resultlen)
 {
@@ -612,13 +619,21 @@ int MPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler) {
 }
 
 int MPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) {
-  return PMPI_Errhandler_set(comm, errhandler);
+  return PMPI_Comm_set_errhandler(comm, errhandler);
 }
 
 int MPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler* errhandler) {
   return PMPI_Errhandler_set(comm, errhandler);
 }
 
+int MPI_Win_get_group(MPI_Win win, MPI_Group * group){
+  return PMPI_Win_get_group(win, group);
+}
+
+int MPI_Win_set_errhandler(MPI_Win win, MPI_Errhandler errhandler) {
+  return PMPI_Win_set_errhandler(win, errhandler);
+}
+
 int MPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* newtype) {
   return PMPI_Type_contiguous(count, old_type, newtype);
 }
@@ -819,6 +834,44 @@ int MPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI
   return PMPI_Win_create( base, size, disp_unit, info, comm,win);
 }
 
+int MPI_Win_set_name(MPI_Win win, char * name)
+{
+  return PMPI_Win_set_name(win, name);
+}
+
+int MPI_Win_get_name(MPI_Win win, char * name, int* len)
+{
+  return PMPI_Win_get_name(win,name,len);
+}
+
+int MPI_Win_complete(MPI_Win win){
+  return PMPI_Win_complete(win);
+}
+
+int MPI_Win_lock(int lock_type, int rank, int assert, MPI_Win win) {
+  return PMPI_Win_lock(lock_type, rank, assert, win);
+}
+
+int MPI_Win_post(MPI_Group group, int assert, MPI_Win win){
+  return PMPI_Win_post(group, assert, win);
+}
+
+int MPI_Win_start(MPI_Group group, int assert, MPI_Win win){
+  return PMPI_Win_start(group, assert, win);
+}
+
+int MPI_Win_test(MPI_Win win, int *flag){
+  return PMPI_Win_test(win, flag);
+}
+
+int MPI_Win_unlock(int rank, MPI_Win win){
+  return PMPI_Win_unlock(rank, win);
+}
+
+int MPI_Win_wait(MPI_Win win){
+  return PMPI_Win_wait(win);
+}
+
 int MPI_Info_create( MPI_Info *info){
   return PMPI_Info_create( info);
 }
@@ -837,6 +890,18 @@ int MPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
               target_disp, target_count,target_datatype, win);
 }
 
+int MPI_Put( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+    MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
+  return PMPI_Put( origin_addr,origin_count, origin_datatype,target_rank,
+                   target_disp, target_count,target_datatype, win);
+}
+
+int MPI_Accumulate( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
+    MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win){
+  return PMPI_Accumulate( origin_addr,origin_count, origin_datatype,target_rank,
+                          target_disp, target_count,target_datatype,op, win);
+}
+
 int MPI_Type_get_envelope( MPI_Datatype datatype, int *num_integers, int *num_addresses,
                            int *num_datatypes, int *combiner){
   return PMPI_Type_get_envelope( datatype, num_integers,
@@ -1027,3 +1092,67 @@ int MPI_Comm_get_parent( MPI_Comm *parent){
 int MPI_Type_create_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
   return PMPI_Type_create_hvector(count, blocklen, stride, old_type, new_type);
 }
+
+MPI_Datatype MPI_Type_f2c(MPI_Fint datatype){
+  return PMPI_Type_f2c(datatype);
+}
+
+MPI_Fint MPI_Type_c2f(MPI_Datatype datatype){
+  return PMPI_Type_c2f( datatype);
+}
+
+MPI_Group MPI_Group_f2c(MPI_Fint group){
+  return PMPI_Group_f2c( group);
+}
+
+MPI_Fint MPI_Group_c2f(MPI_Group group){
+  return PMPI_Group_c2f(group);
+}
+
+MPI_Request MPI_Request_f2c(MPI_Fint request){
+  return PMPI_Request_f2c(request);
+}
+
+MPI_Fint MPI_Request_c2f(MPI_Request request) {
+  return PMPI_Request_c2f(request);
+}
+
+MPI_Win MPI_Win_f2c(MPI_Fint win){
+  return PMPI_Win_f2c(win);
+}
+
+MPI_Fint MPI_Win_c2f(MPI_Win win){
+  return PMPI_Win_c2f(win);
+}
+
+MPI_Op MPI_Op_f2c(MPI_Fint op){
+  return PMPI_Op_f2c(op);
+}
+
+MPI_Fint MPI_Op_c2f(MPI_Op op){
+  return PMPI_Op_c2f(op);
+}
+
+MPI_Comm MPI_Comm_f2c(MPI_Fint comm){
+  return PMPI_Comm_f2c(comm);
+}
+
+MPI_Fint MPI_Comm_c2f(MPI_Comm comm){
+  return PMPI_Comm_c2f(comm);
+}
+
+MPI_Info MPI_Info_f2c(MPI_Fint info){
+  return PMPI_Info_f2c(info);
+}
+
+MPI_Fint MPI_Info_c2f(MPI_Info info){
+  return PMPI_Info_c2f(info);
+}
+
+MPI_Errhandler MPI_Errhandler_f2c(MPI_Fint errhandler){
+  return PMPI_Errhandler_f2c(errhandler);
+}
+
+MPI_Fint MPI_Errhandler_c2f(MPI_Errhandler errhandler){
+  return PMPI_Errhandler_c2f(errhandler);
+}
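Every MPI_* entry point added in this diff forwards directly to its PMPI_* counterpart, which is the standard MPI profiling-interface (PMPI) layering. As a minimal illustration only (not part of the commit), a profiling tool linked ahead of the library could intercept the new one-sided MPI_Put wrapper and still reach the underlying implementation through PMPI_Put; the byte counting via PMPI_Type_size and the stderr logging below are assumptions made for the example, and the non-const first parameter matches the pre-MPI-3 prototypes used in this file.

/* Illustrative PMPI interposition sketch (not from the commit): log each
 * one-sided put, then delegate to the real implementation via PMPI_Put. */
#include <mpi.h>
#include <stdio.h>

int MPI_Put(void *origin_addr, int origin_count, MPI_Datatype origin_datatype,
            int target_rank, MPI_Aint target_disp, int target_count,
            MPI_Datatype target_datatype, MPI_Win win)
{
  int type_size = 0;
  PMPI_Type_size(origin_datatype, &type_size);        /* bytes per element    */
  fprintf(stderr, "MPI_Put: %d bytes -> rank %d\n",   /* example logging only */
          origin_count * type_size, target_rank);
  return PMPI_Put(origin_addr, origin_count, origin_datatype, target_rank,
                  target_disp, target_count, target_datatype, win);
}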