X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/blobdiff_plain/7cd237f9659b2df47fca65a4340ca7b5407f46a0..7d28d93b90eedd2a49da8b9b990296669e46d05c:/include/smpi/smpi.h

diff --git a/include/smpi/smpi.h b/include/smpi/smpi.h
index 141a23151a..1bfbc620fd 100644
--- a/include/smpi/smpi.h
+++ b/include/smpi/smpi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
+/* Copyright (c) 2007-2020. The SimGrid Team. All rights reserved. */
 
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
@@ -309,7 +309,7 @@ XBT_PUBLIC_DATA const MPI_Datatype MPI_COUNT;
 #define MPI_LOGICAL MPI_LONG
 #endif
 
-#define MPI_Fint int
+typedef int MPI_Fint;
 
 #define MPI_COMPLEX MPI_C_FLOAT_COMPLEX
 #define MPI_DOUBLE_COMPLEX MPI_C_DOUBLE_COMPLEX
@@ -368,6 +368,27 @@ typedef SMPI_Request* MPI_Request;
 typedef SMPI_Errhandler* MPI_Errhandler;
 #define MPI_ERRHANDLER_NULL ((MPI_Errhandler)NULL)
 
+typedef enum SMPI_Combiner_enum{
+  MPI_COMBINER_NAMED,
+  MPI_COMBINER_DUP,
+  MPI_COMBINER_CONTIGUOUS,
+  MPI_COMBINER_VECTOR,
+  MPI_COMBINER_HVECTOR_INTEGER,
+  MPI_COMBINER_HVECTOR,
+  MPI_COMBINER_INDEXED,
+  MPI_COMBINER_HINDEXED_INTEGER,
+  MPI_COMBINER_HINDEXED,
+  MPI_COMBINER_INDEXED_BLOCK,
+  MPI_COMBINER_STRUCT_INTEGER,
+  MPI_COMBINER_STRUCT,
+  MPI_COMBINER_SUBARRAY,
+  MPI_COMBINER_DARRAY,
+  MPI_COMBINER_F90_REAL,
+  MPI_COMBINER_F90_COMPLEX,
+  MPI_COMBINER_F90_INTEGER,
+  MPI_COMBINER_RESIZED,
+  MPI_COMBINER_HINDEXED_BLOCK
+}SMPI_Combiner_enum;
 
 typedef enum SMPI_Topo_type {
   MPI_GRAPH=1,
@@ -817,7 +838,11 @@ MPI_CALL(XBT_PUBLIC int, MPI_Comm_call_errhandler, (MPI_Comm comm, int errorcode
 MPI_CALL(XBT_PUBLIC int, MPI_Win_set_errhandler, (MPI_Win win, MPI_Errhandler errhandler));
 MPI_CALL(XBT_PUBLIC int, MPI_Win_get_errhandler, (MPI_Win win, MPI_Errhandler* errhandler));
 MPI_CALL(XBT_PUBLIC int, MPI_Win_create_errhandler, (MPI_Win_errhandler_fn * function, MPI_Errhandler* errhandler));
-MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
+MPI_CALL(XBT_PUBLIC int, MPI_Win_call_errhandler, (MPI_Win win, int errorcode));
+MPI_CALL(XBT_PUBLIC int, MPI_Type_get_contents,
+         (MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
+          MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes));
+MPI_CALL(XBT_PUBLIC int, MPI_Type_get_envelope,
 (MPI_Datatype datatype, int* num_integers, int* num_addresses, int* num_datatypes, int* combiner));
 MPI_CALL(XBT_PUBLIC int, MPI_File_call_errhandler, (MPI_File fh, int errorcode));
 MPI_CALL(XBT_PUBLIC int, MPI_File_create_errhandler,
@@ -826,27 +851,6 @@ MPI_CALL(XBT_PUBLIC int, MPI_File_set_errhandler, (MPI_File file, MPI_Errhandler
 MPI_CALL(XBT_PUBLIC int, MPI_File_get_errhandler, (MPI_File file, MPI_Errhandler* errhandler));
 
 //FIXME: these are not yet implemented
-typedef enum MPIR_Combiner_enum{
-  MPI_COMBINER_NAMED,
-  MPI_COMBINER_DUP,
-  MPI_COMBINER_CONTIGUOUS,
-  MPI_COMBINER_VECTOR,
-  MPI_COMBINER_HVECTOR_INTEGER,
-  MPI_COMBINER_HVECTOR,
-  MPI_COMBINER_INDEXED,
-  MPI_COMBINER_HINDEXED_INTEGER,
-  MPI_COMBINER_HINDEXED,
-  MPI_COMBINER_INDEXED_BLOCK,
-  MPI_COMBINER_STRUCT_INTEGER,
-  MPI_COMBINER_STRUCT,
-  MPI_COMBINER_SUBARRAY,
-  MPI_COMBINER_DARRAY,
-  MPI_COMBINER_F90_REAL,
-  MPI_COMBINER_F90_COMPLEX,
-  MPI_COMBINER_F90_INTEGER,
-  MPI_COMBINER_RESIZED,
-  MPI_COMBINER_HINDEXED_BLOCK
-}MPIR_Combiner_enum;
 
 typedef void* MPI_Message;
 #define MPI_DUP_FN 1
@@ -887,9 +891,6 @@ MPI_CALL(XBT_PUBLIC int, MPI_Irsend,
          (const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request));
 MPI_CALL(XBT_PUBLIC int, MPI_Get_elements, (MPI_Status * status, MPI_Datatype datatype, int* elements));
 MPI_CALL(XBT_PUBLIC int, MPI_Pcontrol, (const int level, ...));
-MPI_CALL(XBT_PUBLIC int, MPI_Type_get_contents,
-         (MPI_Datatype datatype, int max_integers, int max_addresses, int max_datatypes, int* array_of_integers,
-          MPI_Aint* array_of_addresses, MPI_Datatype* array_of_datatypes));
 MPI_CALL(XBT_PUBLIC int, MPI_Type_create_darray,
          (int size, int rank, int ndims, int* array_of_gsizes, int* array_of_distribs, int* array_of_dargs,
           int* array_of_psizes, int order, MPI_Datatype oldtype, MPI_Datatype* newtype));
@@ -980,6 +981,7 @@ XBT_PUBLIC void smpi_process_set_user_data(void*);
 XBT_PUBLIC void smpi_init_options();
 
 XBT_PUBLIC void smpi_execute_flops(double flops);
+XBT_PUBLIC void smpi_execute_flops_benched(double flops);
 XBT_PUBLIC void smpi_execute(double duration);
 XBT_PUBLIC void smpi_execute_benched(double duration);
 
@@ -1029,7 +1031,7 @@ XBT_PUBLIC void smpi_trace_set_call_location__(const char* file, const int* line
 #define SMPI_SAMPLE_FLOPS(flops) for(smpi_execute_flops(flops); 0; )
 XBT_PUBLIC void* smpi_shared_malloc(size_t size, const char* file, int line);
 #define SMPI_SHARED_MALLOC(size) smpi_shared_malloc((size), __FILE__, __LINE__)
-XBT_PUBLIC void* smpi_shared_malloc_partial(size_t size, size_t* shared_block_offsets, int nb_shared_blocks);
+XBT_PUBLIC void* smpi_shared_malloc_partial(size_t size, const size_t* shared_block_offsets, int nb_shared_blocks);
 #define SMPI_PARTIAL_SHARED_MALLOC(size, shared_block_offsets, nb_shared_blocks)                                      \
   smpi_shared_malloc_partial((size), (shared_block_offsets), (nb_shared_blocks))
 
@@ -1057,6 +1059,7 @@ XBT_PUBLIC void smpi_replay_run(const char* instance_id, int rank, double start_
 
 XBT_PUBLIC void SMPI_app_instance_register(const char* name, xbt_main_func_t code, int num_processes);
 XBT_PUBLIC void SMPI_init();
 XBT_PUBLIC void SMPI_finalize();
+XBT_PUBLIC void SMPI_thread_create();
 
 SG_END_DECL
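
Note on the MPI_Type_get_contents move: its declaration leaves the "not yet implemented" block (along with the combiner enum, renamed from MPIR_Combiner_enum to SMPI_Combiner_enum and moved next to the other live typedefs) and now sits beside MPI_Type_get_envelope, so both halves of the standard datatype-introspection pattern are regular prototypes. A minimal sketch of that pattern (plain MPI-standard usage, not SMPI-specific; the vector datatype built here is only an illustration):

    /* Decode a derived datatype: MPI_Type_get_envelope reports the combiner
     * and the sizes of the content arrays, then MPI_Type_get_contents fills
     * them with the arguments the type was constructed from. */
    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);

      MPI_Datatype vec;
      MPI_Type_vector(3, 2, 4, MPI_DOUBLE, &vec); /* count=3, blocklength=2, stride=4 */
      MPI_Type_commit(&vec);

      int num_ints, num_adds, num_types, combiner;
      MPI_Type_get_envelope(vec, &num_ints, &num_adds, &num_types, &combiner);

      if (combiner == MPI_COMBINER_VECTOR) { /* never call get_contents on MPI_COMBINER_NAMED */
        int* ints           = malloc(num_ints * sizeof(int));
        MPI_Aint* adds      = malloc((num_adds > 0 ? num_adds : 1) * sizeof(MPI_Aint));
        MPI_Datatype* types = malloc(num_types * sizeof(MPI_Datatype));
        MPI_Type_get_contents(vec, num_ints, num_adds, num_types, ints, adds, types);
        /* For MPI_COMBINER_VECTOR the integer array is {count, blocklength, stride};
         * types[0] is the oldtype (MPI_DOUBLE here, a named type: no free needed). */
        printf("vector: count=%d blocklength=%d stride=%d\n", ints[0], ints[1], ints[2]);
        free(ints); free(adds); free(types);
      }

      MPI_Type_free(&vec);
      MPI_Finalize();
      return 0;
    }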
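Note on the const-qualified smpi_shared_malloc_partial: callers can now pass a read-only offset table, e.g. a static const array, through SMPI_PARTIAL_SHARED_MALLOC. A sketch, assuming the convention from the SimGrid documentation that shared_block_offsets holds (start, end) byte-offset pairs, one pair per shared block:

    /* Mixed shared/private allocation under SMPI. Bytes [0, 40) and [60, 100)
     * are backed by SMPI's shared (fake) memory; bytes [40, 60) stay private
     * to the calling rank. The const on the new prototype is what makes
     * passing this read-only table legal without a cast. */
    #include <smpi/smpi.h>

    static char* alloc_mixed_buffer(void)
    {
      static const size_t offsets[] = {0, 40, 60, 100}; /* two (start, end) pairs */
      return SMPI_PARTIAL_SHARED_MALLOC(100, offsets, 2);
    }

The returned buffer is released with SMPI_SHARED_FREE, as for a fully shared SMPI_SHARED_MALLOC allocation.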