From: Augustin Degomme
Date: Tue, 24 Sep 2019 14:16:13 +0000 (+0200)
Subject: add MPI_Bsend, MPI_Ibsend, MPI_Bsend_init, MPI_Buffer_attach, MPI_Buffer_detach.
X-Git-Tag: v3.24~68^2^2~4
X-Git-Url: http://info.iut-bm.univ-fcomte.fr/pub/gitweb/simgrid.git/commitdiff_plain/17d305e0f3a8c6f813bda1aacad5891d67fd1e3c

add MPI_Bsend, MPI_Ibsend, MPI_Bsend_init, MPI_Buffer_attach, MPI_Buffer_detach.

Fun fact: we are not actually using the provided buffer in SMPI, as we use
our own buffers internally for all detached calls (bsend means detached).
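For reference, minimal usage of the calls wired up here could look like the
sketch below (an illustrative snippet, not part of this patch; note that
MPI_Buffer_detach takes the address of the buffer pointer):

    #include <mpi.h>
    #include <stdlib.h>

    int main(int argc, char* argv[])
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      /* a buffered send needs user-attached space of at least
       * message size + MPI_BSEND_OVERHEAD, else MPI_ERR_BUFFER */
      int payload = 42;
      int size    = (int)sizeof(int) + MPI_BSEND_OVERHEAD;
      char* buf   = malloc(size);
      MPI_Buffer_attach(buf, size);

      if (rank == 0)
        MPI_Bsend(&payload, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
      else if (rank == 1)
        MPI_Recv(&payload, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      /* detach hands the attached buffer back: pass the pointer's address */
      MPI_Buffer_detach(&buf, &size);
      free(buf);
      MPI_Finalize();
      return 0;
    }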
---

diff --git a/src/smpi/bindings/smpi_mpi.cpp b/src/smpi/bindings/smpi_mpi.cpp
index 873537a4b8..00d23f8510 100644
--- a/src/smpi/bindings/smpi_mpi.cpp
+++ b/src/smpi/bindings/smpi_mpi.cpp
@@ -92,6 +92,10 @@ WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Attr_get,(MPI_Comm comm, int keyval, v
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Attr_put,(MPI_Comm comm, int keyval, void* attr_value) ,(comm, keyval, attr_value))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Barrier,(MPI_Comm comm),(comm))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bcast,(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm),(buf, count, datatype, root, comm))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bsend_init,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Bsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) ,(buf, count, datatype, dest, tag, comm))
+WRAPPED_PMPI_CALL(int,MPI_Buffer_attach,(void* buffer, int size) ,(buffer, size))
+WRAPPED_PMPI_CALL(int,MPI_Buffer_detach,(void* buffer, int* size) ,(buffer, size))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cart_coords,(MPI_Comm comm, int rank, int maxdims, int* coords) ,(comm, rank, maxdims, coords))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cart_create,(MPI_Comm comm, int ndims, const int* dims, const int* periods, int reorder, MPI_Comm* comm_cart) ,(comm, ndims, dims, periods, reorder, comm_cart))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Cartdim_get,(MPI_Comm comm, int* ndims) ,(comm, ndims))
@@ -195,6 +199,7 @@ WRAPPED_PMPI_CALL(int,MPI_Init_thread,(int *argc, char ***argv, int required, in
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Iprobe,(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status) ,(source, tag, comm, flag, status))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Irecv,(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, src, tag, comm, request))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Isend,(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request * request),(buf, count, datatype, dst, tag, comm, request))
+WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Ibsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Issend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
 WRAPPED_PMPI_CALL_ERRHANDLER_COMM(int,MPI_Irsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
 WRAPPED_PMPI_CALL(int,MPI_Is_thread_main,(int *flag),(flag))
@@ -369,10 +374,6 @@ WRAPPED_PMPI_CALL_ERRHANDLER_FILE(int, MPI_File_get_errhandler,( MPI_File fh, MP
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_class,( int *errorclass),( errorclass))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_code,(int errorclass, int *errorcode),(errorclass, errorcode))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Add_error_string,( int errorcode, char *string),(errorcode, string))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Bsend_init,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request),(buf, count, datatype, dest, tag, comm, request))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Bsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) ,(buf, count, datatype, dest, tag, comm))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Buffer_attach,(void* buffer, int size) ,(buffer, size))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Buffer_detach,(void* buffer, int* size) ,(buffer, size))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Cart_map,(MPI_Comm comm_old, int ndims, const int* dims, const int* periods, int* newrank) ,(comm_old, ndims, dims, periods, newrank))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Close_port,(const char *port_name),( port_name))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Comm_accept,(const char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm),( port_name, info, root, comm, newcomm))
@@ -424,7 +425,6 @@ UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_get,(MPI_Comm comm, int maxindex,
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_map,(MPI_Comm comm_old, int nnodes, const int* index, const int* edges, int* newrank) ,(comm_old, nnodes, index, edges, newrank))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_neighbors_count,(MPI_Comm comm, int rank, int* nneighbors) ,(comm, rank, nneighbors))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Graph_neighbors,(MPI_Comm comm, int rank, int maxneighbors, int* neighbors) ,(comm, rank, maxneighbors, neighbors))
-UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Ibsend,(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) ,(buf, count, datatype, dest, tag, comm, request))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Intercomm_create,(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag,MPI_Comm* comm_out) ,(local_comm, local_leader, peer_comm, remote_leader, tag, comm_out))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Intercomm_merge,(MPI_Comm comm, int high, MPI_Comm* comm_out) ,(comm, high, comm_out))
 UNIMPLEMENTED_WRAPPED_PMPI_CALL(int,MPI_Lookup_name,( char *service_name, MPI_Info info, char *port_name),( service_name, info, port_name))
diff --git a/src/smpi/bindings/smpi_pmpi.cpp b/src/smpi/bindings/smpi_pmpi.cpp
index 2b8f28e299..7b406659c5 100644
--- a/src/smpi/bindings/smpi_pmpi.cpp
+++ b/src/smpi/bindings/smpi_pmpi.cpp
@@ -244,3 +244,18 @@ MPI_Fint PMPI_Errhandler_c2f(MPI_Errhandler errhan){
     return -1;
   return errhan->c2f();
 }
+
+int PMPI_Buffer_attach(void *buf, int size){
+  if(buf==nullptr)
+    return MPI_ERR_BUFFER;
+  if(size<0)
+    return MPI_ERR_ARG;
+  smpi_process()->set_bsend_buffer(buf, size);
+  return MPI_SUCCESS;
+}
+
+int PMPI_Buffer_detach(void* buffer, int* size){
+  smpi_process()->bsend_buffer((void**)buffer, size);
+  smpi_process()->set_bsend_buffer(nullptr, 0);
+  return MPI_SUCCESS;
+}
diff --git a/src/smpi/bindings/smpi_pmpi_request.cpp b/src/smpi/bindings/smpi_pmpi_request.cpp
index 571d4a2fcf..af333ecdb2 100644
--- a/src/smpi/bindings/smpi_pmpi_request.cpp
+++ b/src/smpi/bindings/smpi_pmpi_request.cpp
@@ -175,6 +175,7 @@ int PMPI_Request_free(MPI_Request * request)
   smpi_bench_end();
   if (*request != MPI_REQUEST_NULL) {
     simgrid::smpi::Request::unref(request);
+    *request = MPI_REQUEST_NULL;
     retval = MPI_SUCCESS;
   }
   smpi_bench_begin();
@@ -405,6 +406,130 @@ int PMPI_Rsend(const void* buf, int count, MPI_Datatype datatype, int dst, int t
   return PMPI_Send(buf, count, datatype, dst, tag, comm);
 }
 
+int PMPI_Bsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+
+  if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (dst == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else if (dst >= comm->group()->size() || dst < 0) {
+    retval = MPI_ERR_RANK;
+  } else if ((count < 0) || (buf == nullptr && count > 0)) {
+    retval = MPI_ERR_COUNT;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (tag < 0 && tag != MPI_ANY_TAG) {
+    retval = MPI_ERR_TAG;
+  } else {
+    int my_proc_id = simgrid::s4u::this_actor::get_pid();
+    int dst_traced = getPid(comm, dst);
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    int size = datatype->get_extent() * count;
+    if (bsend_buf == nullptr || bsend_buf_size < size + MPI_BSEND_OVERHEAD)
+      return MPI_ERR_BUFFER;
+    TRACE_smpi_comm_in(my_proc_id, __func__,
+                       new simgrid::instr::Pt2PtTIData("bsend", dst,
+                                                       datatype->is_replayable() ? count : count * datatype->size(),
+                                                       tag, simgrid::smpi::Datatype::encode(datatype)));
+    if (not TRACE_smpi_view_internals()) {
+      TRACE_smpi_send(my_proc_id, my_proc_id, dst_traced, tag, count * datatype->size());
+    }
+
+    simgrid::smpi::Request::bsend(buf, count, datatype, dst, tag, comm);
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_comm_out(my_proc_id);
+  }
+
+  smpi_bench_begin();
+  return retval;
+}
+
+int PMPI_Ibsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (request == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (dst == MPI_PROC_NULL) {
+    *request = MPI_REQUEST_NULL;
+    retval = MPI_SUCCESS;
+  } else if (dst >= comm->group()->size() || dst < 0) {
+    retval = MPI_ERR_RANK;
+  } else if ((count < 0) || (buf == nullptr && count > 0)) {
+    retval = MPI_ERR_COUNT;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (tag < 0 && tag != MPI_ANY_TAG) {
+    retval = MPI_ERR_TAG;
+  } else {
+    int my_proc_id = simgrid::s4u::this_actor::get_pid();
+    int trace_dst = getPid(comm, dst);
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    int size = datatype->get_extent() * count;
+    if (bsend_buf == nullptr || bsend_buf_size < size + MPI_BSEND_OVERHEAD)
+      return MPI_ERR_BUFFER;
+    TRACE_smpi_comm_in(my_proc_id, __func__,
+                       new simgrid::instr::Pt2PtTIData("ibsend", dst,
+                                                       datatype->is_replayable() ? count : count * datatype->size(),
+                                                       tag, simgrid::smpi::Datatype::encode(datatype)));
+
+    TRACE_smpi_send(my_proc_id, my_proc_id, trace_dst, tag, count * datatype->size());
+
+    *request = simgrid::smpi::Request::ibsend(buf, count, datatype, dst, tag, comm);
+    retval = MPI_SUCCESS;
+
+    TRACE_smpi_comm_out(my_proc_id);
+  }
+
+  smpi_bench_begin();
+  if (retval != MPI_SUCCESS && request != nullptr)
+    *request = MPI_REQUEST_NULL;
+  return retval;
+}
+
+int PMPI_Bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request* request)
+{
+  int retval = 0;
+
+  smpi_bench_end();
+  if (request == nullptr) {
+    retval = MPI_ERR_ARG;
+  } else if (comm == MPI_COMM_NULL) {
+    retval = MPI_ERR_COMM;
+  } else if (datatype == MPI_DATATYPE_NULL || not datatype->is_valid()) {
+    retval = MPI_ERR_TYPE;
+  } else if (dst == MPI_PROC_NULL) {
+    retval = MPI_SUCCESS;
+  } else {
+    int bsend_buf_size = 0;
+    void* bsend_buf = nullptr;
+    smpi_process()->bsend_buffer(&bsend_buf, &bsend_buf_size);
+    if (bsend_buf == nullptr || bsend_buf_size < datatype->get_extent() * count + MPI_BSEND_OVERHEAD) {
+      retval = MPI_ERR_BUFFER;
+    } else {
+      *request = simgrid::smpi::Request::bsend_init(buf, count, datatype, dst, tag, comm);
+      retval = MPI_SUCCESS;
+    }
+  }
+  smpi_bench_begin();
+  if (retval != MPI_SUCCESS && request != nullptr)
+    *request = MPI_REQUEST_NULL;
+  return retval;
+}
+
 int PMPI_Ssend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   int retval = 0;
diff --git a/src/smpi/include/private.hpp b/src/smpi/include/private.hpp
index 1791625940..b3bec46d11 100644
--- a/src/smpi/include/private.hpp
+++ b/src/smpi/include/private.hpp
@@ -26,6 +26,7 @@ constexpr unsigned MPI_REQ_RMA = 0x200;
 constexpr unsigned MPI_REQ_ACCUMULATE = 0x400;
 constexpr unsigned MPI_REQ_GENERALIZED = 0x800;
 constexpr unsigned MPI_REQ_COMPLETE = 0x1000;
+constexpr unsigned MPI_REQ_BSEND = 0x2000;
 
 enum class SmpiProcessState { UNINITIALIZED, INITIALIZING, INITIALIZED /*(=MPI_Init called)*/, FINALIZED };
diff --git a/src/smpi/include/smpi_actor.hpp b/src/smpi/include/smpi_actor.hpp
index f68549e6fe..95e34655c8 100644
--- a/src/smpi/include/smpi_actor.hpp
+++ b/src/smpi/include/smpi_actor.hpp
@@ -37,7 +37,9 @@ class ActorExt {
 #endif
   std::string tracing_category_ = "";
   MPI_Info info_env_;
-
+  void* bsend_buffer_ = nullptr;
+  int bsend_buffer_size_ = 0;
+
 #if HAVE_PAPI
   /** Contains hardware data as read by PAPI **/
   int papi_event_set_;
@@ -83,6 +85,8 @@ public:
   int get_optind();
   void set_optind(int optind);
   MPI_Info info_env();
+  void bsend_buffer(void** buf, int* size);
+  void set_bsend_buffer(void* buf, int size);
 };
 
 } // namespace smpi
diff --git a/src/smpi/include/smpi_request.hpp b/src/smpi/include/smpi_request.hpp
index d16e050492..cc6458393c 100644
--- a/src/smpi/include/smpi_request.hpp
+++ b/src/smpi/include/smpi_request.hpp
@@ -74,6 +74,7 @@ public:
   static void finish_wait(MPI_Request* request, MPI_Status* status);
   static void unref(MPI_Request* request);
   static int wait(MPI_Request* req, MPI_Status* status);
+  static MPI_Request bsend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request send_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request isend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request ssend_init(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
@@ -83,11 +84,13 @@ public:
   static MPI_Request rma_recv_init(void* buf, int count, MPI_Datatype datatype, int src, int dst, int tag,
                                    MPI_Comm comm, MPI_Op op);
   static MPI_Request irecv_init(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm);
+  static MPI_Request ibsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request isend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request issend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static MPI_Request irecv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm);
 
   static void recv(void* buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status* status);
+  static void bsend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static void send(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
   static void ssend(const void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm);
diff --git a/src/smpi/internals/smpi_actor.cpp b/src/smpi/internals/smpi_actor.cpp
index bafd01b776..d363eb5879 100644
--- a/src/smpi/internals/smpi_actor.cpp
+++ b/src/smpi/internals/smpi_actor.cpp
@@ -253,5 +253,17 @@ void ActorExt::set_optind(int new_optind)
   optind_ = new_optind;
 }
 
+void ActorExt::bsend_buffer(void** buf, int* size)
+{
+  *buf  = bsend_buffer_;
+  *size = bsend_buffer_size_;
+}
+
+void ActorExt::set_bsend_buffer(void* buf, int size)
+{
+  bsend_buffer_      = buf;
+  bsend_buffer_size_ = size;
+}
+
 } // namespace smpi
 } // namespace simgrid
diff --git a/src/smpi/mpi/smpi_request.cpp b/src/smpi/mpi/smpi_request.cpp
index bb5199b4e4..c5e7c52d5f 100644
--- a/src/smpi/mpi/smpi_request.cpp
+++ b/src/smpi/mpi/smpi_request.cpp
@@ -170,6 +170,14 @@ void Request::print_request(const char *message)
 
 /* factories, to hide the internal flags from the caller */
 
+MPI_Request Request::bsend_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  return new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                     comm->group()->actor(dst)->get_pid(), tag, comm,
+                     MPI_REQ_PERSISTENT | MPI_REQ_SEND | MPI_REQ_PREPARED | MPI_REQ_BSEND);
+}
+
 MPI_Request Request::send_init(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
@@ -242,6 +250,16 @@ MPI_Request Request::irecv_init(void *buf, int count, MPI_Datatype datatype, int
                      MPI_REQ_PERSISTENT | MPI_REQ_RECV | MPI_REQ_PREPARED);
 }
 
+MPI_Request Request::ibsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        MPI_REQ_NON_PERSISTENT | MPI_REQ_ISEND | MPI_REQ_SEND | MPI_REQ_BSEND);
+  request->start();
+  return request;
+}
+
 MPI_Request Request::isend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
@@ -281,6 +299,17 @@ void Request::recv(void *buf, int count, MPI_Datatype datatype, int src, int tag
   request = nullptr;
 }
 
+void Request::bsend(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
+{
+  MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
+  request = new Request(buf == MPI_BOTTOM ? nullptr : buf, count, datatype, simgrid::s4u::this_actor::get_pid(),
+                        comm->group()->actor(dst)->get_pid(), tag, comm,
+                        MPI_REQ_NON_PERSISTENT | MPI_REQ_SEND | MPI_REQ_BSEND);
+
+  request->start();
+  wait(&request, MPI_STATUS_IGNORE);
+  request = nullptr;
+}
+
 void Request::send(const void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm)
 {
   MPI_Request request = nullptr; /* MC needs the comm to be set to nullptr during the call */
@@ -407,7 +436,7 @@ void Request::start()
     void* buf = buf_;
     if ((flags_ & MPI_REQ_SSEND) == 0 &&
-        ((flags_ & MPI_REQ_RMA) != 0 ||
+        ((flags_ & MPI_REQ_RMA) != 0 || (flags_ & MPI_REQ_BSEND) != 0 ||
          static_cast<int>(size_) < simgrid::config::get_value<int>("smpi/send-is-detached-thresh"))) {
       void *oldbuf = nullptr;
       detached_ = true;
@@ -422,6 +451,8 @@ void Request::start()
         XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
         smpi_switch_data_segment(simgrid::s4u::Actor::by_pid(src_));
       }
+      // We need this temporary buffer even for bsend, as it will be released in the copy callback and we have no
+      // way to tell it apart there; so, in practice, the manually attached buffer space is never used.
       buf = xbt_malloc(size_);
       memcpy(buf,oldbuf,size_);
       XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
diff --git a/teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt b/teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
index 46fce2334a..90931a6732 100644
--- a/teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
+++ b/teshsuite/smpi/mpich3-test/pt2pt/CMakeLists.txt
@@ -10,9 +10,9 @@ if(enable_smpi AND enable_smpi_MPICH3_testsuite)
   include_directories("${CMAKE_CURRENT_SOURCE_DIR}/../include/")
 
   foreach(file anyall bottom eagerdt huge_anysrc huge_underflow inactivereq isendself isendirecv isendselfprobe issendselfcancel cancelanysrc pingping probenull
-          dtype_send greq1 probe-unexp rqstatus sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any sendself scancel scancel2 rcancel)
-    # not compiled files: big_count_status bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending mprobe
-    # cancelrecv icsend large_message pscancel rqfreeb scancel_unmatch
+          dtype_send greq1 probe-unexp rqstatus sendall sendflood sendrecv1 sendrecv2 sendrecv3 waitany-null waittestnull many_isend manylmt recv_any sendself scancel scancel2 rcancel bsend1 bsend2 bsend3 bsend4 bsend5 bsendalign bsendfrag bsendpending rqfreeb)
+    # not compiled files: big_count_status mprobe
+    # cancelrecv icsend large_message pscancel scancel_unmatch
     add_executable(${file} EXCLUDE_FROM_ALL ${file}.c)
     add_dependencies(tests ${file})
     target_link_libraries(${file} simgrid mtest_c)