From 1eb32ca7e4d755fec407842b7c879499e2034136 Mon Sep 17 00:00:00 2001 From: markls Date: Sun, 5 Aug 2007 09:41:13 +0000 Subject: [PATCH] moving smpi into the mainstream build... git-svn-id: svn+ssh://scm.gforge.inria.fr/svn/simgrid/simgrid/trunk@3963 48e7efb5-ca39-0410-a469-dd3cf9ba447f --- src/Makefile.am | 15 +- src/smpi/private.h | 84 +++++ src/smpi/smpi.h | 114 ++++++ src/smpi/smpi_base.c | 816 +++++++++++++++++++++++++++++++++++++++++++ src/smpi/smpi_mpi.c | 155 ++++++++ 5 files changed, 1182 insertions(+), 2 deletions(-) create mode 100644 src/smpi/private.h create mode 100644 src/smpi/smpi.h create mode 100644 src/smpi/smpi_base.c create mode 100644 src/smpi/smpi_mpi.c diff --git a/src/Makefile.am b/src/Makefile.am index d5229da6fb..96f1f215aa 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -63,7 +63,9 @@ EXTRA_DIST= \ amok/amok_modinter.h \ include/simix/simix.h include/simix/datatypes.h \ - simix/msg_simix_private.h + simix/msg_simix_private.h \ + \ + smpi/private.h smpi/smpi.h #LIBRARY_VERSION= 0:0:0 # | | | @@ -179,6 +181,10 @@ SIMIX_SRC= \ simix/smx_action.c \ simix/smx_synchro.c +SMPI_SRC= \ + smpi/smpi_base.c \ + smpi/smpi_mpi.c + MSG_SRC= msg/msg_config.c \ msg/task.c msg/host.c msg/m_process.c msg/gos.c \ msg/global.c msg/environment.c msg/deployment.c @@ -276,7 +282,7 @@ endif ### Declare the library content ### -lib_LTLIBRARIES= libsimgrid.la libgras.la +lib_LTLIBRARIES= libsimgrid.la libgras.la libsmpi.la libgras_la_SOURCES= $(XBT_SRC) $(GRAS_COMMON_SRC) $(GRAS_RL_SRC) $(AMOK_SRC) libgras_la_LDFLAGS = -no-undefined $(VERSION_INFO) @GRAS_DEP@ @LD_DYNAMIC_FLAGS@ -lm @@ -287,6 +293,11 @@ libsimgrid_la_SOURCES = $(XBT_SRC) $(SURF_SRC) $(GTNETS_USED) $(SDP_SRC) \ $(GRAS_COMMON_SRC) $(GRAS_SG_SRC) $(AMOK_SRC) libsimgrid_la_LDFLAGS = -no-undefined $(VERSION_INFO) @SIMGRID_DEP@ @LD_DYNAMIC_FLAGS@ -lm +libsmpi_la_LIBADD = libsimgrid.la +libsmpi_la_SOURCES = $(SMPI_SRC) +#libsmpi_la_LDFLAGS = -no-undefined $(VERSION_INFO) @SMPI_DEP@ 
@LD_DYNAMIC_FLAGS@ -lm +libsmpi_la_LDFLAGS = -no-undefined $(VERSION_INFO) @LD_DYNAMIC_FLAGS@ -lm + ## ## Java cruft diff --git a/src/smpi/private.h b/src/smpi/private.h new file mode 100644 index 0000000000..fa42b5c5cf --- /dev/null +++ b/src/smpi/private.h @@ -0,0 +1,84 @@ +#include "smpi.h" +#include "xbt/mallocator.h" +#include "xbt/xbt_os_time.h" + +#define SMPI_DEFAULT_SPEED 100 +#define SMPI_REQUEST_MALLOCATOR_SIZE 100 +#define SMPI_MESSAGE_MALLOCATOR_SIZE 100 + +typedef struct SMPI_Global { + + // config vars + double reference_speed; + + // state vars + int root_ready:1; + int ready_process_count; + smx_mutex_t start_stop_mutex; + smx_cond_t start_stop_cond; + + xbt_mallocator_t request_mallocator; + xbt_mallocator_t message_mallocator; + + xbt_fifo_t *pending_send_request_queues; + smx_mutex_t *pending_send_request_queues_mutexes; + + xbt_fifo_t *pending_recv_request_queues; + smx_mutex_t *pending_recv_request_queues_mutexes; + + xbt_fifo_t *received_message_queues; + smx_mutex_t *received_message_queues_mutexes; + + smx_process_t *sender_processes; + smx_process_t *receiver_processes; + + int running_hosts_count; + smx_mutex_t running_hosts_count_mutex; + + xbt_os_timer_t *timers; + smx_mutex_t *timers_mutexes; + + +} s_SMPI_Global_t, *SMPI_Global_t; + +extern SMPI_Global_t smpi_global; + +struct smpi_received_message_t { + smpi_mpi_communicator_t *comm; + int src; + int dst; + int tag; + void *buf; +}; + +typedef struct smpi_received_message_t smpi_received_message_t; + +// function prototypes +int smpi_mpi_comm_size(smpi_mpi_communicator_t *comm); +int smpi_mpi_comm_rank(smpi_mpi_communicator_t *comm, smx_host_t host); +int smpi_mpi_comm_rank_self(smpi_mpi_communicator_t *comm); +int smpi_mpi_comm_world_rank_self(void); +int smpi_sender(int argc, char **argv); +int smpi_receiver(int argc, char **argv); +void *smpi_request_new(void); +void smpi_request_free(void *pointer); +void smpi_request_reset(void *pointer); +void *smpi_message_new(void); +void 
smpi_message_free(void *pointer); +void smpi_message_reset(void *pointer); +void smpi_global_init(void); +void smpi_global_destroy(void); +int smpi_run_simulation(int argc, char **argv); +void smpi_mpi_land_func(void *x, void *y, void *z); +void smpi_mpi_sum_func(void *x, void *y, void *z); +void smpi_mpi_init(void); +void smpi_mpi_finalize(void); +void smpi_bench_begin(void); +void smpi_bench_end(void); +void smpi_barrier(smpi_mpi_communicator_t *comm); +int smpi_comm_rank(smpi_mpi_communicator_t *comm, smx_host_t host); +int smpi_create_request(void *buf, int count, smpi_mpi_datatype_t *datatype, + int src, int dst, int tag, smpi_mpi_communicator_t *comm, smpi_mpi_request_t **request); +int smpi_isend(smpi_mpi_request_t *request); +int smpi_irecv(smpi_mpi_request_t *request); +void smpi_wait(smpi_mpi_request_t *request, smpi_mpi_status_t *status); diff --git a/src/smpi/smpi.h b/src/smpi/smpi.h new file mode 100644 index 0000000000..5061bef854 --- /dev/null +++ b/src/smpi/smpi.h @@ -0,0 +1,114 @@ +#include +#include + +#define SMPI_RAND_SEED 5 + +#define MPI_ANY_SOURCE -1 + +// errorcodes +#define MPI_SUCCESS 0 +#define MPI_ERR_COMM 1 +#define MPI_ERR_ARG 2 +#define MPI_ERR_TYPE 3 +#define MPI_ERR_REQUEST 4 +#define MPI_ERR_INTERN 5 +#define MPI_ERR_COUNT 6 +#define MPI_ERR_RANK 7 +#define MPI_ERR_TAG 8 + +// MPI_Comm +struct smpi_mpi_communicator_t { + int size; + smx_host_t *hosts; + + smx_process_t *processes; + int barrier_count; + smx_mutex_t barrier_mutex; + smx_cond_t barrier_cond; +}; +typedef struct smpi_mpi_communicator_t smpi_mpi_communicator_t; +typedef smpi_mpi_communicator_t *MPI_Comm; + +// MPI_Status +struct smpi_mpi_status_t { + int MPI_SOURCE; +}; +typedef struct smpi_mpi_status_t smpi_mpi_status_t; +typedef smpi_mpi_status_t MPI_Status; + +// MPI_Datatype +struct smpi_mpi_datatype_t { + size_t size; +}; +typedef struct smpi_mpi_datatype_t smpi_mpi_datatype_t; +typedef smpi_mpi_datatype_t *MPI_Datatype; + +// MPI_Request +struct 
smpi_mpi_request_t { + smpi_mpi_communicator_t *comm; + int src; + int dst; + int tag; + + void *buf; + smpi_mpi_datatype_t *datatype; + int count; + + short int completed :1; + smx_mutex_t mutex; + smx_cond_t cond; +}; +typedef struct smpi_mpi_request_t smpi_mpi_request_t; +typedef smpi_mpi_request_t *MPI_Request; + +// MPI_Op +struct smpi_mpi_op_t { + void (*func)(void *x, void *y, void *z); +}; +typedef struct smpi_mpi_op_t smpi_mpi_op_t; +typedef smpi_mpi_op_t *MPI_Op; + +// global SMPI data structure +typedef struct SMPI_MPI_Global { + + smpi_mpi_communicator_t *mpi_comm_world; + + smpi_mpi_datatype_t *mpi_byte; + smpi_mpi_datatype_t *mpi_int; + smpi_mpi_datatype_t *mpi_double; + + smpi_mpi_op_t *mpi_land; + smpi_mpi_op_t *mpi_sum; + +} s_SMPI_MPI_Global_t, *SMPI_MPI_Global_t; +extern SMPI_MPI_Global_t smpi_mpi_global; + +#define MPI_COMM_WORLD (smpi_mpi_global->mpi_comm_world) + +#define MPI_STATUS_IGNORE NULL + +#define MPI_BYTE (smpi_mpi_global->mpi_byte) +#define MPI_DOUBLE (smpi_mpi_global->mpi_double) +#define MPI_INT (smpi_mpi_global->mpi_int) + +#define MPI_LAND (smpi_mpi_global->mpi_land) +#define MPI_SUM (smpi_mpi_global->mpi_sum) + +// MPI Functions +int MPI_Init(int *argc, char ***argv); +int MPI_Finalize(void); +int MPI_Abort(MPI_Comm comm, int errorcode); +int MPI_Comm_size(MPI_Comm comm, int *size); +int MPI_Comm_rank(MPI_Comm comm, int *rank); +int MPI_Type_size(MPI_Datatype datatype, size_t *size); +int MPI_Barrier(MPI_Comm comm); +int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Request *request); +int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status *status); +int MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request *request); +int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm); + +// smpi functions +extern int smpi_simulated_main(int argc, char **argv); 
+unsigned int smpi_sleep(unsigned int); +void smpi_exit(int); +int smpi_gettimeofday(struct timeval *tv, struct timezone *tz); diff --git a/src/smpi/smpi_base.c b/src/smpi/smpi_base.c new file mode 100644 index 0000000000..c2d0939011 --- /dev/null +++ b/src/smpi/smpi_base.c @@ -0,0 +1,816 @@ +#include +#include +#include + +#include "private.h" + +SMPI_Global_t smpi_global = NULL; + +SMPI_MPI_Global_t smpi_mpi_global = NULL; + +XBT_LOG_NEW_DEFAULT_CATEGORY(smpi, "SMPI"); + +int inline smpi_mpi_comm_size(smpi_mpi_communicator_t *comm) +{ + return comm->size; +} + +// FIXME: smarter algorithm? +int smpi_mpi_comm_rank(smpi_mpi_communicator_t *comm, smx_host_t host) +{ + int i; + + for(i = comm->size - 1; i > 0 && host != comm->hosts[i]; i--); + + return i; +} + +int inline smpi_mpi_comm_rank_self(smpi_mpi_communicator_t *comm) +{ + return smpi_mpi_comm_rank(comm, SIMIX_host_self()); +} + +//int smpi_mpi_comm_world_rank_self() +//{ +// return smpi_mpi_comm_rank(smpi_mpi_global->mpi_comm_world, SIMIX_host_self()); +//} + +int smpi_sender(int argc, char **argv) +{ + smx_process_t self; + smx_host_t shost; + int rank; + + xbt_fifo_t request_queue; + smx_mutex_t request_queue_mutex; + int size; + + int running_hosts_count; + + smpi_mpi_request_t *request; + + smx_host_t dhost; + + smx_action_t communicate_action; + + smpi_received_message_t *message; + + int drank; + + smx_process_t receiver_process; + + self = SIMIX_process_self(); + shost = SIMIX_host_self(); + rank = smpi_mpi_comm_rank(smpi_mpi_global->mpi_comm_world, shost); + + // make sure root is done before own initialization + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + if (!smpi_global->root_ready) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + request_queue = smpi_global->pending_send_request_queues[rank]; + request_queue_mutex = smpi_global->pending_send_request_queues_mutexes[rank]; + size = 
smpi_mpi_comm_size(smpi_mpi_global->mpi_comm_world); + + smpi_global->sender_processes[rank] = self; + + // wait for all nodes to signal initializatin complete + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count++; + if (smpi_global->ready_process_count < 3 * size) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } else { + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + do { + + SIMIX_mutex_lock(request_queue_mutex); + request = xbt_fifo_shift(request_queue); + SIMIX_mutex_unlock(request_queue_mutex); + + if (NULL == request) { + SIMIX_process_suspend(self); + } else { + + SIMIX_mutex_lock(request->mutex); + + // copy request to appropriate received queue + message = xbt_mallocator_get(smpi_global->message_mallocator); + message->comm = request->comm; + message->src = request->src; + message->dst = request->dst; + message->tag = request->tag; + message->buf = xbt_malloc(request->datatype->size * request->count); + memcpy(message->buf, request->buf, request->datatype->size * request->count); + + dhost = request->comm->hosts[request->dst]; + drank = smpi_mpi_comm_rank(smpi_mpi_global->mpi_comm_world, dhost); + + SIMIX_mutex_lock(smpi_global->received_message_queues_mutexes[drank]); + xbt_fifo_push(smpi_global->received_message_queues[drank], message); + SIMIX_mutex_unlock(smpi_global->received_message_queues_mutexes[drank]); + + request->completed = 1; + + communicate_action = SIMIX_action_communicate(shost, dhost, + NULL, request->datatype->size * request->count * 1.0, -1.0); + + SIMIX_register_condition_to_action(communicate_action, request->cond); + SIMIX_register_action_to_condition(communicate_action, request->cond); + + SIMIX_cond_wait(request->cond, request->mutex); + + SIMIX_mutex_unlock(request->mutex); + + // wake up receiver if necessary + receiver_process = smpi_global->receiver_processes[drank]; + + if 
(SIMIX_process_is_suspended(receiver_process)) { + SIMIX_process_resume(receiver_process); + } + + } + + SIMIX_mutex_lock(smpi_global->running_hosts_count_mutex); + running_hosts_count = smpi_global->running_hosts_count; + SIMIX_mutex_unlock(smpi_global->running_hosts_count_mutex); + + } while (0 < running_hosts_count); + + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count--; + if (smpi_global->ready_process_count == 0) { + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + } else if (smpi_global->ready_process_count < 0) { + // FIXME: can't happen! abort! + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + return 0; +} + +int smpi_receiver(int argc, char **argv) +{ + smx_process_t self; + int rank; + + xbt_fifo_t request_queue; + smx_mutex_t request_queue_mutex; + xbt_fifo_t message_queue; + smx_mutex_t message_queue_mutex; + int size; + + int running_hosts_count; + + smpi_mpi_request_t *request; + smpi_received_message_t *message; + + xbt_fifo_item_t request_item; + xbt_fifo_item_t message_item; + + self = SIMIX_process_self(); + rank = smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world); + + // make sure root is done before own initialization + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + if (!smpi_global->root_ready) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + request_queue = smpi_global->pending_recv_request_queues[rank]; + request_queue_mutex = smpi_global->pending_recv_request_queues_mutexes[rank]; + message_queue = smpi_global->received_message_queues[rank]; + message_queue_mutex = smpi_global->received_message_queues_mutexes[rank]; + size = smpi_mpi_comm_size(smpi_mpi_global->mpi_comm_world); + + smpi_global->receiver_processes[rank] = self; + + // wait for all nodes to signal initializatin complete + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count++; + if 
(smpi_global->ready_process_count < 3 * size) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } else { + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + do { + request = NULL; + message = NULL; + + // FIXME: better algorithm, maybe some kind of balanced tree? or a heap? + + // FIXME: not the best way to request multiple locks... + SIMIX_mutex_lock(request_queue_mutex); + SIMIX_mutex_lock(message_queue_mutex); + for (request_item = xbt_fifo_get_first_item(request_queue); + NULL != request_item; + request_item = xbt_fifo_get_next_item(request_item)) { + request = xbt_fifo_get_item_content(request_item); + for (message_item = xbt_fifo_get_first_item(message_queue); + NULL != message_item; + message_item = xbt_fifo_get_next_item(message_item)) { + message = xbt_fifo_get_item_content(message_item); + if (request->comm == message->comm && + (MPI_ANY_SOURCE == request->src || request->src == message->src) && + request->tag == message->tag) { + xbt_fifo_remove_item(request_queue, request_item); + xbt_fifo_remove_item(message_queue, message_item); + goto stopsearch; + } + } + } +stopsearch: + SIMIX_mutex_unlock(message_queue_mutex); + SIMIX_mutex_unlock(request_queue_mutex); + + if (NULL == request || NULL == message) { + SIMIX_process_suspend(self); + } else { + SIMIX_mutex_lock(request->mutex); + + memcpy(request->buf, message->buf, request->datatype->size * request->count); + request->src = message->src; + request->completed = 1; + SIMIX_cond_broadcast(request->cond); + + SIMIX_mutex_unlock(request->mutex); + + xbt_free(message->buf); + xbt_mallocator_release(smpi_global->message_mallocator, message); + } + + SIMIX_mutex_lock(smpi_global->running_hosts_count_mutex); + running_hosts_count = smpi_global->running_hosts_count; + SIMIX_mutex_unlock(smpi_global->running_hosts_count_mutex); + + } while (0 < running_hosts_count); + + 
SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count--; + if (smpi_global->ready_process_count == 0) { + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + } else if (smpi_global->ready_process_count < 0) { + // FIXME: can't happen, abort! + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + return 0; +} + +void *smpi_request_new() +{ + smpi_mpi_request_t *request = xbt_new(smpi_mpi_request_t, 1); + + request->completed = 0; + request->mutex = SIMIX_mutex_init(); + request->cond = SIMIX_cond_init(); + + return request; +} + +void smpi_request_free(void *pointer) +{ + + smpi_mpi_request_t *request = pointer; + + if (NULL != request) { + SIMIX_cond_destroy(request->cond); + SIMIX_mutex_destroy(request->mutex); + xbt_free(request); + } + + return; +} + +void smpi_request_reset(void *pointer) +{ + return; +} + + +void *smpi_message_new() +{ + return xbt_new(smpi_received_message_t, 1); +} + +void smpi_message_free(void *pointer) +{ + if (NULL != pointer) { + xbt_free(pointer); + } + + return; +} + +void smpi_message_reset(void *pointer) +{ + return; +} + +void smpi_global_init() +{ + int i; + + int size = SIMIX_host_get_number(); + + smpi_global = xbt_new(s_SMPI_Global_t, 1); + + // config variable + smpi_global->reference_speed = SMPI_DEFAULT_SPEED; + + smpi_global->root_ready = 0; + smpi_global->ready_process_count = 0; + + // start/stop + smpi_global->start_stop_mutex = SIMIX_mutex_init(); + smpi_global->start_stop_cond = SIMIX_cond_init(); + + // processes + smpi_global->sender_processes = xbt_new(smx_process_t, size); + smpi_global->receiver_processes = xbt_new(smx_process_t, size); + + // running hosts + smpi_global->running_hosts_count_mutex = SIMIX_mutex_init(); + smpi_global->running_hosts_count = 0; + + // mallocators + smpi_global->request_mallocator = xbt_mallocator_new(SMPI_REQUEST_MALLOCATOR_SIZE, + smpi_request_new, smpi_request_free, smpi_request_reset); + smpi_global->message_mallocator = 
xbt_mallocator_new(SMPI_MESSAGE_MALLOCATOR_SIZE, + smpi_message_new, smpi_message_free, smpi_message_reset); + + // + smpi_global->pending_send_request_queues = xbt_new(xbt_fifo_t, size); + smpi_global->pending_send_request_queues_mutexes = xbt_new(smx_mutex_t, size); + smpi_global->pending_recv_request_queues = xbt_new(xbt_fifo_t, size); + smpi_global->pending_recv_request_queues_mutexes = xbt_new(smx_mutex_t, size); + smpi_global->received_message_queues = xbt_new(xbt_fifo_t, size); + smpi_global->received_message_queues_mutexes = xbt_new(smx_mutex_t, size); + smpi_global->timers = xbt_new(xbt_os_timer_t, size); + smpi_global->timers_mutexes = xbt_new(smx_mutex_t, size); + + for(i = 0; i < size; i++) { + smpi_global->pending_send_request_queues[i] = xbt_fifo_new(); + smpi_global->pending_send_request_queues_mutexes[i] = SIMIX_mutex_init(); + smpi_global->pending_recv_request_queues[i] = xbt_fifo_new(); + smpi_global->pending_recv_request_queues_mutexes[i] = SIMIX_mutex_init(); + smpi_global->received_message_queues[i] = xbt_fifo_new(); + smpi_global->received_message_queues_mutexes[i] = SIMIX_mutex_init(); + smpi_global->timers[i] = xbt_os_timer_new(); + smpi_global->timers_mutexes[i] = SIMIX_mutex_init(); + } + +} + +void smpi_global_destroy() +{ + int i; + + int size = SIMIX_host_get_number(); + + // start/stop + SIMIX_mutex_destroy(smpi_global->start_stop_mutex); + SIMIX_cond_destroy(smpi_global->start_stop_cond); + + // processes + xbt_free(smpi_global->sender_processes); + xbt_free(smpi_global->receiver_processes); + + // running hosts + SIMIX_mutex_destroy(smpi_global->running_hosts_count_mutex); + + // mallocators + xbt_mallocator_free(smpi_global->request_mallocator); + xbt_mallocator_free(smpi_global->message_mallocator); + + for(i = 0; i < size; i++) { + xbt_fifo_free(smpi_global->pending_send_request_queues[i]); + SIMIX_mutex_destroy(smpi_global->pending_send_request_queues_mutexes[i]); + xbt_fifo_free(smpi_global->pending_recv_request_queues[i]); + 
SIMIX_mutex_destroy(smpi_global->pending_recv_request_queues_mutexes[i]); + xbt_fifo_free(smpi_global->received_message_queues[i]); + SIMIX_mutex_destroy(smpi_global->received_message_queues_mutexes[i]); + xbt_os_timer_free(smpi_global->timers[i]); + SIMIX_mutex_destroy(smpi_global->timers_mutexes[i]); + } + + xbt_free(smpi_global->pending_send_request_queues); + xbt_free(smpi_global->pending_send_request_queues_mutexes); + xbt_free(smpi_global->pending_recv_request_queues); + xbt_free(smpi_global->pending_recv_request_queues_mutexes); + xbt_free(smpi_global->received_message_queues); + xbt_free(smpi_global->received_message_queues_mutexes); + xbt_free(smpi_global->timers); + xbt_free(smpi_global->timers_mutexes); + + xbt_free(smpi_global); +} + +int smpi_run_simulation(int argc, char **argv) +{ + smx_cond_t cond = NULL; + smx_action_t action = NULL; + + xbt_fifo_t actions_failed = xbt_fifo_new(); + xbt_fifo_t actions_done = xbt_fifo_new(); + + srand(SMPI_RAND_SEED); + + SIMIX_global_init(&argc, argv); + + SIMIX_function_register("smpi_simulated_main", smpi_simulated_main); + SIMIX_function_register("smpi_sender", smpi_sender); + SIMIX_function_register("smpi_receiver", smpi_receiver); + + // FIXME: ought to verify these files... + SIMIX_create_environment(argv[1]); + + // must initialize globals between creating environment and launching app.... 
+ smpi_global_init(); + + SIMIX_launch_application(argv[2]); + + /* Prepare to display some more info when dying on Ctrl-C pressing */ + // FIXME: doesn't work + //signal(SIGINT, inthandler); + + /* Clean IO before the run */ + fflush(stdout); + fflush(stderr); + + while (SIMIX_solve(actions_done, actions_failed) != -1.0) { + while ((action = xbt_fifo_pop(actions_failed))) { + DEBUG1("** %s failed **", action->name); + while ((cond = xbt_fifo_pop(action->cond_list))) { + SIMIX_cond_broadcast(cond); + } + SIMIX_action_destroy(action); + } + while ((action = xbt_fifo_pop(actions_done))) { + DEBUG1("** %s done **",action->name); + while ((cond = xbt_fifo_pop(action->cond_list))) { + SIMIX_cond_broadcast(cond); + } + SIMIX_action_destroy(action); + } + } + + xbt_fifo_free(actions_failed); + xbt_fifo_free(actions_done); + + INFO1("simulation time %g", SIMIX_get_clock()); + + smpi_global_destroy(); + + SIMIX_clean(); + + return 0; +} + +void smpi_mpi_land_func(void *x, void *y, void *z) +{ + *(int *)z = *(int *)x && *(int *)y; +} + +void smpi_mpi_sum_func(void *x, void *y, void *z) +{ + *(int *)z = *(int *)x + *(int *)y; +} + + +void smpi_mpi_init() +{ + smx_process_t process; + smx_host_t host; + smx_host_t *hosts; + int size; + + SIMIX_mutex_lock(smpi_global->running_hosts_count_mutex); + smpi_global->running_hosts_count++; + SIMIX_mutex_unlock(smpi_global->running_hosts_count_mutex); + + // initialize some local variables + process = SIMIX_process_self(); + host = SIMIX_host_self(); + hosts = SIMIX_host_get_table(); + size = SIMIX_host_get_number(); + + // node 0 sets the globals + if (host == hosts[0]) { + + smpi_mpi_global = xbt_new(s_SMPI_MPI_Global_t, 1); + + // global communicator + smpi_mpi_global->mpi_comm_world = xbt_new(smpi_mpi_communicator_t, 1); + smpi_mpi_global->mpi_comm_world->size = size; + smpi_mpi_global->mpi_comm_world->barrier_count = 0; + smpi_mpi_global->mpi_comm_world->barrier_mutex = SIMIX_mutex_init(); + 
smpi_mpi_global->mpi_comm_world->barrier_cond = SIMIX_cond_init(); + smpi_mpi_global->mpi_comm_world->hosts = hosts; + smpi_mpi_global->mpi_comm_world->processes = xbt_new(smx_process_t, size); + smpi_mpi_global->mpi_comm_world->processes[0] = process; + + // mpi datatypes + smpi_mpi_global->mpi_byte = xbt_new(smpi_mpi_datatype_t, 1); + smpi_mpi_global->mpi_byte->size = (size_t)1; + smpi_mpi_global->mpi_int = xbt_new(smpi_mpi_datatype_t, 1); + smpi_mpi_global->mpi_int->size = sizeof(int); + smpi_mpi_global->mpi_double = xbt_new(smpi_mpi_datatype_t, 1); + smpi_mpi_global->mpi_double->size = sizeof(double); + + // mpi operations + smpi_mpi_global->mpi_land = xbt_new(smpi_mpi_op_t, 1); + smpi_mpi_global->mpi_land->func = smpi_mpi_land_func; + smpi_mpi_global->mpi_sum = xbt_new(smpi_mpi_op_t, 1); + smpi_mpi_global->mpi_sum->func = smpi_mpi_sum_func; + + // signal all nodes to perform initialization + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->root_ready = 1; + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + } else { + + // make sure root is done before own initialization + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + if (!smpi_global->root_ready) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + smpi_mpi_global->mpi_comm_world->processes[smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world)] = process; + } + + // wait for all nodes to signal initializatin complete + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count++; + if (smpi_global->ready_process_count < 3 * size) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } else { + SIMIX_cond_broadcast(smpi_global->start_stop_cond); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + return; +} + +void smpi_mpi_finalize() +{ + int i; + + 
SIMIX_mutex_lock(smpi_global->running_hosts_count_mutex); + i = --smpi_global->running_hosts_count; + SIMIX_mutex_unlock(smpi_global->running_hosts_count_mutex); + + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + smpi_global->ready_process_count--; + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + if (0 >= i) { + + // wake up senders/receivers + for (i = 0; i < smpi_mpi_global->mpi_comm_world->size; i++) { + if (SIMIX_process_is_suspended(smpi_global->sender_processes[i])) { + SIMIX_process_resume(smpi_global->sender_processes[i]); + } + if (SIMIX_process_is_suspended(smpi_global->receiver_processes[i])) { + SIMIX_process_resume(smpi_global->receiver_processes[i]); + } + } + + // wait for senders/receivers to exit... + SIMIX_mutex_lock(smpi_global->start_stop_mutex); + if (smpi_global->ready_process_count > 0) { + SIMIX_cond_wait(smpi_global->start_stop_cond, smpi_global->start_stop_mutex); + } + SIMIX_mutex_unlock(smpi_global->start_stop_mutex); + + SIMIX_mutex_destroy(smpi_mpi_global->mpi_comm_world->barrier_mutex); + SIMIX_cond_destroy(smpi_mpi_global->mpi_comm_world->barrier_cond); + xbt_free(smpi_mpi_global->mpi_comm_world->processes); + xbt_free(smpi_mpi_global->mpi_comm_world); + + xbt_free(smpi_mpi_global->mpi_byte); + xbt_free(smpi_mpi_global->mpi_int); + xbt_free(smpi_mpi_global->mpi_double); + + xbt_free(smpi_mpi_global->mpi_land); + xbt_free(smpi_mpi_global->mpi_sum); + + xbt_free(smpi_mpi_global); + } + +} + +// FIXME: could cause trouble with multithreaded procs on same host... 
+void smpi_bench_begin() +{ + int rank = smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world); + SIMIX_mutex_lock(smpi_global->timers_mutexes[rank]); + xbt_os_timer_start(smpi_global->timers[rank]); + return; +} + +void smpi_bench_end() +{ + int rank = smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world); + double duration; + smx_host_t host; + smx_action_t compute_action; + smx_mutex_t mutex; + smx_cond_t cond; + + xbt_os_timer_stop(smpi_global->timers[rank]); + + duration = xbt_os_timer_elapsed(smpi_global->timers[rank]); + SIMIX_mutex_unlock(smpi_global->timers_mutexes[rank]); + + host = smpi_mpi_global->mpi_comm_world->hosts[rank]; + compute_action = SIMIX_action_execute(host, NULL, duration * SMPI_DEFAULT_SPEED); + mutex = SIMIX_mutex_init(); + cond = SIMIX_cond_init(); + + SIMIX_register_condition_to_action(compute_action, cond); + SIMIX_register_action_to_condition(compute_action, cond); + SIMIX_mutex_lock(mutex); + SIMIX_cond_wait(cond, mutex); + SIMIX_mutex_unlock(mutex); + + SIMIX_mutex_destroy(mutex); + SIMIX_cond_destroy(cond); + + // FIXME: check for success/failure? + + return; +} + +void smpi_barrier(smpi_mpi_communicator_t *comm) +{ + + SIMIX_mutex_lock(comm->barrier_mutex); + if(++comm->barrier_count < comm->size) { + SIMIX_cond_wait(comm->barrier_cond, comm->barrier_mutex); + } else { + comm->barrier_count = 0; + SIMIX_cond_broadcast(comm->barrier_cond); + } + SIMIX_mutex_unlock(comm->barrier_mutex); + + return; +} + +// FIXME: smarter algorithm... 
+int smpi_comm_rank(smpi_mpi_communicator_t *comm, smx_host_t host) +{ + int i; + for(i = 0; i < comm->size && host != comm->hosts[i]; i++); + if (i >= comm->size) i = -1; + return i; +} + +int smpi_create_request(void *buf, int count, smpi_mpi_datatype_t *datatype, + int src, int dst, int tag, smpi_mpi_communicator_t *comm, smpi_mpi_request_t **request) +{ + int retval = MPI_SUCCESS; + + *request = NULL; + + if (0 > count) { + retval = MPI_ERR_COUNT; + } else if (NULL == buf) { + retval = MPI_ERR_INTERN; + } else if (NULL == datatype) { + retval = MPI_ERR_TYPE; + } else if (NULL == comm) { + retval = MPI_ERR_COMM; + } else if (MPI_ANY_SOURCE != src && (0 > src || comm->size <= src)) { + retval = MPI_ERR_RANK; + } else if (0 > dst || comm->size <= dst) { + retval = MPI_ERR_RANK; + } else if (0 > tag) { + retval = MPI_ERR_TAG; + } else { + *request = xbt_mallocator_get(smpi_global->request_mallocator); + (*request)->comm = comm; + (*request)->src = src; + (*request)->dst = dst; + (*request)->tag = tag; + (*request)->buf = buf; + (*request)->count = count; + (*request)->datatype = datatype; + } + return retval; +} + +int smpi_isend(smpi_mpi_request_t *request) +{ + int retval = MPI_SUCCESS; + int rank = smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world); + + if (NULL != request) { + SIMIX_mutex_lock(smpi_global->pending_send_request_queues_mutexes[rank]); + xbt_fifo_push(smpi_global->pending_send_request_queues[rank], request); + SIMIX_mutex_unlock(smpi_global->pending_send_request_queues_mutexes[rank]); + } + + if (SIMIX_process_is_suspended(smpi_global->sender_processes[rank])) { + SIMIX_process_resume(smpi_global->sender_processes[rank]); + } + + return retval; +} + +int smpi_irecv(smpi_mpi_request_t *request) +{ + int retval = MPI_SUCCESS; + int rank = smpi_mpi_comm_rank_self(smpi_mpi_global->mpi_comm_world); + + if (NULL != request) { + SIMIX_mutex_lock(smpi_global->pending_recv_request_queues_mutexes[rank]); + 
xbt_fifo_push(smpi_global->pending_recv_request_queues[rank], request); + SIMIX_mutex_unlock(smpi_global->pending_recv_request_queues_mutexes[rank]); + } + + if (SIMIX_process_is_suspended(smpi_global->receiver_processes[rank])) { + SIMIX_process_resume(smpi_global->receiver_processes[rank]); + } + + return retval; +} + +void smpi_wait(smpi_mpi_request_t *request, smpi_mpi_status_t *status) +{ + if (NULL != request) { + SIMIX_mutex_lock(request->mutex); + if (!request->completed) { + SIMIX_cond_wait(request->cond, request->mutex); + } + if (NULL != status) { + status->MPI_SOURCE = request->src; + } + SIMIX_mutex_unlock(request->mutex); + } +} + +// FIXME: move into own file +int smpi_gettimeofday(struct timeval *tv, struct timezone *tz) +{ + double now; + int retval = 0; + smpi_bench_end(); + if (NULL == tv) { + retval = -1; + } else { + now = SIMIX_get_clock(); + tv->tv_sec = now; + tv->tv_usec = ((now - (double)tv->tv_sec) * 1000000.0); + } + smpi_bench_begin(); + return retval; +} + +unsigned int smpi_sleep(unsigned int seconds) +{ + smx_mutex_t mutex; + smx_cond_t cond; + smx_host_t host; + smx_action_t sleep_action; + + smpi_bench_end(); + host = SIMIX_host_self(); + sleep_action = SIMIX_action_sleep(host, seconds); + mutex = SIMIX_mutex_init(); + cond = SIMIX_cond_init(); + + SIMIX_register_condition_to_action(sleep_action, cond); + SIMIX_register_action_to_condition(sleep_action, cond); + SIMIX_mutex_lock(mutex); + SIMIX_cond_wait(cond, mutex); + SIMIX_mutex_unlock(mutex); + + SIMIX_mutex_destroy(mutex); + SIMIX_cond_destroy(cond); + + // FIXME: check for success/failure? 
+ + smpi_bench_begin(); + return 0; +} + +void smpi_exit(int status) +{ + smpi_bench_end(); + SIMIX_mutex_lock(smpi_global->running_hosts_count_mutex); + smpi_global->running_hosts_count--; + SIMIX_mutex_unlock(smpi_global->running_hosts_count_mutex); + SIMIX_process_kill(SIMIX_process_self()); + return; +} diff --git a/src/smpi/smpi_mpi.c b/src/smpi/smpi_mpi.c new file mode 100644 index 0000000000..432e57f283 --- /dev/null +++ b/src/smpi/smpi_mpi.c @@ -0,0 +1,155 @@ +#include +#include +#include "private.h" + +int MPI_Init(int *argc, char ***argv) +{ + smpi_mpi_init(); + smpi_bench_begin(); + return MPI_SUCCESS; +} + +int MPI_Finalize() +{ + smpi_bench_end(); + smpi_mpi_finalize(); + return MPI_SUCCESS; +} + +// right now this just exits the current node, should send abort signal to all +// hosts in the communicator; +int MPI_Abort(MPI_Comm comm, int errorcode) +{ + smpi_exit(errorcode); + + return 0; +} + +int MPI_Comm_size(MPI_Comm comm, int *size) +{ + int retval = MPI_SUCCESS; + + smpi_bench_end(); + + if (NULL == comm) { + retval = MPI_ERR_COMM; + } else if (NULL == size) { + retval = MPI_ERR_ARG; + } else { + *size = comm->size; + } + + smpi_bench_begin(); + + return retval; +} + +int MPI_Comm_rank(MPI_Comm comm, int *rank) +{ + int retval = MPI_SUCCESS; + + smpi_bench_end(); + + if (NULL == comm) { + retval = MPI_ERR_COMM; + } else if (NULL == rank) { + retval = MPI_ERR_ARG; + } else { + *rank = smpi_comm_rank(comm, SIMIX_host_self()); + } + + smpi_bench_begin(); + + return retval; +} + +int MPI_Type_size(MPI_Datatype datatype, size_t *size) +{ + int retval = MPI_SUCCESS; + + smpi_bench_end(); + + if (NULL == datatype) { + retval = MPI_ERR_TYPE; + } else if (NULL == size) { + retval = MPI_ERR_ARG; + } else { + *size = datatype->size; + } + + smpi_bench_begin(); + + return retval; +} + +int MPI_Barrier(MPI_Comm comm) +{ + smpi_bench_end(); + smpi_barrier(comm); + smpi_bench_begin(); + return MPI_SUCCESS; +} + +int MPI_Irecv(void *buf, int count, MPI_Datatype 
datatype, int src, int tag, MPI_Comm comm, MPI_Request *request) +{ + int retval = MPI_SUCCESS; + int dst; + smpi_bench_end(); + dst = smpi_mpi_comm_rank_self(comm); + retval = smpi_create_request(buf, count, datatype, src, dst, tag, comm, request); + if (NULL != *request) { + smpi_irecv(*request); + } + smpi_bench_begin(); + return retval; +} + +int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status *status) +{ + int retval = MPI_SUCCESS; + int dst; + smpi_mpi_request_t *request; + smpi_bench_end(); + dst = smpi_mpi_comm_rank_self(comm); + retval = smpi_create_request(buf, count, datatype, src, dst, tag, comm, &request); + if (NULL != request) { + smpi_irecv(request); + smpi_wait(request, status); + // FIXME: mallocator + //xbt_free(request); + } + smpi_bench_begin(); + return retval; +} + +int MPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm, MPI_Request *request) +{ + int retval = MPI_SUCCESS; + int src; + smpi_bench_end(); + src = smpi_mpi_comm_rank_self(comm); + retval = smpi_create_request(buf, count, datatype, src, dst, tag, comm, request); + if (NULL != *request) { + smpi_isend(*request); + } + smpi_bench_begin(); + return retval; +} + +int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) +{ + int retval = MPI_SUCCESS; + int src; + smpi_mpi_request_t *request; + smpi_bench_end(); + src = smpi_mpi_comm_rank_self(comm); + retval = smpi_create_request(buf, count, datatype, src, dst, tag, comm, &request); + if (NULL != request) { + smpi_isend(request); + smpi_wait(request, MPI_STATUS_IGNORE); + // FIXME: mallocator + //xbt_free(request) + } + smpi_bench_begin(); + return retval; +} -- 2.20.1