Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
SMPI: Use the amount of processes to malloc the process list, not the amount of hosts
[simgrid.git] / src / smpi / smpi_base.c
1 #include "private.h"
2 #include "xbt/time.h"
3
4 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_base, smpi,
5                                 "Logging specific to SMPI (base)");
6 XBT_LOG_EXTERNAL_CATEGORY(smpi_base);
7 XBT_LOG_EXTERNAL_CATEGORY(smpi_bench);
8 XBT_LOG_EXTERNAL_CATEGORY(smpi_kernel);
9 XBT_LOG_EXTERNAL_CATEGORY(smpi_mpi);
10 XBT_LOG_EXTERNAL_CATEGORY(smpi_receiver);
11 XBT_LOG_EXTERNAL_CATEGORY(smpi_sender);
12 XBT_LOG_EXTERNAL_CATEGORY(smpi_util);
13
14 smpi_mpi_global_t smpi_mpi_global = NULL;
15
16 void smpi_mpi_land_func(void *a, void *b, int *length,
17                         MPI_Datatype * datatype);
18
19 void smpi_mpi_land_func(void *a, void *b, int *length,
20                         MPI_Datatype * datatype)
21 {
22   int i;
23   if (*datatype == smpi_mpi_global->mpi_int) {
24     int *x = a, *y = b;
25     for (i = 0; i < *length; i++) {
26       y[i] = x[i] && y[i];
27     }
28   }
29 }
30
31 void smpi_mpi_sum_func(void *a, void *b, int *length,
32                        MPI_Datatype * datatype);
33
34 void smpi_mpi_sum_func(void *a, void *b, int *length, MPI_Datatype * datatype)
35 {
36   int i;
37   if (*datatype == smpi_mpi_global->mpi_int) {
38     int *x = a, *y = b;
39     for (i = 0; i < *length; i++) {
40       y[i] = x[i] + y[i];
41     }
42   }
43 }
44
45 int smpi_mpi_comm_rank(smpi_mpi_communicator_t comm)
46 {
47   return comm->index_to_rank_map[smpi_process_index()];
48 }
49
50 void smpi_process_init(int *argc, char***argv)
51 {
52   smpi_process_data_t pdata;
53
54   // initialize some local variables
55
56   pdata = xbt_new(s_smpi_process_data_t, 1);
57   SIMIX_process_set_data(SIMIX_process_self(),pdata);
58
59   /* get rank from command line, and remove it from argv */
60   pdata->index = atoi( (*argv)[1] );
61   DEBUG1("I'm rank %d",pdata->index);
62   if (*argc>2) {
63           memmove((*argv)[1],(*argv)[2], sizeof(char*)* (*argc-2));
64           (*argv)[ (*argc)-1] = NULL;
65   }
66   (*argc)--;
67
68   pdata->mutex = SIMIX_mutex_init();
69   pdata->cond = SIMIX_cond_init();
70   pdata->finalize = 0;
71
72   pdata->pending_recv_request_queue = xbt_fifo_new();
73   pdata->pending_send_request_queue = xbt_fifo_new();
74   pdata->received_message_queue = xbt_fifo_new();
75
76   pdata->main = SIMIX_process_self();
77   pdata->sender = SIMIX_process_create("smpi_sender",
78           smpi_sender, pdata,
79           SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
80           /*props */ NULL);
81   pdata->receiver = SIMIX_process_create("smpi_receiver",
82           smpi_receiver, pdata,
83           SIMIX_host_get_name(SIMIX_host_self()), 0, NULL,
84           /*props */ NULL);
85
86   smpi_global->main_processes[pdata->index] = SIMIX_process_self();
87   return;
88 }
89
/* Tear down the calling process's SMPI state: signal the helper sender and
 * receiver processes to exit, wait until both have acknowledged, then
 * release every resource allocated by smpi_process_init(). */
void smpi_process_finalize()
{
  smpi_process_data_t pdata =  SIMIX_process_get_data(SIMIX_process_self());

  pdata->finalize = 2; /* Tell sender and receiver to quit */
  SIMIX_process_resume(pdata->sender);
  SIMIX_process_resume(pdata->receiver);
  /* Each helper decrements pdata->finalize and signals the condition when it
   * is done; loop until both have exited. */
  while (pdata->finalize>0) { /* wait until it's done */
          /* NOTE(review): pdata->mutex is passed to SIMIX_cond_wait without an
           * explicit SIMIX_mutex_lock here — confirm SIMIX tolerates waiting
           * on an unlocked mutex, or a lock/unlock pair is missing. */
          SIMIX_cond_wait(pdata->cond,pdata->mutex);
  }

  SIMIX_mutex_destroy(pdata->mutex);
  SIMIX_cond_destroy(pdata->cond);
  xbt_fifo_free(pdata->pending_recv_request_queue);
  xbt_fifo_free(pdata->pending_send_request_queue);
  xbt_fifo_free(pdata->received_message_queue);
}
107
/* Block the caller until all comm->size processes have reached the barrier.
 * Implemented as a reusable counting barrier over comm's shared mutex/cond:
 * each arrival increments barrier_count; the last arrival broadcasts, and
 * the count is reset by the first arrival of the NEXT barrier (the
 * "count > size" branch), which keeps late wakers from racing a reset.
 * Always returns MPI_SUCCESS. */
int smpi_mpi_barrier(smpi_mpi_communicator_t comm)
{

  SIMIX_mutex_lock(comm->barrier_mutex);
  ++comm->barrier_count;
  if (comm->barrier_count > comm->size) {       // only happens on second barrier...
    comm->barrier_count = 0;
  } else if (comm->barrier_count == comm->size) {
    /* Last process to arrive: release everyone waiting below. */
    SIMIX_cond_broadcast(comm->barrier_cond);
  }
  /* Wait until the full group has arrived (spurious wakeups re-check). */
  while (comm->barrier_count < comm->size) {
    SIMIX_cond_wait(comm->barrier_cond, comm->barrier_mutex);
  }
  SIMIX_mutex_unlock(comm->barrier_mutex);

  return MPI_SUCCESS;
}
125
126 int smpi_mpi_isend(smpi_mpi_request_t request)
127 {
128         smpi_process_data_t pdata =  SIMIX_process_get_data(SIMIX_process_self());
129   int retval = MPI_SUCCESS;
130
131   if (NULL == request) {
132     retval = MPI_ERR_INTERN;
133   } else {
134     xbt_fifo_push(pdata->pending_send_request_queue, request);
135     SIMIX_process_resume(pdata->sender);
136   }
137
138   return retval;
139 }
140
141 int smpi_mpi_irecv(smpi_mpi_request_t request)
142 {
143   int retval = MPI_SUCCESS;
144   smpi_process_data_t pdata =  SIMIX_process_get_data(SIMIX_process_self());
145
146   if (NULL == request) {
147     retval = MPI_ERR_INTERN;
148   } else {
149     xbt_fifo_push(pdata->pending_recv_request_queue, request);
150
151     if (SIMIX_process_is_suspended(pdata->receiver)) {
152       SIMIX_process_resume(pdata->receiver);
153     }
154   }
155
156   return retval;
157 }
158
159 int smpi_mpi_wait(smpi_mpi_request_t request, smpi_mpi_status_t * status)
160 {
161   int retval = MPI_SUCCESS;
162
163   if (NULL == request) {
164     retval = MPI_ERR_INTERN;
165   } else {
166     SIMIX_mutex_lock(request->mutex);
167     while (!request->completed) {
168       SIMIX_cond_wait(request->cond, request->mutex);
169     }
170     if (NULL != status) {
171       status->MPI_SOURCE = request->src;
172       status->MPI_TAG = request->tag;
173       status->MPI_ERROR = MPI_SUCCESS;
174     }
175     SIMIX_mutex_unlock(request->mutex);
176   }
177
178   return retval;
179 }