} else return 0;
}
+
+typedef struct s_smpi_factor *smpi_factor_t; // pointer typedef kept for xbt-style API symmetry
+typedef struct s_smpi_factor {
+ long factor; // message-size threshold this entry applies up to
+ int nb_values; // how many entries of 'values' are filled (1..4, enforced by parse_factor)
+ double values[4];// arbitrarily capped at 4 coefficients per factor
+} s_smpi_factor_t;
+xbt_dynar_t smpi_os_values = NULL; // lazily parsed from "smpi/os" config (see smpi_os)
+xbt_dynar_t smpi_or_values = NULL; // lazily parsed from "smpi/or" config (see smpi_or)
+xbt_dynar_t smpi_ois_values = NULL; // lazily parsed from "smpi/ois" config (see smpi_ois)
+
+// Methods used to parse and store the values for timing injections in smpi
+// These are taken from surf/network.c and generalized to have more factors
+// These methods should be merged with those in surf/network.c (moved somewhere in xbt ?)
+
+static int factor_cmp(const void *pa, const void *pb)
+{ // qsort-style three-way compare on 'factor': must return <0/0/>0, not a boolean; '(a>b)-(a<b)' also avoids long->int truncation
+ return (((s_smpi_factor_t*)pa)->factor > ((s_smpi_factor_t*)pb)->factor) - (((s_smpi_factor_t*)pa)->factor < ((s_smpi_factor_t*)pb)->factor);
+}
+
+
+static xbt_dynar_t parse_factor(const char *smpi_coef_string)
+{ // Parse "factor:v1[:v2[:v3[:v4]]];..." into a dynar of s_smpi_factor_t sorted by factor; caller owns the dynar
+ char *value = NULL;
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ int i=0;
+ xbt_dynar_t smpi_factor, radical_elements, radical_elements2 = NULL;
+
+ smpi_factor = xbt_dynar_new(sizeof(s_smpi_factor_t), NULL);
+ radical_elements = xbt_str_split(smpi_coef_string, ";"); // one table entry per ';'-separated group
+ xbt_dynar_foreach(radical_elements, iter, value) {
+ fact.nb_values=0;
+ radical_elements2 = xbt_str_split(value, ":");
+ if (xbt_dynar_length(radical_elements2) <2 || xbt_dynar_length(radical_elements2) > 5)
+ xbt_die("Malformed radical for smpi factor!"); // need a threshold plus 1..4 coefficients (values[] holds at most 4)
+ for(i =0; i<xbt_dynar_length(radical_elements2);i++ ){
+ if (i==0){
+ fact.factor = atol(xbt_dynar_get_as(radical_elements2, i, char *)); // first token: size threshold
+ }else{
+ fact.values[fact.nb_values] = atof(xbt_dynar_get_as(radical_elements2, i, char *)); // remaining tokens: model coefficients
+ fact.nb_values++;
+ }
+ }
+
+ xbt_dynar_push_as(smpi_factor, s_smpi_factor_t, fact);
+ XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+ xbt_dynar_free(&radical_elements2);
+ }
+ xbt_dynar_free(&radical_elements);
+ iter=0;
+ xbt_dynar_sort(smpi_factor, &factor_cmp); // ascending by factor, so lookups can stop at the first threshold >= size
+ xbt_dynar_foreach(smpi_factor, iter, fact) {
+ XBT_DEBUG("smpi_factor:\t%ld : %d values, first: %f", fact.factor, fact.nb_values ,fact.values[0]);
+ }
+ return smpi_factor;
+}
+
+static double smpi_os(double size)
+{ // Sender-side overhead (seconds) injected for a blocking send of 'size' bytes
+ if (!smpi_os_values)
+ smpi_os_values =
+ parse_factor(sg_cfg_get_string("smpi/os")); // lazy init from the "smpi/os" config string
+
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_os_values, iter, fact) {
+ if (size <= fact.factor) { // entries are sorted ascending: first threshold >= size wins
+ XBT_DEBUG("os : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else{
+ current=fact.values[0]+fact.values[1]*size; // affine model; NOTE(review): reads values[1] even if nb_values==1 — confirm config always has 2 coefficients
+ }
+ }
+ XBT_DEBUG("os : %lf > %ld return %f", size, fact.factor, current); // NOTE(review): 'fact' is uninitialized here if the dynar is empty — confirm it cannot be
+
+ return current;
+}
+
+static double smpi_ois(double size)
+{ // Sender-side overhead (seconds) injected for a non-blocking (isend) send of 'size' bytes
+ if (!smpi_ois_values)
+ smpi_ois_values =
+ parse_factor(sg_cfg_get_string("smpi/ois")); // lazy init from the "smpi/ois" config string
+
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_ois_values, iter, fact) {
+ if (size <= fact.factor) { // entries are sorted ascending: first threshold >= size wins
+ XBT_DEBUG("ois : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else{
+ current=fact.values[0]+fact.values[1]*size; // affine model; NOTE(review): reads values[1] even if nb_values==1 — confirm config always has 2 coefficients
+ }
+ }
+ XBT_DEBUG("ois : %lf > %ld return %f", size, fact.factor, current); // NOTE(review): 'fact' is uninitialized here if the dynar is empty — confirm it cannot be
+
+ return current;
+}
+
+static double smpi_or(double size)
+{ // Receiver-side overhead (seconds) injected for receiving 'size' bytes
+ if (!smpi_or_values)
+ smpi_or_values =
+ parse_factor(sg_cfg_get_string("smpi/or")); // lazy init from the "smpi/or" config string
+
+ unsigned int iter = 0;
+ s_smpi_factor_t fact;
+ double current=0.0;
+ xbt_dynar_foreach(smpi_or_values, iter, fact) {
+ if (size <= fact.factor) { // entries are sorted ascending: first threshold >= size wins
+ XBT_DEBUG("or : %lf <= %ld return %f", size, fact.factor, current);
+ return current;
+ }else
+ current=fact.values[0]+fact.values[1]*size; // affine model, same as smpi_os/smpi_ois
+ }
+ XBT_DEBUG("or : %lf > %ld return %f", size, fact.factor, current); // NOTE(review): 'fact' is uninitialized here if the dynar is empty — confirm it cannot be
+
+ return current;
+}
+
static MPI_Request build_request(void *buf, int count,
MPI_Datatype datatype, int src, int dst,
int tag, MPI_Comm comm, unsigned flags)
if(datatype->has_subtype == 1){
// This part handles the problem of non-contiguous memory
old_buf = buf;
- buf = malloc(count*smpi_datatype_size(datatype));
+ buf = xbt_malloc(count*smpi_datatype_size(datatype));
if (flags & SEND) {
subtype->serialize(old_buf, buf, count, datatype->substruct);
}
}
-void smpi_empty_status(MPI_Status * status) {
+void smpi_empty_status(MPI_Status * status)
+{ // Reset a status to the "empty" values (no-op for MPI_STATUS_IGNORE)
 if(status != MPI_STATUS_IGNORE) {
- status->MPI_SOURCE=MPI_ANY_SOURCE;
- status->MPI_TAG=MPI_ANY_TAG;
- status->count=0;
+ status->MPI_SOURCE = MPI_ANY_SOURCE;
+ status->MPI_TAG = MPI_ANY_TAG;
+ status->MPI_ERROR = MPI_SUCCESS; // newly added: an empty status must not carry a stale error
+ status->count=0;
 }
 }
return request;
}
+MPI_Request smpi_mpi_ssend_init(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ /* Create, without starting, a persistent synchronous-send request. */
+ MPI_Request req = build_request(buf, count, datatype, smpi_comm_rank(comm),
+ dst, tag, comm, PERSISTENT | SSEND | SEND);
+ req->refcount++; /* keep an extra reference (persistent request) */
+ return req;
+}
+
MPI_Request smpi_mpi_recv_init(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
request->real_size=request->size;
smpi_datatype_use(request->old_type);
request->action = simcall_comm_irecv(mailbox, request->buf, &request->real_size, &match_recv, request);
+
+ //integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
+ double sleeptime = request->detached ? smpi_or(request->size) : 0.0;
+ if(sleeptime!=0.0){
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("receiving size of %zu : sleep %lf ", request->size, smpi_or(request->size));
+ }
+
} else {
int receiver = smpi_group_index(smpi_comm_group(request->comm), request->dst);
XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
mailbox = smpi_process_remote_mailbox(receiver);
}
- if (request->size < 64*1024 ) { //(FIXME: this limit should be configurable)
+ if ( (! (request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
void *oldbuf = NULL;
request->detached = 1;
request->refcount++;
if(request->old_type->has_subtype == 0){
oldbuf = request->buf;
if (oldbuf){
- request->buf = malloc(request->size);
+ request->buf = xbt_malloc(request->size);
memcpy(request->buf,oldbuf,request->size);
}
}
request->real_size=request->size;
smpi_datatype_use(request->old_type);
+ //if we are giving back the control to the user without waiting for completion, we have to inject timings
+ double sleeptime =0.0;
+ if(request->detached || (request->flags & (ISEND|SSEND))){// issend should be treated as isend
+ //isend and send timings may be different
+ sleeptime = (request->flags & ISEND)? smpi_ois(request->size) : smpi_os(request->size);
+ }
+
+ if(sleeptime!=0.0){
+ simcall_process_sleep(sleeptime);
+ XBT_DEBUG("sending size of %zu : sleep %lf ", request->size, smpi_os(request->size));
+ }
+
request->action =
simcall_comm_isend(mailbox, request->size, -1.0,
request->buf, request->real_size,
{
MPI_Request request =
build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
- comm, NON_PERSISTENT | SEND);
+ comm, NON_PERSISTENT | ISEND | SEND);
smpi_mpi_start(request);
return request;
}
+MPI_Request smpi_mpi_issend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{
+ /* Non-blocking synchronous send: build the request and start it at once. */
+ MPI_Request req = build_request(buf, count, datatype, smpi_comm_rank(comm),
+ dst, tag, comm, NON_PERSISTENT | ISEND | SSEND | SEND);
+ smpi_mpi_start(req);
+ return req;
+}
+
+
+
MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
void smpi_mpi_send(void *buf, int count, MPI_Datatype datatype, int dst,
 int tag, MPI_Comm comm)
{
- MPI_Request request;
- request = smpi_mpi_isend(buf, count, datatype, dst, tag, comm);
+ MPI_Request request =
+ build_request(buf, count, datatype, smpi_comm_rank(comm), dst, tag,
+ comm, NON_PERSISTENT | SEND);
+ // built directly (not via smpi_mpi_isend) so the ISEND flag stays clear and smpi_mpi_start injects blocking-send timing (smpi_os, not smpi_ois)
+ smpi_mpi_start(request);
 smpi_mpi_wait(&request, MPI_STATUS_IGNORE);
}
+void smpi_mpi_ssend(void *buf, int count, MPI_Datatype datatype,
+ int dst, int tag, MPI_Comm comm)
+{ /* Blocking synchronous send: issend, then wait for completion. */
+ MPI_Request req = smpi_mpi_issend(buf, count, datatype, dst, tag, comm);
+ smpi_mpi_wait(&req, MPI_STATUS_IGNORE);
+}
+
void smpi_mpi_sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
int dst, int sendtag, void *recvbuf, int recvcount,
MPI_Datatype recvtype, int src, int recvtag,
smpi_mpi_waitall(2, requests, stats);
if(status != MPI_STATUS_IGNORE) {
// Copy receive status
- memcpy(status, &stats[1], sizeof(MPI_Status));
+ *status = stats[1];
}
}
static void finish_wait(MPI_Request * request, MPI_Status * status)
{
MPI_Request req = *request;
+ if(status != MPI_STATUS_IGNORE)
+ smpi_empty_status(status);
+
if(!(req->detached && req->flags & SEND)){
if(status != MPI_STATUS_IGNORE) {
status->MPI_SOURCE = req->src == MPI_ANY_SOURCE ? req->real_src : req->src;
status->MPI_TAG = req->tag == MPI_ANY_TAG ? req->real_tag : req->tag;
- if(req->truncated)
- status->MPI_ERROR = MPI_ERR_TRUNCATE;
- else status->MPI_ERROR = MPI_SUCCESS ;
+ status->MPI_ERROR = req->truncated ? MPI_ERR_TRUNCATE : MPI_SUCCESS;
// this handles the case were size in receive differs from size in send
// FIXME: really this should just contain the count of receive-type blocks,
// right?
smpi_empty_status(pstat);
}
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
}
return flag;
int index, c;
MPI_Status stat;
MPI_Status *pstat = status == MPI_STATUSES_IGNORE ? MPI_STATUS_IGNORE : &stat;
- int retvalue=MPI_SUCCESS;
+ int retvalue = MPI_SUCCESS;
//tag invalid requests in the set
- for(c = 0; c < count; c++) {
- if(requests[c]==MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE)
+ if (status != MPI_STATUSES_IGNORE) {
+ for (c = 0; c < count; c++) {
+ if (requests[c] == MPI_REQUEST_NULL || requests[c]->dst == MPI_PROC_NULL) {
smpi_empty_status(&status[c]);
- }else if(requests[c]->src == MPI_PROC_NULL ){
- if(status != MPI_STATUSES_IGNORE) {
+ } else if (requests[c]->src == MPI_PROC_NULL) {
smpi_empty_status(&status[c]);
- status[c].MPI_SOURCE=MPI_PROC_NULL;
+ status[c].MPI_SOURCE = MPI_PROC_NULL;
}
}
}
for(c = 0; c < count; c++) {
- if(MC_is_active()) {
- smpi_mpi_wait(&requests[c], pstat);
- index = c;
- } else {
- index = smpi_mpi_waitany(count, requests, pstat);
- if(index == MPI_UNDEFINED) {
- break;
- }
- if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
- if(status[index].MPI_ERROR==MPI_ERR_TRUNCATE)retvalue=MPI_ERR_IN_STATUS;
-
- }
+ if (MC_is_active()) {
+ smpi_mpi_wait(&requests[c], pstat);
+ index = c;
+ } else {
+ index = smpi_mpi_waitany(count, requests, pstat);
+ if (index == MPI_UNDEFINED)
+ break;
+ }
+ if (status != MPI_STATUSES_IGNORE) {
+ status[index] = *pstat;
+ if (status[index].MPI_ERROR == MPI_ERR_TRUNCATE)
+ retvalue = MPI_ERR_IN_STATUS;
}
}
indices[count] = index;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[index], pstat, sizeof(*pstat));
+ status[index] = *pstat;
}
}else{
return MPI_UNDEFINED;
indices[count] = i;
count++;
if(status != MPI_STATUSES_IGNORE) {
- memcpy(&status[i], pstat, sizeof(*pstat));
+ status[i] = *pstat;
}
}
}else{