xbt_dynar_t smpi_or_values = NULL;
xbt_dynar_t smpi_ois_values = NULL;
+double smpi_wtime_sleep = 0.0;
+double smpi_iprobe_sleep = 1e-4;
+double smpi_test_sleep = 1e-4;
+
+
// Methods used to parse and store the values for timing injections in smpi
// These are taken from surf/network.c and generalized to have more factors
// These methods should be merged with those in surf/network.c (moved somewhere in xbt ?)
return current;
}
+// Return the current simulated wall-clock time (backing MPI_Wtime).
+// When called from inside a benchmarked user-code section, the bench is
+// paused so the simulated clock is read consistently, and an optional
+// tiny sleep (smpi_wtime_sleep) is injected so that applications that
+// busy-wait on MPI_Wtime still let simulated time advance.
+double smpi_mpi_wtime(){
+  double time;
+  if (smpi_process_initialized() && !smpi_process_finalized() && !smpi_process_get_sampling()) {
+    smpi_bench_end();
+    time = SIMIX_get_clock();
+    //to avoid deadlocks if called too many times
+    if(smpi_wtime_sleep > 0) simcall_process_sleep(smpi_wtime_sleep);
+    smpi_bench_begin();
+  } else {
+    // Outside a benchmarked section (init/finalize/sampling): just read the clock.
+    time = SIMIX_get_clock();
+  }
+  return time;
+}
+
static MPI_Request build_request(void *buf, int count,
MPI_Datatype datatype, int src, int dst,
int tag, MPI_Comm comm, unsigned flags)
s_smpi_subtype_t *subtype = datatype->substruct;
- if(datatype->has_subtype == 1){
+ if(((flags & RECV) && (flags & ACCUMULATE)) || (datatype->has_subtype == 1)){
// This part handles the problem of non-contiguous memory
old_buf = buf;
buf = count==0 ? NULL : xbt_malloc(count*smpi_datatype_size(datatype));
- if (flags & SEND) {
+ if ((datatype->has_subtype == 1) && (flags & SEND)) {
subtype->serialize(old_buf, buf, count, datatype->substruct);
}
}
request->refcount = 1;
else
request->refcount = 0;
-
+ request->op = MPI_REPLACE;
#ifdef HAVE_TRACING
request->send = 0;
request->recv = 0;
action_fp=NULL;
if (path) {
action_fp = fopen(path, "r");
- xbt_assert(action_fp != NULL, "Cannot open %s: %s", path,
- strerror(errno));
+ if (action_fp == NULL)
+ xbt_die("Cannot open %s: %s", path, strerror(errno));
}
if (!xbt_dict_is_empty(action_queues)) {
if (request->flags & RECV) {
print_request("New recv", request);
//FIXME: if receive is posted with a large size, but send is smaller, mailboxes may not match !
- if (request->size < sg_cfg_get_int("smpi/async_small_thres"))
+ if (request->flags & RMA || request->size < sg_cfg_get_int("smpi/async_small_thres"))
mailbox = smpi_process_mailbox_small();
else
mailbox = smpi_process_mailbox();
smpi_comm_use(request->comm);
request->action = simcall_comm_irecv(mailbox, request->buf,
&request->real_size, &match_recv,
+ &smpi_comm_copy_buffer_callback,
request, -1.0);
//integrate pseudo-timing for buffering of small messages, do not bother to execute the simcall if 0
int receiver = request->dst;//smpi_group_index(smpi_comm_group(request->comm), request->dst);
#ifdef HAVE_TRACING
- int rank = smpi_process_index();
+ int rank = request->src;
if (TRACE_smpi_view_internals()) {
TRACE_smpi_send(rank, rank, receiver,request->size);
}
/* return;*/
/* }*/
print_request("New send", request);
- if (request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
+ if (request->flags & RMA || request->size < sg_cfg_get_int("smpi/async_small_thres")) { // eager mode
mailbox = smpi_process_remote_mailbox_small(receiver);
}else{
XBT_DEBUG("Send request %p is not in the permanent receive mailbox (buf: %p)",request,request->buf);
if ( (! (request->flags & SSEND)) && (request->size < sg_cfg_get_int("smpi/send_is_detached_thres"))) {
void *oldbuf = NULL;
request->detached = 1;
+ XBT_DEBUG("Send request %p is detached", request);
request->refcount++;
if(request->old_type->has_subtype == 0){
oldbuf = request->buf;
&& ((char*)request->buf >= start_data_exe)
&& ((char*)request->buf < start_data_exe + size_data_exe )){
XBT_DEBUG("Privatization : We are sending from a zone inside global memory. Switch data segment ");
- switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(request->src);
}
buf = xbt_malloc(request->size);
memcpy(buf,oldbuf,request->size);
+ XBT_DEBUG("buf %p copied into %p",oldbuf,buf);
}
}
- XBT_DEBUG("Send request %p is detached; buf %p copied into %p",request,oldbuf,request->buf);
}
// we make a copy here, as the size is modified by simix, and we may reuse the request in another receive later
simcall_process_sleep(sleeptime);
XBT_DEBUG("sending size of %zu : sleep %f ", request->size, smpi_os(request->size));
}
-
request->action =
- simcall_comm_isend(mailbox, request->size, -1.0,
+ simcall_comm_isend(SIMIX_process_from_PID(request->src+1), mailbox, request->size, -1.0,
buf, request->real_size,
&match_send,
- &xbt_free, // how to free the userdata if a detached send fails
+ &xbt_free_f, // how to free the userdata if a detached send fails
+ &smpi_comm_copy_buffer_callback,
request,
// detach if msg size < eager/rdv switch limit
request->detached);
}
}
+
+// Build (but do not start) the send-side request of a one-sided RMA
+// operation. With op == MPI_OP_NULL this behaves like a plain put;
+// otherwise the ACCUMULATE flag is set and the reduction operator is
+// recorded on the request so the receiver can apply it on completion.
+// Caller owns the returned request (refcounted via build_request).
+MPI_Request smpi_rma_send_init(void *buf, int count, MPI_Datatype datatype,
+                            int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
+{
+  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+  if(op==MPI_OP_NULL){
+    request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf , count, datatype, src, dst, tag,
+                            comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED);
+  }else{
+    request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+                            comm, RMA | NON_PERSISTENT | ISEND | SEND | PREPARED | ACCUMULATE);
+    request->op = op;
+  }
+  return request;
+}
+
+// Build (but do not start) the receive-side request of a one-sided RMA
+// operation; mirror of smpi_rma_send_init. With op == MPI_OP_NULL the
+// incoming data simply replaces the target buffer; otherwise ACCUMULATE
+// is flagged and the operator stored so finish_wait can combine the
+// received data with the local buffer instead of overwriting it.
+MPI_Request smpi_rma_recv_init(void *buf, int count, MPI_Datatype datatype,
+                            int src, int dst, int tag, MPI_Comm comm, MPI_Op op)
+{
+  MPI_Request request = NULL; /* MC needs the comm to be set to NULL during the call */
+  if(op==MPI_OP_NULL){
+    request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+                            comm, RMA | NON_PERSISTENT | RECV | PREPARED);
+  }else{
+    request = build_request(buf==MPI_BOTTOM ? (void*)0 : buf, count, datatype, src, dst, tag,
+                            comm, RMA | NON_PERSISTENT | RECV | PREPARED | ACCUMULATE);
+    request->op = op;
+  }
+  return request;
+}
+
+
MPI_Request smpi_isend_init(void *buf, int count, MPI_Datatype datatype,
int dst, int tag, MPI_Comm comm)
{
return request;
}
-
-
MPI_Request smpi_irecv_init(void *buf, int count, MPI_Datatype datatype,
int src, int tag, MPI_Comm comm)
{
print_request("Finishing", req);
MPI_Datatype datatype = req->old_type;
- if(datatype->has_subtype == 1){
+ if((req->flags & ACCUMULATE) || (datatype->has_subtype == 1)){
if (!_xbt_replay_is_active()){
if( smpi_privatize_global_variables
&& ((char*)req->old_buf >= start_data_exe)
&& ((char*)req->old_buf < start_data_exe + size_data_exe )
){
XBT_VERB("Privatization : We are unserializing to a zone in global memory - Switch data segment ");
- switch_data_segment(smpi_process_index());
+ smpi_switch_data_segment(smpi_process_index());
}
}
- // This part handles the problem of non-contignous memory
- // the unserialization at the reception
- s_smpi_subtype_t *subtype = datatype->substruct;
- if(req->flags & RECV) {
- subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct);
+
+ if(datatype->has_subtype == 1){
+ // This part handles the problem of non-contignous memory
+ // the unserialization at the reception
+ s_smpi_subtype_t *subtype = datatype->substruct;
+ if(req->flags & RECV)
+ subtype->unserialize(req->buf, req->old_buf, req->real_size/smpi_datatype_size(datatype) , datatype->substruct, req->op);
+ if(req->detached == 0) free(req->buf);
+ }else if(req->flags & RECV){//apply op on contiguous buffer for accumulate
+ int n =req->real_size/smpi_datatype_size(datatype);
+ smpi_op_apply(req->op, req->buf, req->old_buf, &n, &datatype);
}
- if(req->detached == 0) free(req->buf);
}
smpi_comm_unuse(req->comm);
smpi_datatype_unuse(datatype);
int flag;
//assume that request is not MPI_REQUEST_NULL (filtered in PMPI_Test or smpi_mpi_testall before)
+
+ //to avoid deadlocks
+ //multiplier to the sleeptime, to increase speed of execution, each failed test will increase it
+ static int nsleeps = 1;
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
+
smpi_empty_status(status);
flag = 1;
if (!((*request)->flags & PREPARED)) {
flag = simcall_comm_test((*request)->action);
if (flag) {
finish_wait(request, status);
+ nsleeps=1;//reset the number of sleeps we will do next time
if (*request != MPI_REQUEST_NULL && !((*request)->flags & PERSISTENT))
*request = MPI_REQUEST_NULL;
+ }else{
+ nsleeps++;
}
}
return flag;
}
}
if(size > 0) {
+ //multiplier to the sleeptime, to increase speed of execution, each failed testany will increase it
+ static int nsleeps = 1;
+ if(smpi_test_sleep > 0) simcall_process_sleep(nsleeps*smpi_test_sleep);
+
i = simcall_comm_testany(comms);
// not MPI_UNDEFINED, as this is a simix return code
if(i != -1) {
if (requests[*index] != MPI_REQUEST_NULL && (requests[*index]->flags & NON_PERSISTENT))
requests[*index] = MPI_REQUEST_NULL;
flag = 1;
+ nsleeps=1;
+ }else{
+ nsleeps++;
}
}else{
//all requests are null or inactive, return true
comm, PERSISTENT | RECV);
//to avoid deadlock, we have to sleep some time here, or the timer won't advance and we will only do iprobe simcalls
- double sleeptime= sg_cfg_get_double("smpi/iprobe");
//multiplier to the sleeptime, to increase speed of execution, each failed iprobe will increase it
static int nsleeps = 1;
-
- simcall_process_sleep(sleeptime);
-
+ if(smpi_iprobe_sleep > 0) simcall_process_sleep(nsleeps*smpi_iprobe_sleep);
// behave like a receive, but don't do it
smx_rdv_t mailbox;
if (sg_cfg_get_int("smpi/async_small_thres")>0){
mailbox = smpi_process_mailbox_small();
XBT_DEBUG("trying to probe the perm recv mailbox");
- request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src, request->tag, &match_recv, (void*)request);
}
if (request->action==NULL){
mailbox = smpi_process_mailbox();
XBT_DEBUG("trying to probe the other mailbox");
- request->action = simcall_comm_iprobe(mailbox, request->src, request->tag, &match_recv, (void*)request);
+ request->action = simcall_comm_iprobe(mailbox, 0, request->src,request->tag, &match_recv, (void*)request);
}
if(request->action){