1 /* Copyright (c) 2008-2020. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
6 #include "src/mc/mc_request.hpp"
7 #include "src/include/mc/mc.h"
8 #include "src/kernel/activity/CommImpl.hpp"
9 #include "src/kernel/activity/MutexImpl.hpp"
10 #include "src/mc/ModelChecker.hpp"
11 #include "src/mc/checker/SimcallInspector.hpp"
12 #include "src/mc/mc_smx.hpp"
15 using simgrid::mc::remote;
16 using simgrid::simix::Simcall;
18 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(mc_request, mc, "Logging specific to MC (request)");
/* Extract the CommImpl pointer carried by a communication simcall.
 * Only the COMM_WAIT and COMM_TEST arms are visible in this excerpt; the
 * switch header, any default branch, and the closing brace are elided from
 * this listing — confirm against the full file. */
20 static inline simgrid::kernel::activity::CommImpl* MC_get_comm(smx_simcall_t r)
23 case Simcall::COMM_WAIT:
24 return simcall_comm_wait__getraw__comm(r);
25 case Simcall::COMM_TEST:
26 return simcall_comm_test__getraw__comm(r);
/* Extract the mailbox targeted by an asynchronous send/receive simcall.
 * Only the COMM_ISEND and COMM_IRECV arms are visible here; the switch
 * header, any default branch, and the closing brace are elided from this
 * listing — confirm against the full file. */
36 smx_mailbox_t MC_get_mbox(smx_simcall_t r)
37 case Simcall::COMM_ISEND:
38 return simcall_comm_isend__get__mbox(r);
39 case Simcall::COMM_IRECV:
40 return simcall_comm_irecv__get__mbox(r);
/* One-directional dependency test between two simcalls, used by the
 * model checker's partial-order reduction. The symmetric check in
 * request_depend() calls this in both argument orders.
 * NOTE(review): every `if` shown below has its body (a return statement)
 * elided from this listing, so the polarity of each rule (dependent vs.
 * independent) cannot be confirmed from this excerpt alone. */
50 bool request_depend_asymmetric(smx_simcall_t r1, smx_simcall_t r2)
/* A pair of one isend and one irecv is handled first, before any
 * comm object is extracted (the branch bodies are elided here). */
52 if (r1->call_ == Simcall::COMM_ISEND && r2->call_ == Simcall::COMM_IRECV)
55 if (r1->call_ == Simcall::COMM_IRECV && r2->call_ == Simcall::COMM_ISEND)
58 // Those are internal requests, we do not need indirection because those objects are copies:
59 const kernel::activity::CommImpl* synchro1 = MC_get_comm(r1);
60 const kernel::activity::CommImpl* synchro2 = MC_get_comm(r2);
/* isend/irecv (r1) versus a wait without timeout (r2): compare the
 * mailbox of r1 against the copied mailbox of the waited-on comm,
 * then check whether r1's issuer is one of the comm's two peers. */
62 if ((r1->call_ == Simcall::COMM_ISEND || r1->call_ == Simcall::COMM_IRECV) && r2->call_ == Simcall::COMM_WAIT) {
63 const kernel::activity::MailboxImpl* mbox = MC_get_mbox(r1);
65 if (mbox != synchro2->mbox_cpy
66 && simcall_comm_wait__get__timeout(r2) <= 0)
69 if ((r1->issuer_ != synchro2->src_actor_.get()) && (r1->issuer_ != synchro2->dst_actor_.get()) &&
70 simcall_comm_wait__get__timeout(r2) <= 0)
/* Same-direction comms with distinct buffers and a timeout-less wait. */
73 if ((r1->call_ == Simcall::COMM_ISEND) && (synchro2->type_ == kernel::activity::CommImpl::Type::SEND) &&
74 (synchro2->src_buff_ != simcall_comm_isend__get__src_buff(r1)) && simcall_comm_wait__get__timeout(r2) <= 0)
77 if ((r1->call_ == Simcall::COMM_IRECV) && (synchro2->type_ == kernel::activity::CommImpl::Type::RECEIVE) &&
78 (synchro2->dst_buff_ != simcall_comm_irecv__get__dst_buff(r1)) && simcall_comm_wait__get__timeout(r2) <= 0)
82 /* FIXME: the following rule assumes that the result of the isend/irecv call is not stored in a buffer used in the
/* NOTE(review): the two lines below access `call` (no trailing underscore),
 * unlike `call_` everywhere else in this file; in SimGrid upstream this
 * fragment sits inside a disabled (`#if 0`) region whose guards are elided
 * from this listing — confirm against the full file before touching it. */
85 if((r1->call == Simcall::COMM_ISEND || r1->call == Simcall::COMM_IRECV)
86 && r2->call == Simcall::COMM_TEST)
/* A wait on a comm that still lacks one of its two peers. */
90 if (r1->call_ == Simcall::COMM_WAIT && (r2->call_ == Simcall::COMM_WAIT || r2->call_ == Simcall::COMM_TEST) &&
91 (synchro1->src_actor_.get() == nullptr || synchro1->dst_actor_.get() == nullptr))
/* A test whose comm is null or whose buffers are not both set. */
94 if (r1->call_ == Simcall::COMM_TEST &&
95 (simcall_comm_test__get__comm(r1) == nullptr || synchro1->src_buff_ == nullptr || synchro1->dst_buff_ == nullptr))
/* test vs. wait on comms that share both the src and dst buffers. */
98 if (r1->call_ == Simcall::COMM_TEST && r2->call_ == Simcall::COMM_WAIT &&
99 synchro1->src_buff_ == synchro2->src_buff_ && synchro1->dst_buff_ == synchro2->dst_buff_)
/* wait vs. test where all four buffers are set and pairwise distinct. */
102 if (r1->call_ == Simcall::COMM_WAIT && r2->call_ == Simcall::COMM_TEST && synchro1->src_buff_ != nullptr &&
103 synchro1->dst_buff_ != nullptr && synchro2->src_buff_ != nullptr && synchro2->dst_buff_ != nullptr &&
104 synchro1->dst_buff_ != synchro2->src_buff_ && synchro1->dst_buff_ != synchro2->dst_buff_ &&
105 synchro2->dst_buff_ != synchro1->src_buff_)
111 // Those are internal_req
/* Symmetric dependency test between two simcalls, used by the model
 * checker's independence/partial-order-reduction machinery.
 * NOTE(review): the branch bodies after lines 114, 119, 135 and 139 are
 * elided from this listing, as is the end of the switch and the function's
 * final return — confirm the exact truth values against the full file. */
112 bool request_depend(smx_simcall_t req1, smx_simcall_t req2)
/* Requests issued by the same actor are handled first (body elided). */
114 if (req1->issuer_ == req2->issuer_)
117 /* Wait with timeout transitions are not considered by the independence theorem, thus we consider them as dependent with all other transitions */
118 if ((req1->call_ == Simcall::COMM_WAIT && simcall_comm_wait__get__timeout(req1) > 0) ||
119 (req2->call_ == Simcall::COMM_WAIT && simcall_comm_wait__get__timeout(req2) > 0))
/* Mixed call kinds: dependent only if dependent in both directions. */
122 if (req1->call_ != req2->call_)
123 return request_depend_asymmetric(req1, req2) && request_depend_asymmetric(req2, req1);
125 // Those are internal requests, we do not need indirection because those objects are copies:
126 const kernel::activity::CommImpl* synchro1 = MC_get_comm(req1);
127 const kernel::activity::CommImpl* synchro2 = MC_get_comm(req2);
/* Same call kind on both sides: two isends (resp. irecvs) depend iff
 * they target the same mailbox; two waits are compared by buffer. */
129 switch (req1->call_) {
130 case Simcall::COMM_ISEND:
131 return simcall_comm_isend__get__mbox(req1) == simcall_comm_isend__get__mbox(req2);
132 case Simcall::COMM_IRECV:
133 return simcall_comm_irecv__get__mbox(req1) == simcall_comm_irecv__get__mbox(req2);
134 case Simcall::COMM_WAIT:
135 if (synchro1->src_buff_ == synchro2->src_buff_ && synchro1->dst_buff_ == synchro2->dst_buff_)
/* All four buffers set and pairwise distinct (body elided). */
137 if (synchro1->src_buff_ != nullptr && synchro1->dst_buff_ != nullptr && synchro2->src_buff_ != nullptr &&
138 synchro2->dst_buff_ != nullptr && synchro1->dst_buff_ != synchro2->src_buff_ &&
139 synchro1->dst_buff_ != synchro2->dst_buff_ && synchro2->dst_buff_ != synchro1->src_buff_)
148 } // namespace simgrid
/* Whether the idx-th communication of a (possibly vector) wait/test simcall
 * is enabled, i.e. the comm has both a source and a destination actor.
 * The comm pointer lives in the model-checked process, so it is fetched via
 * the remote-memory reader before being inspected locally.
 * NOTE(review): the `break` statements between switch arms and any default
 * branch are elided from this listing — confirm against the full file. */
153 bool request_is_enabled_by_idx(smx_simcall_t req, unsigned int idx)
155 kernel::activity::CommImpl* remote_act = nullptr;
156 switch (req->call_) {
157 case Simcall::COMM_WAIT:
158 /* FIXME: check also that src and dst processes are not suspended */
159 remote_act = simcall_comm_wait__getraw__comm(req);
/* For the vector variants, idx selects one comm out of the remote array. */
162 case Simcall::COMM_WAITANY:
163 remote_act = mc_model_checker->get_remote_simulation().read(remote(simcall_comm_waitany__get__comms(req) + idx));
166 case Simcall::COMM_TESTANY:
167 remote_act = mc_model_checker->get_remote_simulation().read(remote(simcall_comm_testany__get__comms(req) + idx));
/* Copy the CommImpl out of the remote process, then check both peers. */
174 Remote<kernel::activity::CommImpl> temp_comm;
175 mc_model_checker->get_remote_simulation().read(temp_comm, remote(remote_act));
176 const kernel::activity::CommImpl* comm = temp_comm.get_buffer();
177 return comm->src_actor_.get() && comm->dst_actor_.get();
181 } // namespace simgrid