Logo AND Algorithmique Numérique Distribuée

Public GIT Repository
Merge branch 'master' of https://framagit.org/simgrid/simgrid
[simgrid.git] / src / kernel / activity / ExecImpl.cpp
1 /* Copyright (c) 2007-2023. The SimGrid Team. All rights reserved.          */
2
3 /* This program is free software; you can redistribute it and/or modify it
4  * under the terms of the license (GNU LGPL) which comes with this package. */
5
6 #include <simgrid/Exception.hpp>
7 #include <simgrid/kernel/routing/NetPoint.hpp>
8 #include <simgrid/modelchecker.h>
9 #include <simgrid/s4u/Engine.hpp>
10
11 #include "src/kernel/activity/ExecImpl.hpp"
12 #include "src/kernel/actor/ActorImpl.hpp"
13 #include "src/kernel/actor/SimcallObserver.hpp"
14 #include "src/kernel/resource/CpuImpl.hpp"
15 #include "src/kernel/resource/HostImpl.hpp"
16 #include "src/mc/mc_replay.hpp"
17
18 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(ker_cpu, kernel, "Kernel cpu-related synchronization");
19
20 namespace simgrid::kernel::activity {
21
22 ExecImpl::ExecImpl()
23 {
24   piface_                = new s4u::Exec(this);
25   actor::ActorImpl* self = actor::ActorImpl::self();
26   if (self) {
27     set_actor(self);
28     self->activities_.insert(this);
29   }
30 }
31
32 ExecImpl& ExecImpl::set_host(s4u::Host* host)
33 {
34   ActivityImpl::set_hosts({host});
35   return *this;
36 }
37
38 ExecImpl& ExecImpl::set_hosts(const std::vector<s4u::Host*>& hosts)
39 {
40   ActivityImpl::set_hosts(hosts);
41   return *this;
42 }
43
44 ExecImpl& ExecImpl::set_flops_amount(double flops_amount)
45 {
46   flops_amounts_.assign(1, flops_amount);
47   return *this;
48 }
49
50 ExecImpl& ExecImpl::set_flops_amounts(const std::vector<double>& flops_amounts)
51 {
52   flops_amounts_ = flops_amounts;
53   return *this;
54 }
55
56 ExecImpl& ExecImpl::set_bytes_amounts(const std::vector<double>& bytes_amounts)
57 {
58   bytes_amounts_ = bytes_amounts;
59
60   return *this;
61 }
62 ExecImpl& ExecImpl::set_thread_count(int thread_count)
63 {
64   thread_count_ = thread_count;
65
66   return *this;
67 }
68
/** @brief Actually start this execution on the platform models.
 *
 *  Switches the activity to RUNNING and, unless the model checker or the replay
 *  mechanism is driving the simulation (in which case no model action exists),
 *  creates the matching resource::Action:
 *   - one host, one thread: a plain CPU execution;
 *   - one host, several threads: a threaded execution through the host model;
 *   - several hosts: a parallel execution through the host model.
 */
ExecImpl* ExecImpl::start()
{
  set_state(State::RUNNING);
  if (not MC_is_active() && not MC_record_replay_is_active()) {
    if (get_hosts().size() == 1) {
      xbt_assert(not flops_amounts_.empty(), "Cannot start Exec: no flops_amount defined.");
      if (thread_count_ == 1) {
        // Sequential execution: ask the host's CPU model directly, with the configured speed bound
        model_action_ = get_host()->get_cpu()->execution_start(flops_amounts_.front(), bound_);
        model_action_->set_sharing_penalty(sharing_penalty_);
      } else {
        // Multi-threaded execution: delegate to the host model of the englobing zone
        auto host_model = get_host()->get_netpoint()->get_englobing_zone()->get_host_model();
        model_action_   = host_model->execute_thread(get_host(), flops_amounts_.front(), thread_count_);
      }
      model_action_->set_category(get_tracing_category());
    } else {
      // get the model from first host since we have only 1 by now
      auto host_model = get_host()->get_netpoint()->get_englobing_zone()->get_host_model();
      model_action_   = host_model->execute_parallel(get_hosts(), flops_amounts_.data(), bytes_amounts_.data(), -1);
    }
    // Link the model action back to this activity, and record when it actually started
    model_action_->set_activity(this);
    set_start_time(model_action_->get_start_time());
  }

  XBT_DEBUG("Create execute synchro %p: %s", this, get_cname());
  return this;
}
95
96 double ExecImpl::get_remaining() const
97 {
98   if (get_state() == State::WAITING || get_state() == State::FAILED)
99     return flops_amounts_.front();
100   return ActivityImpl::get_remaining();
101 }
102
103 double ExecImpl::get_seq_remaining_ratio()
104 {
105   if (get_state() == State::WAITING)
106     return 1;
107   return (model_action_ == nullptr) ? 0 : model_action_->get_remains() / model_action_->get_cost();
108 }
109
110 double ExecImpl::get_par_remaining_ratio()
111 {
112   // parallel task: their remain is already between 0 and 1
113   if (get_state() == State::WAITING)
114     return 1;
115   return (model_action_ == nullptr) ? 0 : model_action_->get_remains();
116 }
117
118 ExecImpl& ExecImpl::set_bound(double bound)
119 {
120   bound_ = bound;
121   return *this;
122 }
123
124 ExecImpl& ExecImpl::set_sharing_penalty(double sharing_penalty)
125 {
126   sharing_penalty_ = sharing_penalty;
127   return *this;
128 }
129
130 ExecImpl& ExecImpl::update_sharing_penalty(double sharing_penalty)
131 {
132   sharing_penalty_ = sharing_penalty;
133   model_action_->set_sharing_penalty(sharing_penalty);
134   return *this;
135 }
136
137 void ExecImpl::set_exception(actor::ActorImpl* issuer)
138 {
139   switch (get_state()) {
140     case State::FAILED:
141       static_cast<s4u::Exec*>(get_iface())->complete(s4u::Activity::State::FAILED);
142       if (issuer->get_host()->is_on())
143         issuer->exception_ = std::make_exception_ptr(HostFailureException(XBT_THROW_POINT, "Host failed"));
144       else /* else, the actor will be killed with no possibility to survive */
145         issuer->set_wannadie();
146       break;
147
148     case State::CANCELED:
149       issuer->exception_ = std::make_exception_ptr(CancelException(XBT_THROW_POINT, "Execution Canceled"));
150       break;
151
152     case State::TIMEOUT:
153       issuer->exception_ = std::make_exception_ptr(TimeoutException(XBT_THROW_POINT, "Timeouted"));
154       break;
155
156     default:
157       xbt_assert(get_state() == State::DONE, "Internal error in ExecImpl::finish(): unexpected synchro state %s",
158                  get_state_str());
159   }
160 }
/** @brief Terminate this execution: settle its final state and wake every waiting actor. */
void ExecImpl::finish()
{
  XBT_DEBUG("ExecImpl::finish() in state %s", get_state_str());
  if (model_action_ != nullptr) {
    if (auto const& hosts = get_hosts();
        std::any_of(hosts.begin(), hosts.end(), [](const s4u::Host* host) { return not host->is_on(); })) {
      /* If one of the hosts running the synchro failed, notice it. This way, the asking
       * process can be killed if it runs on that host itself */
      set_state(State::FAILED);
    } else if (model_action_->get_state() == resource::Action::State::FAILED) {
      /* If all the hosts are running the synchro didn't fail, then the synchro was canceled */
      set_state(State::CANCELED);
    } else {
      set_state(State::DONE);
    }

    clean_action();
  }

  // Detach this activity from the actor that issued it, if any
  if (get_actor() != nullptr)
    get_actor()->activities_.erase(this);

  // Answer (or kill) every actor still blocked on this activity
  while (not simcalls_.empty()) {
    actor::Simcall* simcall = simcalls_.front();
    simcalls_.pop_front();

    if (simcall->call_ == actor::Simcall::Type::NONE) // FIXME: maybe a better way to handle this case
      continue;                                       // if process handling comm is killed

    handle_activity_waitany(simcall);

    // Convert the final state into the exception the issuer should see (if any)
    set_exception(simcall->issuer_);

    simcall->issuer_->waiting_synchro_ = nullptr;
    /* Fail the process if the host is down */
    if (simcall->issuer_->get_host()->is_on())
      simcall->issuer_->simcall_answer();
    else
      simcall->issuer_->set_wannadie();
  }
}
202
203 void ExecImpl::reset()
204 {
205   clear_hosts();
206   set_start_time(-1.0);
207 }
208
/** @brief Move a running execution to another host.
 *
 *  Creates an equivalent CPU action on the destination, carrying over the cost,
 *  progress (remains), sharing penalty and user bound of the current action,
 *  then cancels and releases the old one. Under model-checking/replay there is
 *  no model action to move, so only the on_migration callbacks fire.
 */
ActivityImpl* ExecImpl::migrate(s4u::Host* to)
{
  if (not MC_is_active() && not MC_record_replay_is_active()) {
    resource::Action* old_action = this->model_action_;
    resource::Action* new_action = to->get_cpu()->execution_start(old_action->get_cost(), old_action->get_user_bound());
    new_action->set_remains(old_action->get_remains());
    new_action->set_activity(this);
    new_action->set_sharing_penalty(old_action->get_sharing_penalty());
    new_action->set_user_bound(old_action->get_user_bound());

    /* Detach the old action from this activity before canceling it, so that its
     * termination does not finish the activity itself */
    old_action->set_activity(nullptr);
    old_action->cancel();
    old_action->unref();
    this->model_action_ = new_action;
  }

  on_migration(*this, to);
  return this;
}
228
/*************
 * Callbacks *
 *************/
/* Signal fired from migrate() once the execution has been moved to its new host */
xbt::signal<void(ExecImpl const&, s4u::Host*)> ExecImpl::on_migration;
233
234 } // namespace simgrid::kernel::activity