#include "src/kernel/activity/MutexImpl.hpp"
#include "src/mc/mc_replay.hpp"
#include "src/plugins/vm/VirtualMachineImpl.hpp"
-#include "src/simix/smx_host_private.hpp"
-
-XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(simix);
#include "popping_bodies.cpp"
-/**
- * @ingroup simix_process_management
- * @brief Creates a synchro that may involve parallel computation on
- * several hosts and communication between them.
- *
- * @param name Name of the execution synchro to create
- * @param host_nb Number of hosts where the synchro will be executed
- * @param host_list Array (of size host_nb) of hosts where the synchro will be executed
- * @param flops_amount Array (of size host_nb) of computation amount of hosts (in bytes)
- * @param bytes_amount Array (of size host_nb * host_nb) representing the communication
- * amount between each pair of hosts
- * @param rate the SURF action rate
- * @param timeout timeout
- * @return A new SIMIX execution synchronization
- */
-smx_activity_t simcall_execution_parallel_start(const std::string& name, int host_nb, const sg_host_t* host_list,
- const double* flops_amount, const double* bytes_amount, double rate,
- double timeout)
-{
- /* Check that we are not mixing VMs and PMs in the parallel task */
- bool is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[0]));
- for (int i = 1; i < host_nb; i++) {
- bool tmp_is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[i]));
- xbt_assert(is_a_vm == tmp_is_a_vm, "parallel_execute: mixing VMs and PMs is not supported (yet).");
- }
-
- /* checking for infinite values */
- for (int i = 0 ; i < host_nb ; ++i) {
- if (flops_amount != nullptr)
- xbt_assert(std::isfinite(flops_amount[i]), "flops_amount[%d] is not finite!", i);
- if (bytes_amount != nullptr) {
- for (int j = 0 ; j < host_nb ; ++j) {
- xbt_assert(std::isfinite(bytes_amount[i + host_nb * j]),
- "bytes_amount[%d+%d*%d] is not finite!", i, host_nb, j);
- }
- }
- }
-
- xbt_assert(std::isfinite(rate), "rate is not finite!");
- return simgrid::simix::simcall([name, host_nb, host_list, flops_amount, bytes_amount, rate, timeout] {
- return SIMIX_execution_parallel_start(std::move(name), host_nb, host_list, flops_amount, bytes_amount, rate,
- timeout);
- });
-}
-
/**
* @ingroup simix_host_management
* @brief Waits for the completion of an execution synchro and destroy it.
return (e_smx_state_t)simcall_BODY_execution_wait(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
}
-e_smx_state_t simcall_execution_test(const smx_activity_t& execution)
+bool simcall_execution_test(const smx_activity_t& execution)
{
- return (e_smx_state_t)simcall_BODY_execution_test(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
+ return simcall_BODY_execution_test(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
}
void simcall_process_join(smx_actor_t process, double timeout)
comm = nullptr;
}
else {
- simcall_BODY_comm_send(sender, mbox, task_size, rate, src_buff, src_buff_size,
- match_fun, copy_data_fun, data, timeout);
+ simcall_BODY_comm_send(sender, mbox, task_size, rate, static_cast<unsigned char*>(src_buff), src_buff_size,
+ match_fun, copy_data_fun, data, timeout);
}
}
int (*match_fun)(void*, void*, simgrid::kernel::activity::CommImpl*),
void (*clean_fun)(void*),
void (*copy_data_fun)(simgrid::kernel::activity::CommImpl*, void*, size_t),
- void* data, int detached)
+ void* data, bool detached)
{
/* checking for infinite values */
xbt_assert(std::isfinite(task_size), "task_size is not finite!");
xbt_assert(mbox, "No rendez-vous point defined for isend");
- return simcall_BODY_comm_isend(sender, mbox, task_size, rate, src_buff,
- src_buff_size, match_fun,
- clean_fun, copy_data_fun, data, detached);
+ return simcall_BODY_comm_isend(sender, mbox, task_size, rate, static_cast<unsigned char*>(src_buff), src_buff_size,
+ match_fun, clean_fun, copy_data_fun, data, detached);
}
/**
comm = nullptr;
}
else {
- simcall_BODY_comm_recv(receiver, mbox, dst_buff, dst_buff_size,
- match_fun, copy_data_fun, data, timeout, rate);
+ simcall_BODY_comm_recv(receiver, mbox, static_cast<unsigned char*>(dst_buff), dst_buff_size, match_fun,
+ copy_data_fun, data, timeout, rate);
}
}
/**
{
xbt_assert(mbox, "No rendez-vous point defined for irecv");
- return simcall_BODY_comm_irecv(receiver, mbox, dst_buff, dst_buff_size,
- match_fun, copy_data_fun, data, rate);
+ return simcall_BODY_comm_irecv(receiver, mbox, static_cast<unsigned char*>(dst_buff), dst_buff_size, match_fun,
+ copy_data_fun, data, rate);
}
/**
* @ingroup simix_comm_management
*
*/
-int simcall_comm_test(const smx_activity_t& comm)
+bool simcall_comm_test(const smx_activity_t& comm)
{
return simcall_BODY_comm_test(static_cast<simgrid::kernel::activity::CommImpl*>(comm.get()));
}
[exec, bound] { boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(exec)->set_bound(bound); });
}
+// deprecated — kept only for backward compatibility with existing callers
smx_activity_t simcall_execution_start(const std::string& name, const std::string& category, double flops_amount,
double priority, double bound, sg_host_t host)
{
return simgrid::simix::simcall([name, category, flops_amount, priority, bound, host] {
- return simgrid::kernel::activity::ExecImplPtr(
- new simgrid::kernel::activity::ExecImpl(std::move(name), std::move(category)))
- ->set_host(host)
- ->start(flops_amount, priority, bound);
+    simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl();
+    // NOTE(review): 'exec' is a raw owning pointer until the ExecImplPtr below — if a setter or
+    // start() throws, it leaks; consider constructing the ExecImplPtr first and chaining through it
+ (*exec)
+ .set_name(name)
+ .set_tracing_category(category)
+ .set_host(host)
+ .set_priority(priority)
+ .set_bound(bound)
+ .set_flops_amount(flops_amount)
+ .start();
+ return simgrid::kernel::activity::ExecImplPtr(exec);
+ });
+}
+
+// deprecated — kept only for backward compatibility with existing callers
+smx_activity_t simcall_execution_parallel_start(const std::string& name, int host_nb, const sg_host_t* host_list,
+ const double* flops_amount, const double* bytes_amount, double rate,
+ double timeout)
+{
+ /* Check that we are not mixing VMs and PMs in the parallel task */
+ bool is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[0]));
+ for (int i = 1; i < host_nb; i++) {
+ bool tmp_is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[i]));
+ xbt_assert(is_a_vm == tmp_is_a_vm, "parallel_execute: mixing VMs and PMs is not supported (yet).");
+ }
+
+ /* checking for infinite values */
+ for (int i = 0; i < host_nb; ++i) {
+ if (flops_amount != nullptr)
+ xbt_assert(std::isfinite(flops_amount[i]), "flops_amount[%d] is not finite!", i);
+ if (bytes_amount != nullptr) {
+ for (int j = 0; j < host_nb; ++j) {
+ xbt_assert(std::isfinite(bytes_amount[i + host_nb * j]), "bytes_amount[%d+%d*%d] is not finite!", i, host_nb,
+ j);
+ }
+ }
+ }
+  xbt_assert(std::isfinite(rate), "rate is not finite!");
+  // FIXME(review): 'rate' is validated above but is neither captured by the simcall lambda below nor
+  // applied to the exec — the removed code forwarded it to SIMIX_execution_parallel_start. Confirm
+  // that dropping it is intentional.
+
+ std::vector<simgrid::s4u::Host*> hosts(host_list, host_list + host_nb);
+ std::vector<double> flops_parallel_amount;
+ std::vector<double> bytes_parallel_amount;
+ if (flops_amount != nullptr)
+ flops_parallel_amount = std::vector<double>(flops_amount, flops_amount + host_nb);
+ if (bytes_amount != nullptr)
+ bytes_parallel_amount = std::vector<double>(bytes_amount, bytes_amount + host_nb * host_nb);
+ return simgrid::simix::simcall([name, hosts, flops_parallel_amount, bytes_parallel_amount, timeout] {
+    simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl();
+    // NOTE(review): raw owning pointer until wrapped in ExecImplPtr below — a throw in a setter or
+    // start() would leak it; consider wrapping immediately after allocation
+ (*exec)
+ .set_name(name)
+ .set_hosts(hosts)
+ .set_timeout(timeout)
+ .set_flops_amounts(flops_parallel_amount)
+ .set_bytes_amounts(bytes_parallel_amount)
+ .start();
+ return simgrid::kernel::activity::ExecImplPtr(exec);
});
}