port a blocking simcall to the modernity
[simgrid.git] / src / simix / libsmx.cpp
index 1da03e0..010360d 100644
 #include "src/kernel/activity/MutexImpl.hpp"
 #include "src/mc/mc_replay.hpp"
 #include "src/plugins/vm/VirtualMachineImpl.hpp"
-#include "src/simix/smx_host_private.hpp"
-
-XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(simix);
 
 #include "popping_bodies.cpp"
 
-/**
- * @ingroup simix_process_management
- * @brief Creates a synchro that may involve parallel computation on
- * several hosts and communication between them.
- *
- * @param name Name of the execution synchro to create
- * @param host_nb Number of hosts where the synchro will be executed
- * @param host_list Array (of size host_nb) of hosts where the synchro will be executed
- * @param flops_amount Array (of size host_nb) of computation amount of hosts (in flops)
- * @param bytes_amount Array (of size host_nb * host_nb) representing the communication
- * amount between each pair of hosts
- * @param rate the SURF action rate
- * @param timeout timeout
- * @return A new SIMIX execution synchronization
- */
-smx_activity_t simcall_execution_parallel_start(const std::string& name, int host_nb, const sg_host_t* host_list,
-                                                const double* flops_amount, const double* bytes_amount, double rate,
-                                                double timeout)
-{
-  /* checking for infinite values */
-  for (int i = 0 ; i < host_nb ; ++i) {
-    if (flops_amount != nullptr)
-      xbt_assert(std::isfinite(flops_amount[i]), "flops_amount[%d] is not finite!", i);
-    if (bytes_amount != nullptr) {
-      for (int j = 0 ; j < host_nb ; ++j) {
-        xbt_assert(std::isfinite(bytes_amount[i + host_nb * j]),
-                   "bytes_amount[%d+%d*%d] is not finite!", i, host_nb, j);
-      }
-    }
-  }
-
-  xbt_assert(std::isfinite(rate), "rate is not finite!");
-
-  return simgrid::simix::simcall([name, host_nb, host_list, flops_amount, bytes_amount, rate, timeout] {
-    return SIMIX_execution_parallel_start(std::move(name), host_nb, host_list, flops_amount, bytes_amount, rate,
-                                          timeout);
-  });
-}
-
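
The doxygen block removed above documents the array layout that the assertions rely on: flops_amount holds one value per host and bytes_amount is a flat host_nb x host_nb matrix indexed as bytes_amount[i + host_nb * j]. A minimal caller-side sketch of that layout, assuming two hypothetical host handles h0 and h1 (illustration only, not part of this diff):

  const int host_nb = 2;
  sg_host_t host_list[host_nb] = {h0, h1};            // hosts taking part in the parallel task
  double flops_amount[host_nb] = {1e9, 5e8};          // one computation amount per host, in flops
  double bytes_amount[host_nb * host_nb] = {0.0, 1e6, // flattened matrix of communication amounts;
                                            1e6, 0.0}; // the entry for hosts (i, j) sits at i + host_nb * j
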
 /**
  * @ingroup simix_host_management
 * @brief Waits for the completion of an execution synchro and destroys it.
@@ -76,14 +34,19 @@ e_smx_state_t simcall_execution_wait(const smx_activity_t& execution)
   return (e_smx_state_t)simcall_BODY_execution_wait(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
 }
 
-e_smx_state_t simcall_execution_test(const smx_activity_t& execution)
+bool simcall_execution_test(const smx_activity_t& execution)
+{
+  return simcall_BODY_execution_test(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
+}
+
+unsigned int simcall_execution_waitany_for(simgrid::kernel::activity::ExecImpl* execs[], size_t count, double timeout)
 {
-  return (e_smx_state_t)simcall_BODY_execution_test(static_cast<simgrid::kernel::activity::ExecImpl*>(execution.get()));
+  return simcall_BODY_execution_waitany_for(execs, count, timeout);
 }
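
Two caller-facing changes land in this hunk: simcall_execution_test now returns a plain bool, and the new simcall_execution_waitany_for takes a raw array of ExecImpl pointers. A hedged usage sketch; exec0, exec1 and the negative "no timeout" value are illustrative assumptions, not taken from this diff:

  // Sketch only: exec0 and exec1 are smx_activity_t handles of previously started executions.
  if (simcall_execution_test(exec0)) {
    /* exec0 has already completed; the result is now a bool, not an e_smx_state_t */
  }
  simgrid::kernel::activity::ExecImpl* pending[2] = {
      static_cast<simgrid::kernel::activity::ExecImpl*>(exec0.get()),
      static_cast<simgrid::kernel::activity::ExecImpl*>(exec1.get())};
  unsigned int changed = simcall_execution_waitany_for(pending, 2, -1.0); // index of the execution that finished
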
 
 void simcall_process_join(smx_actor_t process, double timeout)
 {
-  simcall_BODY_process_join(process, timeout);
+  SIMIX_process_self()->join(process, timeout);
 }
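
This is likely the blocking simcall the commit title refers to: the generated simcall_BODY_process_join marshalling is dropped and the wrapper calls join() on the calling actor directly. A hedged before/after sketch; what happens inside join() is an assumption, not visible in this diff:

  // Before: the request was marshalled through the generated simcall body and
  // answered by the simulation kernel (maestro):
  //   simcall_BODY_process_join(process, timeout);
  // After: the calling actor joins `process` itself; ActorImpl::join() presumably
  // blocks the caller until `process` terminates or `timeout` expires.
  SIMIX_process_self()->join(process, timeout);
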
 
 /**
@@ -137,8 +100,8 @@ void simcall_comm_send(smx_actor_t sender, smx_mailbox_t mbox, double task_size,
     comm = nullptr;
   }
   else {
-    simcall_BODY_comm_send(sender, mbox, task_size, rate, src_buff, src_buff_size,
-                         match_fun, copy_data_fun, data, timeout);
+    simcall_BODY_comm_send(sender, mbox, task_size, rate, static_cast<unsigned char*>(src_buff), src_buff_size,
+                           match_fun, copy_data_fun, data, timeout);
   }
 }
 
@@ -150,7 +113,7 @@ smx_activity_t simcall_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double
                                   int (*match_fun)(void*, void*, simgrid::kernel::activity::CommImpl*),
                                   void (*clean_fun)(void*),
                                   void (*copy_data_fun)(simgrid::kernel::activity::CommImpl*, void*, size_t),
-                                  void* data, int detached)
+                                  void* data, bool detached)
 {
   /* checking for infinite values */
   xbt_assert(std::isfinite(task_size), "task_size is not finite!");
@@ -158,9 +121,8 @@ smx_activity_t simcall_comm_isend(smx_actor_t sender, smx_mailbox_t mbox, double
 
   xbt_assert(mbox, "No rendez-vous point defined for isend");
 
-  return simcall_BODY_comm_isend(sender, mbox, task_size, rate, src_buff,
-                                 src_buff_size, match_fun,
-                                 clean_fun, copy_data_fun, data, detached);
+  return simcall_BODY_comm_isend(sender, mbox, task_size, rate, static_cast<unsigned char*>(src_buff), src_buff_size,
+                                 match_fun, clean_fun, copy_data_fun, data, detached);
 }
 
 /**
@@ -183,8 +145,8 @@ void simcall_comm_recv(smx_actor_t receiver, smx_mailbox_t mbox, void* dst_buff,
     comm = nullptr;
   }
   else {
-    simcall_BODY_comm_recv(receiver, mbox, dst_buff, dst_buff_size,
-                           match_fun, copy_data_fun, data, timeout, rate);
+    simcall_BODY_comm_recv(receiver, mbox, static_cast<unsigned char*>(dst_buff), dst_buff_size, match_fun,
+                           copy_data_fun, data, timeout, rate);
   }
 }
 /**
@@ -197,8 +159,8 @@ smx_activity_t simcall_comm_irecv(smx_actor_t receiver, smx_mailbox_t mbox, void
 {
   xbt_assert(mbox, "No rendez-vous point defined for irecv");
 
-  return simcall_BODY_comm_irecv(receiver, mbox, dst_buff, dst_buff_size,
-                                 match_fun, copy_data_fun, data, rate);
+  return simcall_BODY_comm_irecv(receiver, mbox, static_cast<unsigned char*>(dst_buff), dst_buff_size, match_fun,
+                                 copy_data_fun, data, rate);
 }
 
 /**
@@ -224,6 +186,11 @@ unsigned int simcall_comm_waitany(smx_activity_t comms[], size_t count, double t
   return simcall_BODY_comm_waitany(rcomms.get(), count, timeout);
 }
 
+unsigned int simcall_comm_waitany(simgrid::kernel::activity::CommImpl* comms[], size_t count, double timeout)
+{
+  return simcall_BODY_comm_waitany(comms, count, timeout);
+}
+
 /**
  * @ingroup simix_comm_management
  */
@@ -238,6 +205,13 @@ int simcall_comm_testany(smx_activity_t comms[], size_t count)
   return simcall_BODY_comm_testany(rcomms.get(), count);
 }
 
+int simcall_comm_testany(simgrid::kernel::activity::CommImpl* comms[], size_t count)
+{
+  if (count == 0)
+    return -1;
+  return simcall_BODY_comm_testany(comms, count);
+}
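
The new raw-pointer testany overload guards the empty case itself. A hedged usage sketch; comm0 and comm1 are hypothetical smx_activity_t handles, and reading -1 as "nothing completed" is inferred from the guard above rather than spelled out in this diff:

  // Sketch only: build the raw CommImpl array the overload expects.
  simgrid::kernel::activity::CommImpl* comms[2] = {
      static_cast<simgrid::kernel::activity::CommImpl*>(comm0.get()),
      static_cast<simgrid::kernel::activity::CommImpl*>(comm1.get())};
  int changed_pos = simcall_comm_testany(comms, 2);
  if (changed_pos < 0) {
    /* the set was empty, or no communication has completed yet */
  } else {
    /* comms[changed_pos] has completed */
  }
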
+
 /**
  * @ingroup simix_comm_management
  */
@@ -251,7 +225,7 @@ void simcall_comm_wait(const smx_activity_t& comm, double timeout)
  * @ingroup simix_comm_management
  *
  */
-int simcall_comm_test(const smx_activity_t& comm)
+bool simcall_comm_test(const smx_activity_t& comm)
 {
   return simcall_BODY_comm_test(static_cast<simgrid::kernel::activity::CommImpl*>(comm.get()));
 }
@@ -373,10 +347,10 @@ const char *SIMIX_simcall_name(e_smx_simcall_t kind) {
 namespace simgrid {
 namespace simix {
 
-void unblock(smx_actor_t process)
+void unblock(smx_actor_t actor)
 {
   xbt_assert(SIMIX_is_maestro());
-  SIMIX_simcall_answer(&process->simcall);
+  actor->simcall_answer();
 }
 } // namespace simix
 } // namespace simgrid
@@ -394,12 +368,6 @@ void simcall_execution_cancel(smx_activity_t exec)
 {
   simgrid::simix::simcall([exec] { boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(exec)->cancel(); });
 }
-void simcall_execution_set_priority(smx_activity_t exec, double priority)
-{
-  simgrid::simix::simcall([exec, priority] {
-    boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(exec)->set_priority(priority);
-  });
-}
 
 void simcall_execution_set_bound(smx_activity_t exec, double bound)
 {
@@ -407,13 +375,66 @@ void simcall_execution_set_bound(smx_activity_t exec, double bound)
       [exec, bound] { boost::static_pointer_cast<simgrid::kernel::activity::ExecImpl>(exec)->set_bound(bound); });
 }
 
+// deprecated
 smx_activity_t simcall_execution_start(const std::string& name, const std::string& category, double flops_amount,
-                                       double priority, double bound, sg_host_t host)
+                                       double sharing_penalty, double bound, sg_host_t host)
+{
+  return simgrid::simix::simcall([name, category, flops_amount, sharing_penalty, bound, host] {
+    simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl();
+    (*exec)
+        .set_name(name)
+        .set_tracing_category(category)
+        .set_host(host)
+        .set_sharing_penalty(sharing_penalty)
+        .set_bound(bound)
+        .set_flops_amount(flops_amount)
+        .start();
+    return simgrid::kernel::activity::ExecImplPtr(exec);
+  });
+}
+
+// deprecated
+smx_activity_t simcall_execution_parallel_start(const std::string& name, int host_nb, const sg_host_t* host_list,
+                                                const double* flops_amount, const double* bytes_amount, double rate,
+                                                double timeout)
 {
-  return simgrid::simix::simcall([name, category, flops_amount, priority, bound, host] {
-    return simgrid::kernel::activity::ExecImplPtr(
-               new simgrid::kernel::activity::ExecImpl(std::move(name), std::move(category), nullptr, host))
-        ->start(flops_amount, priority, bound);
+  /* Check that we are not mixing VMs and PMs in the parallel task */
+  bool is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[0]));
+  for (int i = 1; i < host_nb; i++) {
+    bool tmp_is_a_vm = (nullptr != dynamic_cast<simgrid::s4u::VirtualMachine*>(host_list[i]));
+    xbt_assert(is_a_vm == tmp_is_a_vm, "parallel_execute: mixing VMs and PMs is not supported (yet).");
+  }
+
+  /* checking for infinite values */
+  for (int i = 0; i < host_nb; ++i) {
+    if (flops_amount != nullptr)
+      xbt_assert(std::isfinite(flops_amount[i]), "flops_amount[%d] is not finite!", i);
+    if (bytes_amount != nullptr) {
+      for (int j = 0; j < host_nb; ++j) {
+        xbt_assert(std::isfinite(bytes_amount[i + host_nb * j]), "bytes_amount[%d+%d*%d] is not finite!", i, host_nb,
+                   j);
+      }
+    }
+  }
+  xbt_assert(std::isfinite(rate), "rate is not finite!");
+
+  std::vector<simgrid::s4u::Host*> hosts(host_list, host_list + host_nb);
+  std::vector<double> flops_parallel_amount;
+  std::vector<double> bytes_parallel_amount;
+  if (flops_amount != nullptr)
+    flops_parallel_amount = std::vector<double>(flops_amount, flops_amount + host_nb);
+  if (bytes_amount != nullptr)
+    bytes_parallel_amount = std::vector<double>(bytes_amount, bytes_amount + host_nb * host_nb);
+  return simgrid::simix::simcall([name, hosts, flops_parallel_amount, bytes_parallel_amount, timeout] {
+    simgrid::kernel::activity::ExecImpl* exec = new simgrid::kernel::activity::ExecImpl();
+    (*exec)
+        .set_name(name)
+        .set_hosts(hosts)
+        .set_timeout(timeout)
+        .set_flops_amounts(flops_parallel_amount)
+        .set_bytes_amounts(bytes_parallel_amount)
+        .start();
+    return simgrid::kernel::activity::ExecImplPtr(exec);
   });
 }