proper check for the -std=gnu++11 standard, and take it on clang too
[simgrid.git] / src / surf / vm_hl13.cpp
index ff60c24..918df46 100644
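
The compiler-standard check mentioned in the commit message lives in the build system, not in this file. As a rough illustration only (a hypothetical source-level guard, not the commit's actual build-system change), enforcing the same requirement from the C++ sources could look like this:

// Hypothetical sketch: fail early if -std=gnu++11 (or a later standard) is not
// in effect. Both g++ and clang++ set __cplusplus to 201103L for C++11.
#if __cplusplus < 201103L
#  error "C++11 support is required; compile with -std=gnu++11 or newer."
#endif
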
@@ -23,17 +23,9 @@ void surf_vm_model_init_HL13(void){
  * Model *
  *********/
 
-VMHL13Model::VMHL13Model() : VMModel() {
-  p_cpuModel = surf_cpu_model_vm;
-}
-
-void VMHL13Model::updateActionsState(double /*now*/, double /*delta*/){
-  return;
-}
+VMHL13Model::VMHL13Model() : VMModel() {}
 
-Action *VMHL13Model::communicate(Host *src, Host *dst, double size, double rate){
-  return surf_network_model->communicate(src->p_netElm, dst->p_netElm, size, rate);
-}
+void VMHL13Model::updateActionsState(double /*now*/, double /*delta*/) {}
 
 /* 'ind' means 'indirect': this is a reference on the whole dict_elm
  * structure (i.e. not on the surf_resource_private infos) */
@@ -130,16 +122,15 @@ double VMHL13Model::shareResources(double now)
   /* 2. Calculate resource share at the virtual machine layer. */
   adjustWeightOfDummyCpuActions();
 
-  double min_by_cpu = p_cpuModel->shareResources(now);
-  double min_by_net = (strcmp(surf_network_model->getName(), "network NS3")) ? surf_network_model->shareResources(now) : -1;
+  double min_by_cpu = surf_cpu_model_vm->shareResources(now);
+  double min_by_net = surf_network_model->shareResourcesIsIdempotent() ? surf_network_model->shareResources(now) : -1;
+  // Fixme: take storage into account once it's implemented
   double min_by_sto = -1;
-  if (p_cpuModel == surf_cpu_model_pm)
-       min_by_sto = surf_storage_model->shareResources(now);
 
   XBT_DEBUG("model %p, %s min_by_cpu %f, %s min_by_net %f, %s min_by_sto %f",
-      this, surf_cpu_model_pm->getName(), min_by_cpu,
-            surf_network_model->getName(), min_by_net,
-            surf_storage_model->getName(), min_by_sto);
+      this, typeid(surf_cpu_model_pm).name(), min_by_cpu,
+            typeid(surf_network_model).name(), min_by_net,
+            typeid(surf_storage_model).name(), min_by_sto);
 
   double ret = max(max(min_by_cpu, min_by_net), min_by_sto);
   if (min_by_cpu >= 0.0 && min_by_cpu < ret)
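
Note on the hunk above: the old code decided whether to call surf_network_model->shareResources() by comparing the model name against "network NS3"; the new code asks the model itself via shareResourcesIsIdempotent(). A minimal sketch of that pattern, with hypothetical class names standing in for the real SURF hierarchy, assuming the NS3 wrapper is the non-idempotent case:

// Hypothetical sketch (not SimGrid's real classes): a model advertises whether
// its shareResources() can safely be called on every scheduling round.
class NetworkModel {
public:
  virtual ~NetworkModel() {}
  virtual bool shareResourcesIsIdempotent() { return true; }
  virtual double shareResources(double now) { (void)now; return -1.0; /* stub */ }
};

// Wrapper around an external simulator (e.g. NS3): recomputing shares is not
// side-effect free, so it opts out.
class NS3NetworkModel : public NetworkModel {
public:
  bool shareResourcesIsIdempotent() override { return false; }
};

// Same decision as the patched line, without any strcmp on the model name.
double min_by_net_for(NetworkModel *net, double now) {
  return net->shareResourcesIsIdempotent() ? net->shareResources(now) : -1.0;
}
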
@@ -182,17 +173,17 @@ double VMHL13Model::shareResources(double now)
 }
 
 Action *VMHL13Model::executeParallelTask(int host_nb,
-                                        void **host_list,
-                                        double *flops_amount,
-                                        double *bytes_amount,
-                                        double rate){
+                                         sg_host_t *host_list,
+                                         double *flops_amount,
+                                         double *bytes_amount,
+                                         double rate){
 #define cost_or_zero(array,pos) ((array)?(array)[pos]:0.0)
   if ((host_nb == 1)
       && (cost_or_zero(bytes_amount, 0) == 0.0))
-    return static_cast<HostCLM03*>(host_list[0])->execute(flops_amount[0]);
+    return surf_host_execute(host_list[0], flops_amount[0]);
   else if ((host_nb == 1)
            && (cost_or_zero(flops_amount, 0) == 0.0))
-    return communicate(static_cast<HostCLM03*>(host_list[0]), static_cast<HostCLM03*>(host_list[0]),bytes_amount[0], rate);
+    return surf_network_model_communicate(surf_network_model, host_list[0], host_list[0], bytes_amount[0], rate);
   else if ((host_nb == 2)
              && (cost_or_zero(flops_amount, 0) == 0.0)
              && (cost_or_zero(flops_amount, 1) == 0.0)) {
@@ -206,11 +197,11 @@ Action *VMHL13Model::executeParallelTask(int host_nb,
       }
     }
     if (nb == 1)
-      return communicate(static_cast<HostCLM03*>(host_list[0]), static_cast<HostCLM03*>(host_list[1]),value, rate);
+      return surf_network_model_communicate(surf_network_model, host_list[0], host_list[1], value, rate);
   }
 #undef cost_or_zero
 
-  THROW_UNIMPLEMENTED;          /* This model does not implement parallel tasks */
+  THROW_UNIMPLEMENTED;          /* This model does not implement parallel tasks for more than 2 hosts. */
   return NULL;
 }
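
For reference, the dispatch implemented by executeParallelTask() above handles exactly three shapes of request: one host with no bytes (plain execution), one host with no flops (loopback communication), and two hosts with no flops and a single non-zero transfer (point-to-point communication); everything else hits THROW_UNIMPLEMENTED. A hypothetical stand-alone rendering of those rules, simplified in that it does not check that only one entry of the 2x2 byte matrix is non-zero:

#include <cstdio>

// Hypothetical classifier mirroring the special cases above; the real function
// builds and returns surf Actions instead of an enum.
enum class PtaskKind { Execution, SelfComm, PointToPoint, Unsupported };

static double cost_or_zero(const double *array, int pos) {
  return array ? array[pos] : 0.0;   // same convention as the macro in the patch
}

static PtaskKind classify(int host_nb, const double *flops, const double *bytes) {
  if (host_nb == 1 && cost_or_zero(bytes, 0) == 0.0)
    return PtaskKind::Execution;     // pure computation on a single host
  if (host_nb == 1 && cost_or_zero(flops, 0) == 0.0)
    return PtaskKind::SelfComm;      // communication from a host to itself
  if (host_nb == 2 && cost_or_zero(flops, 0) == 0.0 && cost_or_zero(flops, 1) == 0.0)
    return PtaskKind::PointToPoint;  // one transfer between the two hosts
  return PtaskKind::Unsupported;     // more than 2 hosts: THROW_UNIMPLEMENTED
}

int main() {
  double flops[1] = {1e9};
  std::printf("%d\n", static_cast<int>(classify(1, flops, nullptr)));  // prints 0: Execution
  return 0;
}
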