From d3532ee4558ad2997a36fa8739702a7ff4968916 Mon Sep 17 00:00:00 2001
From: Martin Quinson
Date: Mon, 3 Jun 2019 15:02:38 +0200
Subject: [PATCH] Remove the lagrange-based models

Sorry Pedro, but this simplification really makes me happy :)
---
 ChangeLog                                     |   4 +
 docs/source/Configuring_SimGrid.rst           |   2 -
 .../s4u/app-pingpong/s4u-app-pingpong.tesh    |  45 --
 src/kernel/lmm/lagrange.cpp                   | 513 ------------------
 src/kernel/lmm/maxmin.hpp                     |  46 --
 src/surf/network_cm02.cpp                     |  59 +-
 src/surf/surf_interface.cpp                   |   9 -
 src/surf/surf_interface.hpp                   |  39 --
 teshsuite/msg/app-pingpong/app-pingpong.tesh  |  45 --
 .../basic-parsing-test.tesh                   |   2 -
 teshsuite/simdag/platforms/properties.xml     |   1 -
 teshsuite/surf/lmm_usage/lmm_usage.cpp        | 114 +---
 teshsuite/surf/lmm_usage/lmm_usage.tesh       |  12 +-
 tools/cmake/DefinePackages.cmake              |   1 -
 14 files changed, 27 insertions(+), 865 deletions(-)
 delete mode 100644 src/kernel/lmm/lagrange.cpp

diff --git a/ChangeLog b/ChangeLog
index 00e3147386..722d4ca622 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -21,6 +21,10 @@ Model-checker:
    faster because of cache effects. So there is no need to clutter the code
    to allow the user to go for the unefficient mode.
 
+Network models:
+ - Remove the lagrange-based models (Reno/Reno2/Vegas). The regular
+   models proved to be more accurate than these old experiments.
+
 Fixed bugs (FG=FramaGit; GH=GitHub):
  - FG#1: Broken link in error messages
  - FG#2: missing installation documentation
diff --git a/docs/source/Configuring_SimGrid.rst b/docs/source/Configuring_SimGrid.rst
index 136c019ba7..8303e48643 100644
--- a/docs/source/Configuring_SimGrid.rst
+++ b/docs/source/Configuring_SimGrid.rst
@@ -218,8 +218,6 @@ models for all existing resources.
   poorly modeled. This model is described in `A Network Model for Simulation of
   Grid Application `_.
 
-  - **Reno/Reno2/Vegas:** Models from Steven H. Low using lagrange_solve instead of
-    lmm_solve (experts only; check the code for more info).
 
   - **NS3** (only available if you compiled SimGrid accordingly):
     Use the packet-level network simulators as network models (see :ref:`pls_ns3`).
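Migration note: platforms and scripts that selected one of the removed models only need to name one of the surviving models instead (the default LV08, or CM02, NS3, Constant...). A hypothetical invocation, patterned on the ping-pong tests deleted below — the binary and platform file are placeholders for your own setup:

    $ ./my-simulator my_platform.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:CM02"
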
diff --git a/examples/s4u/app-pingpong/s4u-app-pingpong.tesh b/examples/s4u/app-pingpong/s4u-app-pingpong.tesh
index fa82acc6c7..8a4042a427 100644
--- a/examples/s4u/app-pingpong/s4u-app-pingpong.tesh
+++ b/examples/s4u/app-pingpong/s4u-app-pingpong.tesh
@@ -39,51 +39,6 @@ $ ${bindir:=.}/s4u-app-pingpong ${platfdir}/small_platform.xml --cfg=cpu/model:C
 > [145.639041] (1:pinger@Tremblay) Pong time (bandwidth bound): 145.638
 > [145.639041] (0:maestro@) Total simulation time: 145.639
 
-p Testing the surf network Reno fairness model using lagrangian approach
-
-$ ${bindir:=.}/s4u-app-pingpong ${platfdir}/small_platform.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Reno" --log=surf_lagrange.thres=critical "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Reno'
-> [  0.000000] (1:pinger@Tremblay) Ping from mailbox Mailbox 1 to mailbox Mailbox 2
-> [  0.000000] (2:ponger@Jupiter) Pong from mailbox Mailbox 2 to mailbox Mailbox 1
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
-p Testing the surf network Reno2 fairness model using lagrangian approach
-
-$ ${bindir:=.}/s4u-app-pingpong ${platfdir}/small_platform.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Reno2" --log=surf_lagrange.thres=critical "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Reno2'
-> [  0.000000] (1:pinger@Tremblay) Ping from mailbox Mailbox 1 to mailbox Mailbox 2
-> [  0.000000] (2:ponger@Jupiter) Pong from mailbox Mailbox 2 to mailbox Mailbox 1
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
-p Testing the surf network Vegas fairness model using lagrangian approach
-
-$ ${bindir:=.}/s4u-app-pingpong ${platfdir}/small_platform.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Vegas" "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Vegas'
-> [  0.000000] (1:pinger@Tremblay) Ping from mailbox Mailbox 1 to mailbox Mailbox 2
-> [  0.000000] (2:ponger@Jupiter) Pong from mailbox Mailbox 2 to mailbox Mailbox 1
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
 p Testing the surf network constant model
 
 $ ${bindir:=.}/s4u-app-pingpong ${platfdir}/small_platform_constant.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Constant" "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
diff --git a/src/kernel/lmm/lagrange.cpp b/src/kernel/lmm/lagrange.cpp
deleted file mode 100644
index 5febecfe95..0000000000
--- a/src/kernel/lmm/lagrange.cpp
+++ /dev/null
@@ -1,513 +0,0 @@
-/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */
-
-/* This program is free software; you can redistribute it and/or modify it
- * under the terms of the license (GNU LGPL) which comes with this package. */
-
-/*
- * Modeling the proportional fairness using the Lagrangian Optimization Approach. For a detailed description see:
- * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
- */
-#include "src/kernel/lmm/maxmin.hpp"
-#include "src/surf/surf_interface.hpp"
-#include "xbt/log.h"
-#include "xbt/sysdep.h"
-
-#include
-#include
-#include
-
-XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf, "Logging specific to SURF (lagrange)");
-XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf_lagrange, "Logging specific to SURF (lagrange dichotomy)");
-
-static constexpr double VEGAS_SCALING = 1000.0;
-static constexpr double RENO_SCALING = 1.0;
-static constexpr double RENO2_SCALING = 1.0;
-
-namespace simgrid {
-namespace kernel {
-namespace lmm {
-
-System* make_new_lagrange_system(bool selective_update)
-{
-  return new Lagrange(selective_update);
-}
-
-bool Lagrange::check_feasible(bool warn)
-{
-  for (Constraint const& cnst : active_constraint_set) {
-    double tmp = 0;
-    for (Element const& elem : cnst.enabled_element_set_) {
-      Variable* var = elem.variable;
-      xbt_assert(var->sharing_weight_ > 0);
-      tmp += var->value_;
-    }
-
-    if (double_positive(tmp - cnst.bound_, sg_maxmin_precision)) {
-      if (warn)
-        XBT_WARN("The link (%p) is over-used. Expected less than %f and got %f", &cnst, cnst.bound_, tmp);
-      return false;
-    }
-    XBT_DEBUG("Checking feasability for constraint (%p): sat = %f, lambda = %f ", &cnst, tmp - cnst.bound_,
-              cnst.lambda_);
-  }
-
-  for (Variable const& var : variable_set) {
-    if (not var.sharing_weight_)
-      break;
-    if (var.bound_ < 0)
-      continue;
-    XBT_DEBUG("Checking feasability for variable (%p): sat = %f mu = %f", &var, var.value_ - var.bound_, var.mu_);
-
-    if (double_positive(var.value_ - var.bound_, sg_maxmin_precision)) {
-      if (warn)
-        XBT_WARN("The variable (%p) is too large. Expected less than %f and got %f", &var, var.bound_, var.value_);
-      return false;
-    }
-  }
-  return true;
-}
-
-double Lagrange::new_value(const Variable& var)
-{
-  double tmp = 0;
-
-  for (Element const& elem : var.cnsts_) {
-    tmp += elem.constraint->lambda_;
-  }
-  if (var.bound_ > 0)
-    tmp += var.mu_;
-  XBT_DEBUG("\t Working on var (%p). cost = %e; Weight = %e", &var, tmp, var.sharing_weight_);
-  // uses the partial differential inverse function
-  return func_fpi(var, tmp);
-}
-
-double Lagrange::new_mu(const Variable& var)
-{
-  double mu_i = 0.0;
-  double sigma_i = 0.0;
-
-  for (Element const& elem : var.cnsts_) {
-    sigma_i += elem.constraint->lambda_;
-  }
-  mu_i = func_fp(var, var.bound_) - sigma_i;
-  if (mu_i < 0.0)
-    return 0.0;
-  return mu_i;
-}
-
-double Lagrange::dual_objective()
-{
-  double obj = 0.0;
-
-  for (Variable const& var : variable_set) {
-    double sigma_i = 0.0;
-
-    if (not var.sharing_weight_)
-      break;
-
-    for (Element const& elem : var.cnsts_)
-      sigma_i += elem.constraint->lambda_;
-
-    if (var.bound_ > 0)
-      sigma_i += var.mu_;
-
-    XBT_DEBUG("var %p : sigma_i = %1.20f", &var, sigma_i);
-
-    obj += func_f(var, func_fpi(var, sigma_i)) - sigma_i * func_fpi(var, sigma_i);
-
-    if (var.bound_ > 0)
-      obj += var.mu_ * var.bound_;
-  }
-
-  for (Constraint const& cnst : active_constraint_set)
-    obj += cnst.lambda_ * cnst.bound_;
-
-  return obj;
-}
-
-// solves the proportional fairness using a Lagrangian optimization with dichotomy step
-void Lagrange::lagrange_solve()
-{
-  /* Lagrange Variables. */
-  int max_iterations = 100;
-  double epsilon_min_error = 0.00001; /* this is the precision on the objective function so it's none of the
-                                         configurable values and this value is the legacy one */
-  double dichotomy_min_error = 1e-14;
-  double overall_modification = 1;
-
-  XBT_DEBUG("Iterative method configuration snapshot =====>");
-  XBT_DEBUG("#### Maximum number of iterations : %d", max_iterations);
-  XBT_DEBUG("#### Minimum error tolerated : %e", epsilon_min_error);
-  XBT_DEBUG("#### Minimum error tolerated (dichotomy) : %e", dichotomy_min_error);
-
-  if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
-    print();
-  }
-
-  if (not modified_)
-    return;
-
-  /* Initialize lambda. */
-  for (Constraint& cnst : active_constraint_set) {
-    cnst.lambda_ = 1.0;
-    cnst.new_lambda_ = 2.0;
-    XBT_DEBUG("#### cnst(%p)->lambda : %e", &cnst, cnst.lambda_);
-  }
-
-  /*
-   * Initialize the active variables. Initialize mu.
-   */
-  for (Variable& var : variable_set) {
-    if (not var.sharing_weight_)
-      var.value_ = 0.0;
-    else {
-      if (var.bound_ < 0.0) {
-        XBT_DEBUG("#### NOTE var(%p) is a boundless variable", &var);
-        var.mu_ = -1.0;
-      } else {
-        var.mu_ = 1.0;
-        var.new_mu_ = 2.0;
-      }
-      var.value_ = new_value(var);
-      XBT_DEBUG("#### var(%p) ->weight : %e", &var, var.sharing_weight_);
-      XBT_DEBUG("#### var(%p) ->mu : %e", &var, var.mu_);
-      XBT_DEBUG("#### var(%p) ->weight: %e", &var, var.sharing_weight_);
-      XBT_DEBUG("#### var(%p) ->bound: %e", &var, var.bound_);
-      auto weighted = std::find_if(begin(var.cnsts_), end(var.cnsts_),
-                                   [](Element const& x) { return x.consumption_weight != 0.0; });
-      if (weighted == end(var.cnsts_))
-        var.value_ = 1.0;
-    }
-  }
-
-  /* Compute dual objective. */
-  double obj = dual_objective();
-
-  /* While doesn't reach a minimum error or a number maximum of iterations. */
-  int iteration = 0;
-  while (overall_modification > epsilon_min_error && iteration < max_iterations) {
-    iteration++;
-    XBT_DEBUG("************** ITERATION %d **************", iteration);
-    XBT_DEBUG("-------------- Gradient Descent ----------");
-
-    /* Improve the value of mu_i */
-    for (Variable& var : variable_set) {
-      if (var.sharing_weight_ && var.bound_ >= 0) {
-        XBT_DEBUG("Working on var (%p)", &var);
-        var.new_mu_ = new_mu(var);
-        XBT_DEBUG("Updating mu : var->mu (%p) : %1.20f -> %1.20f", &var, var.mu_, var.new_mu_);
-        var.mu_ = var.new_mu_;
-
-        double new_obj = dual_objective();
-        XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
-        xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
-        obj = new_obj;
-      }
-    }
-
-    /* Improve the value of lambda_i */
-    for (Constraint& cnst : active_constraint_set) {
-      XBT_DEBUG("Working on cnst (%p)", &cnst);
-      cnst.new_lambda_ = dichotomy(cnst.lambda_, cnst, dichotomy_min_error);
-      XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", &cnst, cnst.lambda_, cnst.new_lambda_);
-      cnst.lambda_ = cnst.new_lambda_;
-
-      double new_obj = dual_objective();
-      XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
-      xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
-      obj = new_obj;
-    }
-
-    /* Now computes the values of each variable (@rho) based on the values of @lambda and @mu. */
-    XBT_DEBUG("-------------- Check convergence ----------");
-    overall_modification = 0;
-    for (Variable& var : variable_set) {
-      if (var.sharing_weight_ <= 0)
-        var.value_ = 0.0;
-      else {
-        double tmp = new_value(var);
-
-        overall_modification = std::max(overall_modification, fabs(var.value_ - tmp));
-
-        var.value_ = tmp;
-        XBT_DEBUG("New value of var (%p) = %e, overall_modification = %e", &var, var.value_, overall_modification);
-      }
-    }
-
-    XBT_DEBUG("-------------- Check feasability ----------");
-    if (not check_feasible(false))
-      overall_modification = 1.0;
-    XBT_DEBUG("Iteration %d: overall_modification : %f", iteration, overall_modification);
-  }
-
-  check_feasible(true);
-
-  if (overall_modification <= epsilon_min_error) {
-    XBT_DEBUG("The method converges in %d iterations.", iteration);
-  }
-  if (iteration >= max_iterations) {
-    XBT_DEBUG("Method reach %d iterations, which is the maximum number of iterations allowed.", iteration);
-  }
-
-  if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
-    print();
-  }
-}
-
-/*
- * Returns a double value corresponding to the result of a dichotomy process with respect to a given
- * variable/constraint (@mu in the case of a variable or @lambda in case of a constraint) and a initial value init.
- *
- * @param init initial value for @mu or @lambda
- * @param diff a function that computes the differential of with respect a @mu or @lambda
- * @param var_cnst a pointer to a variable or constraint
- * @param min_erro a minimum error tolerated
- *
- * @return a double corresponding to the result of the dichotomy process
- */
-double Lagrange::dichotomy(double init, const Constraint& cnst, double min_error)
-{
-  double min = init;
-  double max = init;
-  double overall_error;
-  double middle;
-  double middle_diff;
-  double diff_0 = 0.0;
-
-  XBT_IN();
-
-  if (fabs(init) < 1e-20) {
-    min = 0.5;
-    max = 0.5;
-  }
-
-  overall_error = 1;
-
-  diff_0 = partial_diff_lambda(1e-16, cnst);
-  if (diff_0 >= 0) {
-    XBT_CDEBUG(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)", diff_0);
-    XBT_OUT();
-    return 0.0;
-  }
-
-  double min_diff = partial_diff_lambda(min, cnst);
-  double max_diff = partial_diff_lambda(max, cnst);
-
-  while (overall_error > min_error) {
-    XBT_CDEBUG(surf_lagrange_dichotomy, "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f", min, max,
-               min_diff, max_diff);
-
-    if (min_diff > 0 && max_diff > 0) {
-      if (min == max) {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing min");
-        min = min / 2.0;
-        min_diff = partial_diff_lambda(min, cnst);
-      } else {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
-        max = min;
-        max_diff = min_diff;
-      }
-    } else if (min_diff < 0 && max_diff < 0) {
-      if (min == max) {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing max");
-        max = max * 2.0;
-        max_diff = partial_diff_lambda(max, cnst);
-      } else {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
-        min = max;
-        min_diff = max_diff;
-      }
-    } else if (min_diff < 0 && max_diff > 0) {
-      middle = (max + min) / 2.0;
-      XBT_CDEBUG(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f", middle);
-
-      if ((fabs(min - middle) < 1e-20) || (fabs(max - middle) < 1e-20)) {
-        XBT_CWARN(surf_lagrange_dichotomy,
-                  "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
-                  " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
-                  min, max - min, min_diff, max_diff);
-        break;
-      }
-      middle_diff = partial_diff_lambda(middle, cnst);
-
-      if (middle_diff < 0) {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
-        min = middle;
-        overall_error = max_diff - middle_diff;
-        min_diff = middle_diff;
-      } else if (middle_diff > 0) {
-        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
-        max = middle;
-        overall_error = max_diff - middle_diff;
-        max_diff = middle_diff;
-      } else {
-        overall_error = 0;
-      }
-    } else if (fabs(min_diff) < 1e-20) {
-      max = min;
-      overall_error = 0;
-    } else if (fabs(max_diff) < 1e-20) {
-      min = max;
-      overall_error = 0;
-    } else if (min_diff > 0 && max_diff < 0) {
-      XBT_CWARN(surf_lagrange_dichotomy, "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
-      xbt_abort();
-    } else {
-      XBT_CWARN(surf_lagrange_dichotomy,
-                "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.", min_diff,
-                max_diff);
-      xbt_abort();
-    }
-  }
-
-  XBT_CDEBUG(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
-  XBT_OUT();
-  return ((min + max) / 2.0);
-}
-
-double Lagrange::partial_diff_lambda(double lambda, const Constraint& cnst)
-{
-  double diff = 0.0;
-
-  XBT_IN();
-
-  XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", &cnst);
-
-  for (Element const& elem : cnst.enabled_element_set_) {
-    Variable& var = *elem.variable;
-    xbt_assert(var.sharing_weight_ > 0);
-    XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", &var);
-    // Initialize the summation variable
-    double sigma_i = 0.0;
-
-    // Compute sigma_i
-    for (Element const& elem2 : var.cnsts_)
-      sigma_i += elem2.constraint->lambda_;
-
-    // add mu_i if this flow has a RTT constraint associated
-    if (var.bound_ > 0)
-      sigma_i += var.mu_;
-
-    // replace value of cnst.lambda by the value of parameter lambda
-    sigma_i = (sigma_i - cnst.lambda_) + lambda;
-
-    diff += -func_fpi(var, sigma_i);
-  }
-
-  diff += cnst.bound_;
-
-  XBT_CDEBUG(surf_lagrange_dichotomy, "d D/d lambda for cnst (%p) at %1.20f = %1.20f", &cnst, lambda, diff);
-  XBT_OUT();
-  return diff;
-}
-
-/** @brief Attribute the value bound to var->bound.
- *
- * @param f function (f)
- * @param fp partial differential of f (f prime, (f'))
- * @param fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
- *
- * Set default functions to the ones passed as parameters.
- */
-void Lagrange::set_default_protocol_function(double (*f)(const Variable& var, double x),
-                                             double (*fp)(const Variable& var, double x),
-                                             double (*fpi)(const Variable& var, double x))
-{
-  func_f = f;
-  func_fp = fp;
-  func_fpi = fpi;
-}
-
-double (*Lagrange::func_f)(const Variable&, double);
-double (*Lagrange::func_fp)(const Variable&, double);
-double (*Lagrange::func_fpi)(const Variable&, double);
-
-/**************** Vegas and Reno functions *************************/
-/* NOTE for Reno: all functions consider the network coefficient (alpha) equal to 1. */
-
-/*
- * For Vegas: $f(x) = @alpha D_f@ln(x)$
- * Therefore: $fp(x) = @frac{@alpha D_f}{x}$
- * Therefore: $fpi(x) = @frac{@alpha D_f}{x}$
- */
-double func_vegas_f(const Variable& var, double x)
-{
-  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
-  return VEGAS_SCALING * var.sharing_weight_ * log(x);
-}
-
-double func_vegas_fp(const Variable& var, double x)
-{
-  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
-  return VEGAS_SCALING * var.sharing_weight_ / x;
-}
-
-double func_vegas_fpi(const Variable& var, double x)
-{
-  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
-  return var.sharing_weight_ / (x / VEGAS_SCALING);
-}
-
-/*
- * For Reno: $f(x) = @frac{@sqrt{3/2}}{D_f} atan(@sqrt{3/2}D_f x)$
- * Therefore: $fp(x) = @frac{3}{3 D_f^2 x^2+2}$
- * Therefore: $fpi(x) = @sqrt{@frac{1}{{D_f}^2 x} - @frac{2}{3{D_f}^2}}$
- */
-double func_reno_f(const Variable& var, double x)
-{
-  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");
-
-  return RENO_SCALING * sqrt(3.0 / 2.0) / var.sharing_weight_ * atan(sqrt(3.0 / 2.0) * var.sharing_weight_ * x);
-}
-
-double func_reno_fp(const Variable& var, double x)
-{
-  return RENO_SCALING * 3.0 / (3.0 * var.sharing_weight_ * var.sharing_weight_ * x * x + 2.0);
-}
-
-double func_reno_fpi(const Variable& var, double x)
-{
-  double res_fpi;
-
-  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");
-  xbt_assert(x > 0.0, "Don't call me with stupid values!");
-
-  res_fpi = 1.0 / (var.sharing_weight_ * var.sharing_weight_ * (x / RENO_SCALING)) -
-            2.0 / (3.0 * var.sharing_weight_ * var.sharing_weight_);
-  if (res_fpi <= 0.0)
-    return 0.0;
-  return sqrt(res_fpi);
-}
-
-/* Implementing new Reno-2
- * For Reno-2: $f(x) = U_f(x_f) = @frac{{2}{D_f}}*ln(2+x*D_f)$
- * Therefore: $fp(x) = 2/(Weight*x + 2)
- * Therefore: $fpi(x) = (2*Weight)/x - 4
- */
-double func_reno2_f(const Variable& var, double x)
-{
-  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");
-  return RENO2_SCALING * (1.0 / var.sharing_weight_) *
-         log((x * var.sharing_weight_) / (2.0 * x * var.sharing_weight_ + 3.0));
-}
-
-double func_reno2_fp(const Variable& var, double x)
-{
-  return RENO2_SCALING * 3.0 / (var.sharing_weight_ * x * (2.0 * var.sharing_weight_ * x + 3.0));
-}
-
-double func_reno2_fpi(const Variable& var, double x)
-{
-  xbt_assert(x > 0.0, "Don't call me with stupid values!");
-  double tmp = x * var.sharing_weight_ * var.sharing_weight_;
-  double res_fpi = tmp * (9.0 * x + 24.0);
-
-  if (res_fpi <= 0.0)
-    return 0.0;
-
-  res_fpi = RENO2_SCALING * (-3.0 * tmp + sqrt(res_fpi)) / (4.0 * tmp);
-  return res_fpi;
-}
-}
-}
-}
diff --git a/src/kernel/lmm/maxmin.hpp b/src/kernel/lmm/maxmin.hpp
index caa2d0dae8..8f91ecab20 100644
--- a/src/kernel/lmm/maxmin.hpp
+++ b/src/kernel/lmm/maxmin.hpp
@@ -131,20 +131,6 @@ namespace lmm {
 
 /** @{ @ingroup SURF_lmm */
 
-/** Default functions associated to the chosen protocol. When using the lagrangian approach. */
-
-XBT_PUBLIC double func_reno_f(const Variable& var, double x);
-XBT_PUBLIC double func_reno_fp(const Variable& var, double x);
-XBT_PUBLIC double func_reno_fpi(const Variable& var, double x);
-
-XBT_PUBLIC double func_reno2_f(const Variable& var, double x);
-XBT_PUBLIC double func_reno2_fp(const Variable& var, double x);
-XBT_PUBLIC double func_reno2_fpi(const Variable& var, double x);
-
-XBT_PUBLIC double func_vegas_f(const Variable& var, double x);
-XBT_PUBLIC double func_vegas_fp(const Variable& var, double x);
-XBT_PUBLIC double func_vegas_fpi(const Variable& var, double x);
-
 /**
  * @brief LMM element
  * Elements can be seen as glue between constraint objects and variable objects.
@@ -613,40 +599,8 @@ private:
   void bottleneck_solve();
 };
 
-class XBT_PUBLIC Lagrange : public System {
-public:
-  explicit Lagrange(bool selective_update) : System(selective_update) {}
-  void solve() final { lagrange_solve(); }
-
-  static void set_default_protocol_function(double (*func_f)(const Variable& var, double x),
-                                            double (*func_fp)(const Variable& var, double x),
-                                            double (*func_fpi)(const Variable& var, double x));
-
-private:
-  void lagrange_solve();
-
-  bool check_feasible(bool warn);
-  double dual_objective();
-
-  static double (*func_f)(const Variable& var, double x);   /* (f) */
-  static double (*func_fp)(const Variable& var, double x);  /* (f') */
-  static double (*func_fpi)(const Variable& var, double x); /* (f')^{-1} */
-
-  /*
-   * Local prototypes to implement the Lagrangian optimization with optimal step, also called dichotomy.
-   */
-  // computes the value of the dichotomy using a initial values, init, with a specific variable or constraint
-  static double dichotomy(double init, const Constraint& cnst, double min_error);
-  // computes the value of the differential of constraint cnst applied to lambda
-  static double partial_diff_lambda(double lambda, const Constraint& cnst);
-
-  static double new_value(const Variable& var);
-  static double new_mu(const Variable& var);
-};
-
 XBT_PUBLIC System* make_new_maxmin_system(bool selective_update);
 XBT_PUBLIC System* make_new_fair_bottleneck_system(bool selective_update);
-XBT_PUBLIC System* make_new_lagrange_system(bool selective_update);
 
 /** @} */
 }
diff --git a/src/surf/network_cm02.cpp b/src/surf/network_cm02.cpp
index 3a09e280f6..a876c5334a 100644
--- a/src/surf/network_cm02.cpp
+++ b/src/surf/network_cm02.cpp
@@ -3,9 +3,6 @@
 /* This program is free software; you can redistribute it and/or modify it
  * under the terms of the license (GNU LGPL) which comes with this package. */
 
-#include
-#include
-
 #include "network_cm02.hpp"
 #include "simgrid/s4u/Host.hpp"
 #include "simgrid/sg_config.hpp"
@@ -13,6 +10,9 @@
 #include "src/surf/surf_interface.hpp"
 #include "surf/surf.hpp"
 
+#include
+#include
+
 XBT_LOG_EXTERNAL_DEFAULT_CATEGORY(surf_network);
 
 double sg_latency_factor = 1.0; /* default value; can be set by model or from command line */
@@ -66,59 +66,6 @@ void surf_network_model_init_CM02()
   surf_network_model = new simgrid::kernel::resource::NetworkCm02Model();
 }
 
-/***************************************************************************/
-/* The models from Steven H. Low                                           */
-/***************************************************************************/
-/* @article{Low03,                                                         */
-/*   author={Steven H. Low},                                               */
-/*   title={A Duality Model of {TCP} and Queue Management Algorithms},     */
-/*   year={2003},                                                          */
-/*   journal={{IEEE/ACM} Transactions on Networking},                      */
-/*   volume={11}, number={4},                                              */
-/* }                                                                       */
-void surf_network_model_init_Reno()
-{
-  xbt_assert(surf_network_model == nullptr, "Cannot set the network model twice");
-
-  namespace lmm = simgrid::kernel::lmm;
-  lmm::Lagrange::set_default_protocol_function(lmm::func_reno_f, lmm::func_reno_fp, lmm::func_reno_fpi);
-
-  simgrid::config::set_default("network/latency-factor", 13.01);
-  simgrid::config::set_default("network/bandwidth-factor", 0.97);
-  simgrid::config::set_default("network/weight-S", 20537);
-
-  surf_network_model = new simgrid::kernel::resource::NetworkCm02Model(&simgrid::kernel::lmm::make_new_lagrange_system);
-}
-
-
-void surf_network_model_init_Reno2()
-{
-  xbt_assert(surf_network_model == nullptr, "Cannot set the network model twice");
-
-  namespace lmm = simgrid::kernel::lmm;
-  lmm::Lagrange::set_default_protocol_function(lmm::func_reno2_f, lmm::func_reno2_fp, lmm::func_reno2_fpi);
-
-  simgrid::config::set_default("network/latency-factor", 13.01);
-  simgrid::config::set_default("network/bandwidth-factor", 0.97);
-  simgrid::config::set_default("network/weight-S", 20537);
-
-  surf_network_model = new simgrid::kernel::resource::NetworkCm02Model(&simgrid::kernel::lmm::make_new_lagrange_system);
-}
-
-void surf_network_model_init_Vegas()
-{
-  xbt_assert(surf_network_model == nullptr, "Cannot set the network model twice");
-
-  namespace lmm = simgrid::kernel::lmm;
-  lmm::Lagrange::set_default_protocol_function(lmm::func_vegas_f, lmm::func_vegas_fp, lmm::func_vegas_fpi);
-
-  simgrid::config::set_default("network/latency-factor", 13.01);
-  simgrid::config::set_default("network/bandwidth-factor", 0.97);
-  simgrid::config::set_default("network/weight-S", 20537);
-
-  surf_network_model = new simgrid::kernel::resource::NetworkCm02Model(&simgrid::kernel::lmm::make_new_lagrange_system);
-}
-
 namespace simgrid {
 namespace kernel {
 namespace resource {
diff --git a/src/surf/surf_interface.cpp b/src/surf/surf_interface.cpp
index 1cd5f1e3fc..13fb7de2e7 100644
--- a/src/surf/surf_interface.cpp
+++ b/src/surf/surf_interface.cpp
@@ -69,15 +69,6 @@ const std::vector surf_network_model_description = {
      "small messages are thus poorly modeled).",
      &surf_network_model_init_CM02},
     {"NS3", "Network pseudo-model using the NS3 tcp model instead of an analytic model", &surf_network_model_init_NS3},
-    {"Reno",
-     "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
-     &surf_network_model_init_Reno},
-    {"Reno2",
-     "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
-     &surf_network_model_init_Reno2},
-    {"Vegas",
-     "Model from Steven H. Low using lagrange_solve instead of lmm_solve (experts only; check the code for more info).",
-     &surf_network_model_init_Vegas},
 };
 
 #if ! HAVE_SMPI
diff --git a/src/surf/surf_interface.hpp b/src/surf/surf_interface.hpp
index cd88f89f99..cdc5db0f62 100644
--- a/src/surf/surf_interface.hpp
+++ b/src/surf/surf_interface.hpp
@@ -133,45 +133,6 @@ XBT_ATTRIB_NORETURN
 #endif
 XBT_PUBLIC void surf_network_model_init_NS3();
 
-/** @ingroup SURF_models
- * @brief Initializes the platform with the network model Reno
- *
- * The problem is related to max( sum( arctan(C * Df * xi) ) ).
- *
- * Reference:
- * [LOW03] S. H. Low. A duality model of TCP and queue management algorithms.
- * IEEE/ACM Transaction on Networking, 11(4):525-536, 2003.
- *
- * Call this function only if you plan using surf_host_model_init_compound.
- */
-XBT_PUBLIC void surf_network_model_init_Reno();
-
-/** @ingroup SURF_models
- * @brief Initializes the platform with the network model Reno2
- *
- * The problem is related to max( sum( arctan(C * Df * xi) ) ).
- *
- * Reference:
- * [LOW01] S. H. Low. A duality model of TCP and queue management algorithms.
- * IEEE/ACM Transaction on Networking, 11(4):525-536, 2003.
- *
- * Call this function only if you plan using surf_host_model_init_compound.
- */
-XBT_PUBLIC void surf_network_model_init_Reno2();
-
-/** @ingroup SURF_models
- * @brief Initializes the platform with the network model Vegas
- *
- * This problem is related to max( sum( a * Df * ln(xi) ) ) which is equivalent to the proportional fairness.
- *
- * Reference:
- * [LOW03] S. H. Low. A duality model of TCP and queue management algorithms.
- * IEEE/ACM Transaction on Networking, 11(4):525-536, 2003.
- *
- * Call this function only if you plan using surf_host_model_init_compound.
- */
-XBT_PUBLIC void surf_network_model_init_Vegas();
-
 /** @ingroup SURF_models
  * @brief Initializes the platform with the current best network and cpu models at hand
  *
diff --git a/teshsuite/msg/app-pingpong/app-pingpong.tesh b/teshsuite/msg/app-pingpong/app-pingpong.tesh
index 46a4387080..29a193feda 100644
--- a/teshsuite/msg/app-pingpong/app-pingpong.tesh
+++ b/teshsuite/msg/app-pingpong/app-pingpong.tesh
@@ -39,51 +39,6 @@ $ ${bindir:=.}/app-pingpong ${platfdir}/small_platform.xml app-pingpong_d.xml --
 > [145.639041] (1:pinger@Tremblay) Pong time (bandwidth bound): 145.638
 > [145.639041] (0:maestro@) Total simulation time: 145.639
 
-p Testing the surf network Reno fairness model using lagrangian approach
-
-$ ${bindir:=.}/app-pingpong ${platfdir}/small_platform.xml app-pingpong_d.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Reno" --log=surf_lagrange.thres=critical "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Reno'
-> [  0.000000] (1:pinger@Tremblay) Ping -> Jupiter
-> [  0.000000] (2:ponger@Jupiter) Pong -> Tremblay
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
-p Testing the surf network Reno2 fairness model using lagrangian approach
-
-$ ${bindir:=.}/app-pingpong ${platfdir}/small_platform.xml app-pingpong_d.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Reno2" --log=surf_lagrange.thres=critical "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Reno2'
-> [  0.000000] (1:pinger@Tremblay) Ping -> Jupiter
-> [  0.000000] (2:ponger@Jupiter) Pong -> Tremblay
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
-p Testing the surf network Vegas fairness model using lagrangian approach
-
-$ ${bindir:=.}/app-pingpong ${platfdir}/small_platform.xml app-pingpong_d.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Vegas" "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
-> [  0.000000] (0:maestro@) Configuration change: Set 'host/model' to 'compound'
-> [  0.000000] (0:maestro@) Configuration change: Set 'cpu/model' to 'Cas01'
-> [  0.000000] (0:maestro@) Configuration change: Set 'network/model' to 'Vegas'
-> [  0.000000] (1:pinger@Tremblay) Ping -> Jupiter
-> [  0.000000] (2:ponger@Jupiter) Pong -> Tremblay
-> [  0.019014] (2:ponger@Jupiter) Task received : small communication (latency bound)
-> [  0.019014] (2:ponger@Jupiter) Ping time (latency bound) 0.019014
-> [  0.019014] (2:ponger@Jupiter) task_bw->data = 0.019
-> [150.178356] (1:pinger@Tremblay) Task received : large communication (bandwidth bound)
-> [150.178356] (1:pinger@Tremblay) Pong time (bandwidth bound): 150.159
-> [150.178356] (0:maestro@) Total simulation time: 150.178
-
 p Testing the surf network constant model
 
 $ ${bindir:=.}/app-pingpong ${platfdir}/small_platform_constant.xml app-pingpong_d.xml "--cfg=host/model:compound cpu/model:Cas01 network/model:Constant" "--log=root.fmt:[%10.6r]%e(%i:%P@%h)%e%m%n"
diff --git a/teshsuite/simdag/basic-parsing-test/basic-parsing-test.tesh b/teshsuite/simdag/basic-parsing-test/basic-parsing-test.tesh
index 099c46b3f6..4519613d7f 100644
--- a/teshsuite/simdag/basic-parsing-test/basic-parsing-test.tesh
+++ b/teshsuite/simdag/basic-parsing-test/basic-parsing-test.tesh
@@ -43,7 +43,6 @@ $ ${bindir:=.}/basic-parsing-test ../platforms/properties.xml
 > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'cpu/optim' to 'TI'
 > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'host/model' to 'compound'
 > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '0.000010'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'Vegas'
 > Workstation number: 1, link number: 1
 
 $ ${bindir:=.}/basic-parsing-test ../platforms/properties.xml --cfg=cpu/optim:TI
@@ -51,6 +50,5 @@ $ ${bindir:=.}/basic-parsing-test ../platforms/properties.xml --cfg=cpu/optim:TI
 > [0.000000] [surf_parse/INFO] The custom configuration 'cpu/optim' is already defined by user!
 > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'host/model' to 'compound'
 > [0.000000] [xbt_cfg/INFO] Configuration change: Set 'maxmin/precision' to '0.000010'
-> [0.000000] [xbt_cfg/INFO] Configuration change: Set 'network/model' to 'Vegas'
 > Workstation number: 1, link number: 1
 
diff --git a/teshsuite/simdag/platforms/properties.xml b/teshsuite/simdag/platforms/properties.xml
index d52a0edfc2..50de5a4bf7 100644
--- a/teshsuite/simdag/platforms/properties.xml
+++ b/teshsuite/simdag/platforms/properties.xml
@@ -5,7 +5,6 @@
-
diff --git a/teshsuite/surf/lmm_usage/lmm_usage.cpp b/teshsuite/surf/lmm_usage/lmm_usage.cpp
index 7c7c8c6fd9..95644ca0d8 100644
--- a/teshsuite/surf/lmm_usage/lmm_usage.cpp
+++ b/teshsuite/surf/lmm_usage/lmm_usage.cpp
@@ -25,20 +25,9 @@ namespace lmm = simgrid::kernel::lmm;
 /* ==l1== L2 ==L3== */
 /* ------ */
 
-enum method_t { MAXMIN, LAGRANGE_RENO, LAGRANGE_VEGAS };
-
-static lmm::System* new_system(method_t method)
+static lmm::System* new_system()
 {
-  /* selective update would need real actions instead of NULL as a first parameter to the variable constructor */
-  switch (method) {
-    case MAXMIN:
-      return lmm::make_new_maxmin_system(false);
-    case LAGRANGE_VEGAS:
-    case LAGRANGE_RENO:
-      return lmm::make_new_lagrange_system(false);
-    default:
-      xbt_die("Invalid method");
-  }
+  return lmm::make_new_maxmin_system(false);
 }
 
 double a_test_1 = 0;
@@ -99,17 +88,12 @@ static double dichotomy(double min, double max, double min_error)
   return ((min + max) / 2.0);
 }
 
-static void test1(method_t method)
+static void test1()
 {
   double a = 1.0;
   double b = 10.0;
 
-  if (method == LAGRANGE_VEGAS)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_vegas_f, lmm::func_vegas_fp, lmm::func_vegas_fpi);
-  else if (method == LAGRANGE_RENO)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_reno_f, lmm::func_reno_fp, lmm::func_reno_fpi);
-
-  lmm::System* Sys = new_system(method);
+  lmm::System* Sys = new_system();
   lmm::Constraint* L1 = Sys->constraint_new(nullptr, a);
   lmm::Constraint* L2 = Sys->constraint_new(nullptr, b);
   lmm::Constraint* L3 = Sys->constraint_new(nullptr, a);
@@ -132,49 +116,7 @@ static void test1(method_t method)
   Sys->expand(L2, R_2, 1.0);
   Sys->expand(L3, R_3, 1.0);
 
-  if (method == MAXMIN) {
-    Sys->solve();
-  } else {
-    double x;
-    if (method == LAGRANGE_VEGAS) {
-      x = 3 * a / 4 - 3 * b / 8 + sqrt(9 * b * b + 4 * a * a - 4 * a * b) / 8;
-      /* Computed with mupad and D_f=1.0 */
-      if (x > a) {
-        x = a;
-      }
-      if (x < 0) {
-        x = 0;
-      }
-    } else if (method == LAGRANGE_RENO) {
-      a_test_1 = a;
-      b_test_1 = b;
-      x = dichotomy(0, a, 1e-13);
-
-      if (x < 0)
-        x = 0;
-      if (x > a)
-        x = a;
-    } else {
-      xbt_die("Invalid method");
-    }
-
-    Sys->solve();
-
-    double max_deviation = 0.0;
-    max_deviation = std::max(max_deviation, fabs(R_1->get_value() - x));
-    max_deviation = std::max(max_deviation, fabs(R_3->get_value() - x));
-    max_deviation = std::max(max_deviation, fabs(R_2->get_value() - (b - a + x)));
-    max_deviation = std::max(max_deviation, fabs(R_1_2_3->get_value() - (a - x)));
-
-    if (max_deviation > 0.00001) { // Legacy value used in lagrange.c
-      XBT_WARN("Max Deviation from optimal solution : %g", max_deviation);
-      XBT_WARN("Found x = %1.20f", x);
-      XBT_WARN("Deviation from optimal solution (R_1 = %g): %1.20f", x, R_1->get_value() - x);
-      XBT_WARN("Deviation from optimal solution (R_2 = %g): %1.20f", b - a + x, R_2->get_value() - (b - a + x));
-      XBT_WARN("Deviation from optimal solution (R_3 = %g): %1.20f", x, R_3->get_value() - x);
-      XBT_WARN("Deviation from optimal solution (R_1_2_3 = %g): %1.20f", a - x, R_1_2_3->get_value() - (a - x));
-    }
-  }
+  Sys->solve();
 
   PRINT_VAR(R_1_2_3);
   PRINT_VAR(R_1);
@@ -188,14 +130,9 @@ static void test1(method_t method)
   delete Sys;
 }
 
-static void test2(method_t method)
+static void test2()
 {
-  if (method == LAGRANGE_VEGAS)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_vegas_f, lmm::func_vegas_fp, lmm::func_vegas_fpi);
-  if (method == LAGRANGE_RENO)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_reno_f, lmm::func_reno_fp, lmm::func_reno_fpi);
-
-  lmm::System* Sys = new_system(method);
+  lmm::System* Sys = new_system();
 
   lmm::Constraint* CPU1 = Sys->constraint_new(nullptr, 200.0);
   lmm::Constraint* CPU2 = Sys->constraint_new(nullptr, 100.0);
@@ -219,7 +156,7 @@ static void test2(method_t method)
   delete Sys;
 }
 
-static void test3(method_t method)
+static void test3()
 {
   int flows = 11;
   int links = 10;
@@ -256,12 +193,7 @@ static void test3(method_t method)
   A[13][14] = 1.0;
   A[14][15] = 1.0;
 
-  if (method == LAGRANGE_VEGAS)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_vegas_f, lmm::func_vegas_fp, lmm::func_vegas_fpi);
-  if (method == LAGRANGE_RENO)
-    lmm::Lagrange::set_default_protocol_function(lmm::func_reno_f, lmm::func_reno_fp, lmm::func_reno_fpi);
-
-  lmm::System* Sys = new_system(method);
+  lmm::System* Sys = new_system();
 
   /* Creates the constraints */
   lmm::Constraint** tmp_cnst = new lmm::Constraint*[15];
@@ -299,26 +231,14 @@ int main(int argc, char** argv)
 {
   MSG_init(&argc, argv);
-  XBT_INFO("***** Test 1 (Max-Min)");
-  test1(MAXMIN);
-  XBT_INFO("***** Test 1 (Lagrange - Vegas)");
-  test1(LAGRANGE_VEGAS);
-  XBT_INFO("***** Test 1 (Lagrange - Reno)");
-  test1(LAGRANGE_RENO);
-
-  XBT_INFO("***** Test 2 (Max-Min)");
-  test2(MAXMIN);
-  XBT_INFO("***** Test 2 (Lagrange - Vegas)");
-  test2(LAGRANGE_VEGAS);
-  XBT_INFO("***** Test 2 (Lagrange - Reno)");
-  test2(LAGRANGE_RENO);
-
-  XBT_INFO("***** Test 3 (Max-Min)");
-  test3(MAXMIN);
-  XBT_INFO("***** Test 3 (Lagrange - Vegas)");
-  test3(LAGRANGE_VEGAS);
-  XBT_INFO("***** Test 3 (Lagrange - Reno)");
-  test3(LAGRANGE_RENO);
+  XBT_INFO("***** Test 1");
+  test1();
+
+  XBT_INFO("***** Test 2");
+  test2();
+
+  XBT_INFO("***** Test 3");
+  test3();
 
   return 0;
 }
diff --git a/teshsuite/surf/lmm_usage/lmm_usage.tesh b/teshsuite/surf/lmm_usage/lmm_usage.tesh
index d8f68180a8..1ab150d6b2 100644
--- a/teshsuite/surf/lmm_usage/lmm_usage.tesh
+++ b/teshsuite/surf/lmm_usage/lmm_usage.tesh
@@ -1,12 +1,6 @@
 #!/usr/bin/env tesh
 
 $ ${bindir:=.}/lmm_usage
-> [0.000000] [surf_test/INFO] ***** Test 1 (Max-Min)
-> [0.000000] [surf_test/INFO] ***** Test 1 (Lagrange - Vegas)
-> [0.000000] [surf_test/INFO] ***** Test 1 (Lagrange - Reno)
-> [0.000000] [surf_test/INFO] ***** Test 2 (Max-Min)
-> [0.000000] [surf_test/INFO] ***** Test 2 (Lagrange - Vegas)
-> [0.000000] [surf_test/INFO] ***** Test 2 (Lagrange - Reno)
-> [0.000000] [surf_test/INFO] ***** Test 3 (Max-Min)
-> [0.000000] [surf_test/INFO] ***** Test 3 (Lagrange - Vegas)
-> [0.000000] [surf_test/INFO] ***** Test 3 (Lagrange - Reno)
+> [0.000000] [surf_test/INFO] ***** Test 1
+> [0.000000] [surf_test/INFO] ***** Test 2
+> [0.000000] [surf_test/INFO] ***** Test 3
diff --git a/tools/cmake/DefinePackages.cmake b/tools/cmake/DefinePackages.cmake
index 08015db27f..d8fe6cca94 100644
--- a/tools/cmake/DefinePackages.cmake
+++ b/tools/cmake/DefinePackages.cmake
@@ -305,7 +305,6 @@ set(NS3_SRC src/surf/network_ns3.cpp
 
 set(SURF_SRC
   src/kernel/lmm/fair_bottleneck.cpp
-  src/kernel/lmm/lagrange.cpp
  src/kernel/lmm/maxmin.hpp
  src/kernel/lmm/maxmin.cpp
-- 
2.20.1
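
For the record, the protocol utility functions that lagrange.cpp implemented are easy to restate outside SimGrid. Below is a minimal, self-contained sketch of the Vegas trio quoted above — the utility f(x) = D_f ln(x), its derivative f'(x) = D_f / x, and the inverse of the derivative, which happens to equal f' itself — with a plain double weight standing in for Variable::sharing_weight_ and the VEGAS_SCALING factor dropped. This is an illustration of the removed math, not SimGrid API:

    #include <cassert>
    #include <cmath>
    #include <cstdio>

    // f(x) = D_f * ln(x): the Vegas utility of rate x for a flow of weight D_f
    static double vegas_f(double weight, double x)
    {
      assert(x > 0.0);
      return weight * std::log(x);
    }

    // f'(x) = D_f / x: its derivative
    static double vegas_fp(double weight, double x)
    {
      assert(x > 0.0);
      return weight / x;
    }

    // (f')^{-1}(x) = D_f / x: solving D_f / y = x for y shows f' is its own inverse
    static double vegas_fpi(double weight, double x)
    {
      assert(x > 0.0);
      return weight / x;
    }

    int main()
    {
      const double weight = 2.0; // hypothetical flow weight D_f
      const double rate   = 0.5;
      // Applying f' and then its inverse returns the original rate.
      std::printf("f=%g  f'=%g  (f')^-1(f'(x))=%g\n", vegas_f(weight, rate),
                  vegas_fp(weight, rate), vegas_fpi(weight, vegas_fp(weight, rate)));
    }

The solver used (f')^{-1} exactly this way: new_value() summed the lambda multipliers of a variable's constraints (plus mu for RTT-bound flows) and fed the total to func_fpi to recover the flow's rate. The Reno variants follow the same pattern with atan-based utilities, as quoted in the deleted hunks.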