/* Copyright (c) 2007-2019. The SimGrid Team. All rights reserved. */

/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
/*
 * Modeling the proportional fairness using the Lagrangian Optimization Approach. For a detailed description see:
 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
 */
#include "src/kernel/lmm/maxmin.hpp"
#include "src/surf/surf_interface.hpp"
#include "xbt/log.h"
#include "xbt/sysdep.h"

#include <algorithm>
#include <cmath>
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf, "Logging specific to SURF (lagrange)");
XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf_lagrange, "Logging specific to SURF (lagrange dichotomy)");

static constexpr double VEGAS_SCALING = 1000.0;
static constexpr double RENO_SCALING  = 1.0;
static constexpr double RENO2_SCALING = 1.0;
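/* These scaling factors multiply the protocol utility functions below. Scaling a utility by a positive constant does
 * not change the optimal allocation, but it can improve the numerical behavior of the dichotomy, which warns when it
 * reaches the limits of double precision. */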
namespace simgrid {
namespace kernel {
namespace lmm {

System* make_new_lagrange_system(bool selective_update)
{
  return new Lagrange(selective_update);
}
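/* Checks whether the current allocation is primal-feasible: the total usage of every constraint (link) must stay
 * below its bound, and every bounded variable (flow) must stay below its own bound, up to sg_maxmin_precision. */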
bool Lagrange::check_feasible(bool warn)
{
  for (Constraint const& cnst : active_constraint_set) {
    double tmp = 0;
    for (Element const& elem : cnst.enabled_element_set_) {
      Variable* var = elem.variable;
      xbt_assert(var->sharing_weight_ > 0);
      tmp += var->value_;
    }

    if (double_positive(tmp - cnst.bound_, sg_maxmin_precision)) {
      if (warn)
        XBT_WARN("The link (%p) is over-used. Expected less than %f and got %f", &cnst, cnst.bound_, tmp);
      return false;
    }
    XBT_DEBUG("Checking feasibility for constraint (%p): sat = %f, lambda = %f ", &cnst, tmp - cnst.bound_,
              cnst.lambda_);
  }

  for (Variable const& var : variable_set) {
    if (not var.sharing_weight_)
      break;
    if (var.bound_ < 0)
      continue;
    XBT_DEBUG("Checking feasibility for variable (%p): sat = %f mu = %f", &var, var.value_ - var.bound_, var.mu_);

    if (double_positive(var.value_ - var.bound_, sg_maxmin_precision)) {
      if (warn)
        XBT_WARN("The variable (%p) is too large. Expected less than %f and got %f", &var, var.bound_, var.value_);
      return false;
    }
  }
  return true;
}
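/* Computes the best-response rate of a flow for the current multipliers: rho_f = (f')^{-1}(sigma_f), where sigma_f
 * sums the lambda of every constraint used by the flow, plus its mu if the flow is bounded. */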
double Lagrange::new_value(const Variable& var)
{
  double tmp = 0;

  for (Element const& elem : var.cnsts_) {
    tmp += elem.constraint->lambda_;
  }
  if (var.bound_ > 0)
    tmp += var.mu_;
  XBT_DEBUG("\t Working on var (%p). cost = %e; Weight = %e", &var, tmp, var.sharing_weight_);
  // uses the partial differential inverse function
  return func_fpi(var, tmp);
}
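/* Computes the updated mu of a bounded flow as mu_f = f'(bound_f) - sigma_f, clamped to 0 since the multiplier of an
 * inequality constraint cannot be negative. */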
double Lagrange::new_mu(const Variable& var)
{
  double mu_i    = 0.0;
  double sigma_i = 0.0;

  for (Element const& elem : var.cnsts_) {
    sigma_i += elem.constraint->lambda_;
  }
  mu_i = func_fp(var, var.bound_) - sigma_i;
  if (mu_i < 0.0)
    return 0.0;
  return mu_i;
}
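/* Evaluates the Lagrangian dual objective for the current multipliers:
 *   D(lambda, mu) = sum_f [ f(rho_f) - sigma_f rho_f + mu_f bound_f ] + sum_l lambda_l C_l
 * with rho_f = (f')^{-1}(sigma_f) the maximizing rate of flow f. */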
double Lagrange::dual_objective()
{
  double obj = 0.0;

  for (Variable const& var : variable_set) {
    double sigma_i = 0.0;

    if (not var.sharing_weight_)
      break;

    for (Element const& elem : var.cnsts_)
      sigma_i += elem.constraint->lambda_;

    if (var.bound_ > 0)
      sigma_i += var.mu_;

    XBT_DEBUG("var %p : sigma_i = %1.20f", &var, sigma_i);

    obj += func_f(var, func_fpi(var, sigma_i)) - sigma_i * func_fpi(var, sigma_i);

    if (var.bound_ > 0)
      obj += var.mu_ * var.bound_;
  }

  for (Constraint const& cnst : active_constraint_set)
    obj += cnst.lambda_ * cnst.bound_;

  return obj;
}
// solves the proportional fairness using a Lagrangian optimization with dichotomy step
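/* Block-coordinate descent on the Lagrangian dual:
 *   1. update the mu of each bounded variable analytically (new_mu);
 *   2. update the lambda of each constraint by dichotomy on d D/d lambda (dichotomy);
 *   3. recompute the primal rates rho from the multipliers (new_value);
 * and iterate until the rates stop moving or max_iterations is reached. No single update may increase the dual
 * objective (up to epsilon_min_error); this is asserted after each step. */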
void Lagrange::lagrange_solve()
{
  /* Lagrange Variables. */
  int max_iterations          = 100;
  double epsilon_min_error    = 0.00001; /* this is the precision on the objective function; it is not one of the
                                            configurable values, it is the legacy one */
  double dichotomy_min_error  = 1e-14;
  double overall_modification = 1;
  XBT_DEBUG("Iterative method configuration snapshot =====>");
  XBT_DEBUG("#### Maximum number of iterations        : %d", max_iterations);
  XBT_DEBUG("#### Minimum error tolerated             : %e", epsilon_min_error);
  XBT_DEBUG("#### Minimum error tolerated (dichotomy) : %e", dichotomy_min_error);

  if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
    print();
  }

  if (not modified_)
    return;
  /* Initialize lambda. */
  for (Constraint& cnst : active_constraint_set) {
    cnst.lambda_     = 1.0;
    cnst.new_lambda_ = 2.0;
    XBT_DEBUG("#### cnst(%p)->lambda : %e", &cnst, cnst.lambda_);
  }
  /*
   * Initialize the active variables. Initialize mu.
   */
  for (Variable& var : variable_set) {
    if (not var.sharing_weight_)
      var.value_ = 0.0;
    else {
      if (var.bound_ < 0.0) {
        XBT_DEBUG("#### NOTE var(%p) is a boundless variable", &var);
        var.mu_ = -1.0;
      } else {
        var.mu_     = 1.0;
        var.new_mu_ = 2.0;
      }
      var.value_ = new_value(var);
      XBT_DEBUG("#### var(%p) ->weight : %e", &var, var.sharing_weight_);
      XBT_DEBUG("#### var(%p) ->mu     : %e", &var, var.mu_);
      XBT_DEBUG("#### var(%p) ->bound  : %e", &var, var.bound_);
      auto weighted = std::find_if(begin(var.cnsts_), end(var.cnsts_),
                                   [](Element const& x) { return x.consumption_weight != 0.0; });
      if (weighted == end(var.cnsts_))
        var.value_ = 1.0;
    }
  }
  /* Compute dual objective. */
  double obj = dual_objective();

  /* Loop while we have not reached the minimum error nor the maximum number of iterations. */
  int iteration = 0;
  while (overall_modification > epsilon_min_error && iteration < max_iterations) {
    iteration++;
    XBT_DEBUG("************** ITERATION %d **************", iteration);
    XBT_DEBUG("-------------- Gradient Descent ----------");
    /* Improve the value of mu_i */
    for (Variable& var : variable_set) {
      if (var.sharing_weight_ && var.bound_ >= 0) {
        XBT_DEBUG("Working on var (%p)", &var);
        var.new_mu_ = new_mu(var);
        XBT_DEBUG("Updating mu : var->mu (%p) : %1.20f -> %1.20f", &var, var.mu_, var.new_mu_);
        var.mu_ = var.new_mu_;

        double new_obj = dual_objective();
        XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
        xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
        obj = new_obj;
      }
    }
    /* Improve the value of lambda_i */
    for (Constraint& cnst : active_constraint_set) {
      XBT_DEBUG("Working on cnst (%p)", &cnst);
      cnst.new_lambda_ = dichotomy(cnst.lambda_, cnst, dichotomy_min_error);
      XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", &cnst, cnst.lambda_, cnst.new_lambda_);
      cnst.lambda_ = cnst.new_lambda_;

      double new_obj = dual_objective();
      XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
      xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
      obj = new_obj;
    }
    /* Now compute the values of each variable (@rho) based on the values of @lambda and @mu. */
    XBT_DEBUG("-------------- Check convergence ----------");
    overall_modification = 0;
    for (Variable& var : variable_set) {
      if (var.sharing_weight_ <= 0)
        var.value_ = 0.0;
      else {
        double tmp = new_value(var);

        overall_modification = std::max(overall_modification, fabs(var.value_ - tmp));

        var.value_ = tmp;
        XBT_DEBUG("New value of var (%p) = %e, overall_modification = %e", &var, var.value_, overall_modification);
      }
    }
    XBT_DEBUG("-------------- Check feasibility ----------");
    if (not check_feasible(false))
      overall_modification = 1.0;
    XBT_DEBUG("Iteration %d: overall_modification : %f", iteration, overall_modification);
  }
  check_feasible(true);

  if (overall_modification <= epsilon_min_error) {
    XBT_DEBUG("The method converges in %d iterations.", iteration);
  }
  if (iteration >= max_iterations) {
    XBT_DEBUG("Method reached %d iterations, which is the maximum number of iterations allowed.", iteration);
  }

  if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
    print();
  }
}
/*
 * Returns a double value corresponding to the result of a dichotomy process with respect to a given
 * variable/constraint (@mu in the case of a variable or @lambda in case of a constraint) and an initial value init.
 *
 * @param init initial value for @lambda
 * @param cnst the constraint whose @lambda is adjusted
 * @param min_error the minimum error tolerated
 *
 * @return a double corresponding to the result of the dichotomy process
 */
double Lagrange::dichotomy(double init, const Constraint& cnst, double min_error)
{
  double min = init;
  double max = init;
  double overall_error;
  double middle;
  double middle_diff;
  double diff_0 = 0.0;

  if (fabs(init) < 1e-20) {
    min = 0.5;
    max = 2.0;
  }

  overall_error = 1;

  diff_0 = partial_diff_lambda(1e-16, cnst);
  if (diff_0 >= 0) {
    XBT_CDEBUG(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)", diff_0);
    return 0.0;
  }

  double min_diff = partial_diff_lambda(min, cnst);
  double max_diff = partial_diff_lambda(max, cnst);
  while (overall_error > min_error) {
    XBT_CDEBUG(surf_lagrange_dichotomy, "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f", min, max,
               min_diff, max_diff);

    if (min_diff > 0 && max_diff > 0) {
      if (min == max) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing min");
        min      = min / 2.0;
        min_diff = partial_diff_lambda(min, cnst);
      } else {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
        max      = min;
        max_diff = min_diff;
      }
    } else if (min_diff < 0 && max_diff < 0) {
      if (min == max) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing max");
        max      = max * 2.0;
        max_diff = partial_diff_lambda(max, cnst);
      } else {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
        min      = max;
        min_diff = max_diff;
      }
    } else if (min_diff < 0 && max_diff > 0) {
      middle = (max + min) / 2.0;
      XBT_CDEBUG(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f", middle);

      if ((fabs(min - middle) < 1e-20) || (fabs(max - middle) < 1e-20)) {
        XBT_CWARN(surf_lagrange_dichotomy,
                  "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
                  " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
                  min, max - min, min_diff, max_diff);
        break;
      }
      middle_diff = partial_diff_lambda(middle, cnst);
      if (middle_diff < 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
        min           = middle;
        overall_error = max_diff - middle_diff;
        min_diff      = middle_diff;
      } else if (middle_diff > 0) {
        XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
        max           = middle;
        overall_error = max_diff - middle_diff;
        max_diff      = middle_diff;
      } else {
        overall_error = 0;
      }
    } else if (fabs(min_diff) < 1e-20) {
      max           = min;
      overall_error = 0;
    } else if (fabs(max_diff) < 1e-20) {
      min           = max;
      overall_error = 0;
    } else if (min_diff > 0 && max_diff < 0) {
      XBT_CWARN(surf_lagrange_dichotomy, "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
      xbt_abort();
    } else {
      XBT_CWARN(surf_lagrange_dichotomy,
                "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.", min_diff,
                max_diff);
      xbt_abort();
    }
  }

  XBT_CDEBUG(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
  return ((min + max) / 2.0);
}
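/* Partial derivative of the dual objective with respect to the lambda of one constraint:
 *   d D / d lambda_l = C_l - sum over the flows f crossing l of (f')^{-1}(sigma_f)
 * where sigma_f is evaluated with cnst.lambda_ replaced by the candidate value lambda. */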
double Lagrange::partial_diff_lambda(double lambda, const Constraint& cnst)
{
  double diff = 0.0;

  XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", &cnst);

  for (Element const& elem : cnst.enabled_element_set_) {
    Variable& var = *elem.variable;
    xbt_assert(var.sharing_weight_ > 0);
    XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", &var);
    // Initialize the summation variable
    double sigma_i = 0.0;

    // Compute sigma_i
    for (Element const& elem2 : var.cnsts_)
      sigma_i += elem2.constraint->lambda_;

    // add mu_i if this flow has a RTT constraint associated
    if (var.bound_ > 0)
      sigma_i += var.mu_;

    // replace value of cnst.lambda by the value of parameter lambda
    sigma_i = (sigma_i - cnst.lambda_) + lambda;

    diff += -func_fpi(var, sigma_i);
  }

  diff += cnst.bound_;

  XBT_CDEBUG(surf_lagrange_dichotomy, "d D/d lambda for cnst (%p) at %1.20f = %1.20f", &cnst, lambda, diff);
  return diff;
}
/** @brief Set the default protocol functions (the utility and its derivatives).
 *
 * @param f   function (f)
 * @param fp  partial differential of f (f prime, (f'))
 * @param fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
 */
void Lagrange::set_default_protocol_function(double (*f)(const Variable& var, double x),
                                             double (*fp)(const Variable& var, double x),
                                             double (*fpi)(const Variable& var, double x))
{
  func_f   = f;
  func_fp  = fp;
  func_fpi = fpi;
}
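/* For instance, a Reno-based network model would register its utility functions with:
 *   Lagrange::set_default_protocol_function(func_reno_f, func_reno_fp, func_reno_fpi);
 */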
double (*Lagrange::func_f)(const Variable&, double);
double (*Lagrange::func_fp)(const Variable&, double);
double (*Lagrange::func_fpi)(const Variable&, double);

/**************** Vegas and Reno functions *************************/
/* NOTE for Reno: all functions consider the network coefficient (alpha) equal to 1. */
/*
 * For Vegas:  $f(x)   = @alpha D_f @ln(x)$
 * Therefore:  $fp(x)  = @frac{@alpha D_f}{x}$
 * Therefore:  $fpi(x) = @frac{@alpha D_f}{x}$
 */
double func_vegas_f(const Variable& var, double x)
{
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return VEGAS_SCALING * var.sharing_weight_ * log(x);
}

double func_vegas_fp(const Variable& var, double x)
{
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return VEGAS_SCALING * var.sharing_weight_ / x;
}

double func_vegas_fpi(const Variable& var, double x)
{
  xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
  return var.sharing_weight_ / (x / VEGAS_SCALING);
}
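/* Note that func_vegas_fpi is consistent with func_vegas_fp: fp(rho) = VEGAS_SCALING * D_f / rho, so solving
 * fp(rho) = x for rho gives rho = D_f / (x / VEGAS_SCALING). */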
/*
 * For Reno:   $f(x)   = @frac{@sqrt{3/2}}{D_f} atan(@sqrt{3/2} D_f x)$
 * Therefore:  $fp(x)  = @frac{3}{3 D_f^2 x^2 + 2}$
 * Therefore:  $fpi(x) = @sqrt{@frac{1}{{D_f}^2 x} - @frac{2}{3 {D_f}^2}}$
 */
double func_reno_f(const Variable& var, double x)
{
  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");

  return RENO_SCALING * sqrt(3.0 / 2.0) / var.sharing_weight_ * atan(sqrt(3.0 / 2.0) * var.sharing_weight_ * x);
}

double func_reno_fp(const Variable& var, double x)
{
  return RENO_SCALING * 3.0 / (3.0 * var.sharing_weight_ * var.sharing_weight_ * x * x + 2.0);
}

double func_reno_fpi(const Variable& var, double x)
{
  double res_fpi;

  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");
  xbt_assert(x > 0.0, "Don't call me with stupid values!");

  res_fpi = 1.0 / (var.sharing_weight_ * var.sharing_weight_ * (x / RENO_SCALING)) -
            2.0 / (3.0 * var.sharing_weight_ * var.sharing_weight_);
  if (res_fpi <= 0.0)
    return 0.0;
  return sqrt(res_fpi);
}
/* Implementing new Reno-2
 * For Reno-2: $f(x)   = U_f(x_f) = @frac{1}{D_f} @ln@frac{D_f x}{2 D_f x + 3}$
 * Therefore:  $fp(x)  = @frac{3}{D_f x (2 D_f x + 3)}$
 * Therefore:  $fpi(x) = @frac{-3 D_f^2 x + @sqrt{D_f^2 x (9 x + 24)}}{4 D_f^2 x}$ (as implemented below)
 */
double func_reno2_f(const Variable& var, double x)
{
  xbt_assert(var.sharing_weight_ > 0.0, "Don't call me with stupid values!");
  return RENO2_SCALING * (1.0 / var.sharing_weight_) *
         log((x * var.sharing_weight_) / (2.0 * x * var.sharing_weight_ + 3.0));
}

double func_reno2_fp(const Variable& var, double x)
{
  return RENO2_SCALING * 3.0 / (var.sharing_weight_ * x * (2.0 * var.sharing_weight_ * x + 3.0));
}
double func_reno2_fpi(const Variable& var, double x)
{
  xbt_assert(x > 0.0, "Don't call me with stupid values!");
  double tmp     = x * var.sharing_weight_ * var.sharing_weight_;
  double res_fpi = tmp * (9.0 * x + 24.0);

  if (res_fpi <= 0.0)
    return 0.0;

  res_fpi = RENO2_SCALING * (-3.0 * tmp + sqrt(res_fpi)) / (4.0 * tmp);
  return res_fpi;
}

} // namespace lmm
} // namespace kernel
} // namespace simgrid