1 /* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
3 /* This program is free software; you can redistribute it and/or modify it
4 * under the terms of the license (GNU LGPL) which comes with this package. */
7 * Modeling the proportional fairness using the Lagrangian Optimization Approach. For a detailed description see:
8 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
10 #include "src/kernel/lmm/maxmin.hpp"
12 #include "xbt/sysdep.h"
20 XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf, "Logging specific to SURF (lagrange)");
21 XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf_lagrange, "Logging specific to SURF (lagrange dichotomy)");
23 #define SHOW_EXPR(expr) XBT_CDEBUG(surf_lagrange, #expr " = %g", expr);
24 #define VEGAS_SCALING 1000.0
25 #define RENO_SCALING 1.0
26 #define RENO2_SCALING 1.0
32 double (*func_f_def)(const s_lmm_variable_t&, double);
33 double (*func_fp_def)(const s_lmm_variable_t&, double);
34 double (*func_fpi_def)(const s_lmm_variable_t&, double);
37 * Local prototypes to implement the Lagrangian optimization with optimal step, also called dichotomy.
39 // solves the proportional fairness using a Lagrangian optimization with dichotomy step
40 void lagrange_solve(lmm_system_t sys);
41 // computes the value of the dichotomy using a initial values, init, with a specific variable or constraint
42 static double dichotomy(double init, double diff(double, const s_lmm_constraint_t&), const s_lmm_constraint_t& cnst,
44 // computes the value of the differential of constraint cnst applied to lambda
45 static double partial_diff_lambda(double lambda, const s_lmm_constraint_t& cnst);
47 template <class CnstList>
48 static int __check_feasible(const CnstList& cnst_list, xbt_swag_t var_list, int warn)
52 const_xbt_swag_t elem_list = nullptr;
53 lmm_element_t elem = nullptr;
54 lmm_variable_t var = nullptr;
56 for (s_lmm_constraint_t const& cnst : cnst_list) {
58 elem_list = &cnst.enabled_element_set;
59 xbt_swag_foreach(_elem, elem_list)
61 elem = static_cast<lmm_element_t>(_elem);
63 xbt_assert(var->sharing_weight > 0);
67 if (double_positive(tmp - cnst.bound, sg_maxmin_precision)) {
69 XBT_WARN("The link (%p) is over-used. Expected less than %f and got %f", &cnst, cnst.bound, tmp);
72 XBT_DEBUG("Checking feasability for constraint (%p): sat = %f, lambda = %f ", &cnst, tmp - cnst.bound, cnst.lambda);
75 xbt_swag_foreach(_var, var_list)
77 var = static_cast<lmm_variable_t>(_var);
78 if (not var->sharing_weight)
82 XBT_DEBUG("Checking feasability for variable (%p): sat = %f mu = %f", var, var->value - var->bound, var->mu);
84 if (double_positive(var->value - var->bound, sg_maxmin_precision)) {
86 XBT_WARN("The variable (%p) is too large. Expected less than %f and got %f", var, var->bound, var->value);
93 static double new_value(const s_lmm_variable_t& var)
97 for (s_lmm_element_t const& elem : var.cnsts) {
98 tmp += elem.constraint->lambda;
102 XBT_DEBUG("\t Working on var (%p). cost = %e; Weight = %e", &var, tmp, var.sharing_weight);
103 // uses the partial differential inverse function
104 return var.func_fpi(var, tmp);
107 static double new_mu(const s_lmm_variable_t& var)
110 double sigma_i = 0.0;
112 for (s_lmm_element_t const& elem : var.cnsts) {
113 sigma_i += elem.constraint->lambda;
115 mu_i = var.func_fp(var, var.bound) - sigma_i;
121 template <class CnstList>
122 static double dual_objective(xbt_swag_t var_list, const CnstList& cnst_list)
125 lmm_variable_t var = nullptr;
129 xbt_swag_foreach(_var, var_list)
131 var = static_cast<lmm_variable_t>(_var);
132 double sigma_i = 0.0;
134 if (not var->sharing_weight)
137 for (s_lmm_element_t const& elem : var->cnsts)
138 sigma_i += elem.constraint->lambda;
143 XBT_DEBUG("var %p : sigma_i = %1.20f", var, sigma_i);
145 obj += var->func_f(*var, var->func_fpi(*var, sigma_i)) - sigma_i * var->func_fpi(*var, sigma_i);
148 obj += var->mu * var->bound;
151 for (s_lmm_constraint_t const& cnst : cnst_list)
152 obj += cnst.lambda * cnst.bound;
157 void lagrange_solve(lmm_system_t sys)
159 /* Lagrange Variables. */
160 int max_iterations = 100;
161 double epsilon_min_error = 0.00001; /* this is the precision on the objective function so it's none of the
162 configurable values and this value is the legacy one */
163 double dichotomy_min_error = 1e-14;
164 double overall_modification = 1;
166 XBT_DEBUG("Iterative method configuration snapshot =====>");
167 XBT_DEBUG("#### Maximum number of iterations : %d", max_iterations);
168 XBT_DEBUG("#### Minimum error tolerated : %e", epsilon_min_error);
169 XBT_DEBUG("#### Minimum error tolerated (dichotomy) : %e", dichotomy_min_error);
171 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
175 if (not sys->modified)
178 /* Initialize lambda. */
179 auto& cnst_list = sys->active_constraint_set;
180 for (s_lmm_constraint_t& cnst : cnst_list) {
182 cnst.new_lambda = 2.0;
183 XBT_DEBUG("#### cnst(%p)->lambda : %e", &cnst, cnst.lambda);
187 * Initialize the var list variable with only the active variables.
188 * Associate an index in the swag variables. Initialize mu.
190 xbt_swag_t var_list = &(sys->variable_set);
192 xbt_swag_foreach(_var, var_list)
194 lmm_variable_t var = static_cast<lmm_variable_t>(_var);
195 if (not var->sharing_weight)
198 if (var->bound < 0.0) {
199 XBT_DEBUG("#### NOTE var(%p) is a boundless variable", var);
205 var->value = new_value(*var);
206 XBT_DEBUG("#### var(%p) ->weight : %e", var, var->sharing_weight);
207 XBT_DEBUG("#### var(%p) ->mu : %e", var, var->mu);
208 XBT_DEBUG("#### var(%p) ->weight: %e", var, var->sharing_weight);
209 XBT_DEBUG("#### var(%p) ->bound: %e", var, var->bound);
210 auto weighted = std::find_if(begin(var->cnsts), end(var->cnsts),
211 [](s_lmm_element_t const& x) { return x.consumption_weight != 0.0; });
212 if (weighted == end(var->cnsts))
217 /* Compute dual objective. */
218 double obj = dual_objective(var_list, cnst_list);
220 /* While doesn't reach a minimum error or a number maximum of iterations. */
222 while (overall_modification > epsilon_min_error && iteration < max_iterations) {
224 XBT_DEBUG("************** ITERATION %d **************", iteration);
225 XBT_DEBUG("-------------- Gradient Descent ----------");
227 /* Improve the value of mu_i */
228 xbt_swag_foreach(_var, var_list)
230 lmm_variable_t var = static_cast<lmm_variable_t>(_var);
231 if (var->sharing_weight && var->bound >= 0) {
232 XBT_DEBUG("Working on var (%p)", var);
233 var->new_mu = new_mu(*var);
234 XBT_DEBUG("Updating mu : var->mu (%p) : %1.20f -> %1.20f", var, var->mu, var->new_mu);
235 var->mu = var->new_mu;
237 double new_obj = dual_objective(var_list, cnst_list);
238 XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
239 xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
244 /* Improve the value of lambda_i */
245 for (s_lmm_constraint_t& cnst : cnst_list) {
246 XBT_DEBUG("Working on cnst (%p)", &cnst);
247 cnst.new_lambda = dichotomy(cnst.lambda, partial_diff_lambda, cnst, dichotomy_min_error);
248 XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", &cnst, cnst.lambda, cnst.new_lambda);
249 cnst.lambda = cnst.new_lambda;
251 double new_obj = dual_objective(var_list, cnst_list);
252 XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
253 xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
257 /* Now computes the values of each variable (\rho) based on the values of \lambda and \mu. */
258 XBT_DEBUG("-------------- Check convergence ----------");
259 overall_modification = 0;
260 xbt_swag_foreach(_var, var_list)
262 lmm_variable_t var = static_cast<lmm_variable_t>(_var);
263 if (var->sharing_weight <= 0)
266 double tmp = new_value(*var);
268 overall_modification = std::max(overall_modification, fabs(var->value - tmp));
271 XBT_DEBUG("New value of var (%p) = %e, overall_modification = %e", var, var->value, overall_modification);
275 XBT_DEBUG("-------------- Check feasability ----------");
276 if (not __check_feasible(cnst_list, var_list, 0))
277 overall_modification = 1.0;
278 XBT_DEBUG("Iteration %d: overall_modification : %f", iteration, overall_modification);
281 __check_feasible(cnst_list, var_list, 1);
283 if (overall_modification <= epsilon_min_error) {
284 XBT_DEBUG("The method converges in %d iterations.", iteration);
286 if (iteration >= max_iterations) {
287 XBT_DEBUG("Method reach %d iterations, which is the maximum number of iterations allowed.", iteration);
290 if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
296 * Returns a double value corresponding to the result of a dichotomy process with respect to a given
297 * variable/constraint (\mu in the case of a variable or \lambda in case of a constraint) and a initial value init.
299 * @param init initial value for \mu or \lambda
300 * @param diff a function that computes the differential of with respect a \mu or \lambda
301 * @param var_cnst a pointer to a variable or constraint
302 * @param min_erro a minimum error tolerated
304 * @return a double corresponding to the result of the dichotomy process
306 static double dichotomy(double init, double diff(double, const s_lmm_constraint_t&), const s_lmm_constraint_t& cnst,
311 double overall_error;
318 if (fabs(init) < 1e-20) {
325 diff_0 = diff(1e-16, cnst);
327 XBT_CDEBUG(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)", diff_0);
332 double min_diff = diff(min, cnst);
333 double max_diff = diff(max, cnst);
335 while (overall_error > min_error) {
336 XBT_CDEBUG(surf_lagrange_dichotomy, "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f", min, max,
339 if (min_diff > 0 && max_diff > 0) {
341 XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing min");
343 min_diff = diff(min, cnst);
345 XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
349 } else if (min_diff < 0 && max_diff < 0) {
351 XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing max");
353 max_diff = diff(max, cnst);
355 XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
359 } else if (min_diff < 0 && max_diff > 0) {
360 middle = (max + min) / 2.0;
361 XBT_CDEBUG(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f", middle);
363 if ((fabs(min - middle) < 1e-20) || (fabs(max - middle) < 1e-20)) {
364 XBT_CWARN(surf_lagrange_dichotomy,
365 "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
366 " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
367 min, max - min, min_diff, max_diff);
370 middle_diff = diff(middle, cnst);
372 if (middle_diff < 0) {
373 XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
375 overall_error = max_diff - middle_diff;
376 min_diff = middle_diff;
377 } else if (middle_diff > 0) {
378 XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
380 overall_error = max_diff - middle_diff;
381 max_diff = middle_diff;
385 } else if (fabs(min_diff) < 1e-20) {
388 } else if (fabs(max_diff) < 1e-20) {
391 } else if (min_diff > 0 && max_diff < 0) {
392 XBT_CWARN(surf_lagrange_dichotomy, "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
395 XBT_CWARN(surf_lagrange_dichotomy,
396 "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.", min_diff,
402 XBT_CDEBUG(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
404 return ((min + max) / 2.0);
407 static double partial_diff_lambda(double lambda, const s_lmm_constraint_t& cnst)
413 XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", &cnst);
415 const_xbt_swag_t elem_list = &cnst.enabled_element_set;
417 xbt_swag_foreach(_elem, elem_list)
419 lmm_element_t elem = static_cast<lmm_element_t>(_elem);
420 lmm_variable_t var = elem->variable;
421 xbt_assert(var->sharing_weight > 0);
422 XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", var);
423 // Initialize the summation variable
424 double sigma_i = 0.0;
427 for (s_lmm_element_t const& elem : var->cnsts) {
428 sigma_i += elem.constraint->lambda;
431 // add mu_i if this flow has a RTT constraint associated
435 // replace value of cnst.lambda by the value of parameter lambda
436 sigma_i = (sigma_i - cnst.lambda) + lambda;
438 diff += -var->func_fpi(*var, sigma_i);
443 XBT_CDEBUG(surf_lagrange_dichotomy, "d D/d lambda for cnst (%p) at %1.20f = %1.20f", &cnst, lambda, diff);
448 /** \brief Attribute the value bound to var->bound.
450 * \param func_fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
452 * Set default functions to the ones passed as parameters. This is a polymorphism in C pure, enjoy the roots of
456 void lmm_set_default_protocol_function(double (*func_f)(const s_lmm_variable_t& var, double x),
457 double (*func_fp)(const s_lmm_variable_t& var, double x),
458 double (*func_fpi)(const s_lmm_variable_t& var, double x))
461 func_fp_def = func_fp;
462 func_fpi_def = func_fpi;
465 /**************** Vegas and Reno functions *************************/
466 /* NOTE for Reno: all functions consider the network coefficient (alpha) equal to 1. */
469 * For Vegas: $f(x) = \alpha D_f\ln(x)$
470 * Therefore: $fp(x) = \frac{\alpha D_f}{x}$
471 * Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
473 double func_vegas_f(const s_lmm_variable_t& var, double x)
475 xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
476 return VEGAS_SCALING * var.sharing_weight * log(x);
479 double func_vegas_fp(const s_lmm_variable_t& var, double x)
481 xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
482 return VEGAS_SCALING * var.sharing_weight / x;
485 double func_vegas_fpi(const s_lmm_variable_t& var, double x)
487 xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
488 return var.sharing_weight / (x / VEGAS_SCALING);
492 * For Reno: $f(x) = \frac{\sqrt{3/2}}{D_f} atan(\sqrt{3/2}D_f x)$
493 * Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2+2}$
494 * Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
496 double func_reno_f(const s_lmm_variable_t& var, double x)
498 xbt_assert(var.sharing_weight > 0.0, "Don't call me with stupid values!");
500 return RENO_SCALING * sqrt(3.0 / 2.0) / var.sharing_weight * atan(sqrt(3.0 / 2.0) * var.sharing_weight * x);
503 double func_reno_fp(const s_lmm_variable_t& var, double x)
505 return RENO_SCALING * 3.0 / (3.0 * var.sharing_weight * var.sharing_weight * x * x + 2.0);
508 double func_reno_fpi(const s_lmm_variable_t& var, double x)
512 xbt_assert(var.sharing_weight > 0.0, "Don't call me with stupid values!");
513 xbt_assert(x > 0.0, "Don't call me with stupid values!");
515 res_fpi = 1.0 / (var.sharing_weight * var.sharing_weight * (x / RENO_SCALING)) -
516 2.0 / (3.0 * var.sharing_weight * var.sharing_weight);
519 return sqrt(res_fpi);
522 /* Implementing new Reno-2
523 * For Reno-2: $f(x) = U_f(x_f) = \frac{{2}{D_f}}*ln(2+x*D_f)$
524 * Therefore: $fp(x) = 2/(Weight*x + 2)
525 * Therefore: $fpi(x) = (2*Weight)/x - 4
527 double func_reno2_f(const s_lmm_variable_t& var, double x)
529 xbt_assert(var.sharing_weight > 0.0, "Don't call me with stupid values!");
530 return RENO2_SCALING * (1.0 / var.sharing_weight) *
531 log((x * var.sharing_weight) / (2.0 * x * var.sharing_weight + 3.0));
534 double func_reno2_fp(const s_lmm_variable_t& var, double x)
536 return RENO2_SCALING * 3.0 / (var.sharing_weight * x * (2.0 * var.sharing_weight * x + 3.0));
539 double func_reno2_fpi(const s_lmm_variable_t& var, double x)
541 xbt_assert(x > 0.0, "Don't call me with stupid values!");
542 double tmp = x * var.sharing_weight * var.sharing_weight;
543 double res_fpi = tmp * (9.0 * x + 24.0);
548 res_fpi = RENO2_SCALING * (-3.0 * tmp + sqrt(res_fpi)) / (4.0 * tmp);