/* Copyright (c) 2007-2017. The SimGrid Team. All rights reserved. */
/* This program is free software; you can redistribute it and/or modify it
 * under the terms of the license (GNU LGPL) which comes with this package. */
/*
 * Modeling the proportional fairness using the Lagrangian Optimization Approach. For a detailed description see:
 * "ssh://username@scm.gforge.inria.fr/svn/memo/people/pvelho/lagrange/ppf.ps".
 */
#include "xbt/sysdep.h"
#include "maxmin_private.hpp"
XBT_LOG_NEW_DEFAULT_SUBCATEGORY(surf_lagrange, surf, "Logging specific to SURF (lagrange)");
XBT_LOG_NEW_SUBCATEGORY(surf_lagrange_dichotomy, surf_lagrange, "Logging specific to SURF (lagrange dichotomy)");
#define SHOW_EXPR(expr) XBT_CDEBUG(surf_lagrange, #expr " = %g", expr);
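/* Default protocol functions: f, its derivative f' and the inverse (f')^{-1}.
 * They are set by lmm_set_default_protocol_function() later in this file. */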
double (*func_f_def) (lmm_variable_t, double);
double (*func_fp_def) (lmm_variable_t, double);
double (*func_fpi_def) (lmm_variable_t, double);
/*
 * Local prototypes to implement the Lagrangian optimization with optimal step, also called dichotomy.
 */
// Solves the proportional fairness problem using a Lagrangian optimization with a dichotomy step
void lagrange_solve(lmm_system_t sys);
// Computes the value of the dichotomy, starting from the initial value init, for a specific variable or constraint
static double dichotomy(double init, double diff(double, void *), void *var_cnst, double min_error);
// Computes the value of the differential of constraint param_cnst applied to lambda
static double partial_diff_lambda(double lambda, void *param_cnst);
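/* Overview of the solver below: starting from an initial guess of the multipliers, lagrange_solve() alternates
 * (1) a gradient step on every mu_i (one multiplier per bounded variable, see new_mu()) and
 * (2) a dichotomy step on every lambda_l (one multiplier per constraint, see dichotomy() / partial_diff_lambda()),
 * re-evaluating the dual objective after each step, until the overall modification falls below epsilon_min_error
 * or max_iterations is reached. */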
static int __check_feasible(xbt_swag_t cnst_list, xbt_swag_t var_list, int warn)
void *_cnst, *_elem, *_var;
xbt_swag_t elem_list = nullptr;
lmm_element_t elem = nullptr;
lmm_constraint_t cnst = nullptr;
lmm_variable_t var = nullptr;
xbt_swag_foreach(_cnst, cnst_list) {
cnst = static_cast<lmm_constraint_t>(_cnst);
elem_list = &(cnst->enabled_element_set);
xbt_swag_foreach(_elem, elem_list) {
elem = static_cast<lmm_element_t>(_elem);
xbt_assert(var->sharing_weight > 0);
if (double_positive(tmp - cnst->bound, sg_maxmin_precision)) {
XBT_WARN("The link (%p) is over-used. Expected less than %f and got %f", cnst, cnst->bound, tmp);
XBT_DEBUG("Checking feasibility for constraint (%p): sat = %f, lambda = %f ", cnst, tmp - cnst->bound,
xbt_swag_foreach(_var, var_list) {
var = static_cast<lmm_variable_t>(_var);
if (not var->sharing_weight)
XBT_DEBUG("Checking feasibility for variable (%p): sat = %f mu = %f", var, var->value - var->bound, var->mu);
if (double_positive(var->value - var->bound, sg_maxmin_precision)) {
XBT_WARN("The variable (%p) is too large. Expected less than %f and got %f", var, var->bound, var->value);
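/* Compute the new primal value of a variable from the current multipliers: rho_i = (f_i')^{-1}(sigma_i), where
 * sigma_i accumulates the lambdas of the constraints this variable crosses (see the loop below). */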
static double new_value(lmm_variable_t var)
for (int i = 0; i < var->cnsts_number; i++) {
tmp += (var->cnsts[i].constraint)->lambda;
XBT_DEBUG("\t Working on var (%p). cost = %e; Weight = %e", var, tmp, var->sharing_weight);
// uses the partial differential inverse function
return var->func_fpi(var, tmp);
static double new_mu(lmm_variable_t var)
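// Gradient step on mu_i: pick mu_i so that f_i'(bound_i) = sigma_i + mu_i, i.e. enforce the dual stationarity
// condition at the variable's bound (see the last assignment of this function).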
double sigma_i = 0.0;
for (int j = 0; j < var->cnsts_number; j++) {
sigma_i += (var->cnsts[j].constraint)->lambda;
mu_i = var->func_fp(var, var->bound) - sigma_i;
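/* Evaluate the Lagrangian dual objective for the current multipliers:
 *   D(lambda, mu) = sum_i [ f_i(rho_i) - sigma_i rho_i + mu_i b_i ] + sum_l lambda_l C_l,
 * with rho_i = (f_i')^{-1}(sigma_i), b_i the bound of variable i and C_l the bound of constraint l;
 * this matches the per-variable and per-constraint terms accumulated below. */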
static double dual_objective(xbt_swag_t var_list, xbt_swag_t cnst_list)
lmm_constraint_t cnst = nullptr;
lmm_variable_t var = nullptr;
xbt_swag_foreach(_var, var_list) {
var = static_cast<lmm_variable_t>(_var);
double sigma_i = 0.0;
if (not var->sharing_weight)
for (int j = 0; j < var->cnsts_number; j++)
sigma_i += (var->cnsts[j].constraint)->lambda;
XBT_DEBUG("var %p : sigma_i = %1.20f", var, sigma_i);
obj += var->func_f(var, var->func_fpi(var, sigma_i)) - sigma_i * var->func_fpi(var, sigma_i);
obj += var->mu * var->bound;
xbt_swag_foreach(_cnst, cnst_list) {
cnst = static_cast<lmm_constraint_t>(_cnst);
obj += cnst->lambda * cnst->bound;
void lagrange_solve(lmm_system_t sys)
/* Lagrange Variables. */
int max_iterations = 100;
double epsilon_min_error = 0.00001; /* this is the precision on the objective function; it is not one of the configurable values and keeps its legacy value */
double dichotomy_min_error = 1e-14;
double overall_modification = 1;
/* Variables to manipulate the data structure proposed to model the maxmin fairness. See documentation for details. */
xbt_swag_t cnst_list = nullptr;
lmm_constraint_t cnst = nullptr;
xbt_swag_t var_list = nullptr;
lmm_variable_t var = nullptr;
/* Auxiliary variables. */
XBT_DEBUG("Iterative method configuration snapshot =====>");
XBT_DEBUG("#### Maximum number of iterations : %d", max_iterations);
XBT_DEBUG("#### Minimum error tolerated : %e", epsilon_min_error);
XBT_DEBUG("#### Minimum error tolerated (dichotomy) : %e", dichotomy_min_error);
if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
if (not sys->modified)
/* Initialize lambda. */
cnst_list = &(sys->active_constraint_set);
xbt_swag_foreach(_cnst, cnst_list) {
cnst = (lmm_constraint_t)_cnst;
cnst->new_lambda = 2.0;
XBT_DEBUG("#### cnst(%p)->lambda : %e", cnst, cnst->lambda);
/*
 * Initialize the var list variable with only the active variables.
 * Associate an index in the swag variables. Initialize mu.
 */
var_list = &(sys->variable_set);
xbt_swag_foreach(_var, var_list) {
var = static_cast<lmm_variable_t>(_var);
if (not var->sharing_weight)
if (var->bound < 0.0) {
XBT_DEBUG("#### NOTE var(%d) is a boundless variable", i);
var->value = new_value(var);
XBT_DEBUG("#### var(%p) ->weight: %e", var, var->sharing_weight);
XBT_DEBUG("#### var(%p) ->mu: %e", var, var->mu);
XBT_DEBUG("#### var(%p) ->bound: %e", var, var->bound);
for (i = 0; i < var->cnsts_number; i++) {
if (var->cnsts[i].consumption_weight == 0.0)
if (nb == var->cnsts_number)
/* Compute dual objective. */
obj = dual_objective(var_list, cnst_list);
/* Iterate while the error is above the minimum tolerated and the maximum number of iterations is not reached. */
while (overall_modification > epsilon_min_error && iteration < max_iterations) {
XBT_DEBUG("************** ITERATION %d **************", iteration);
XBT_DEBUG("-------------- Gradient Descent ----------");
/* Improve the value of mu_i */
xbt_swag_foreach(_var, var_list) {
var = static_cast<lmm_variable_t>(_var);
if (not var->sharing_weight)
if (var->bound >= 0) {
XBT_DEBUG("Working on var (%p)", var);
var->new_mu = new_mu(var);
/* dual_updated += (fabs(var->new_mu-var->mu)>dichotomy_min_error); */
/* XBT_DEBUG("dual_updated (%d) : %1.20f",dual_updated,fabs(var->new_mu-var->mu)); */
XBT_DEBUG("Updating mu : var->mu (%p) : %1.20f -> %1.20f", var, var->mu, var->new_mu);
var->mu = var->new_mu;
new_obj = dual_objective(var_list, cnst_list);
XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
/* Improve the value of lambda_i */
xbt_swag_foreach(_cnst, cnst_list) {
cnst = static_cast<lmm_constraint_t>(_cnst);
XBT_DEBUG("Working on cnst (%p)", cnst);
cnst->new_lambda = dichotomy(cnst->lambda, partial_diff_lambda, cnst, dichotomy_min_error);
/* dual_updated += (fabs(cnst->new_lambda-cnst->lambda)>dichotomy_min_error); */
/* XBT_DEBUG("dual_updated (%d) : %1.20f",dual_updated,fabs(cnst->new_lambda-cnst->lambda)); */
XBT_DEBUG("Updating lambda : cnst->lambda (%p) : %1.20f -> %1.20f", cnst, cnst->lambda, cnst->new_lambda);
cnst->lambda = cnst->new_lambda;
new_obj = dual_objective(var_list, cnst_list);
XBT_DEBUG("Improvement for Objective (%g -> %g) : %g", obj, new_obj, obj - new_obj);
xbt_assert(obj - new_obj >= -epsilon_min_error, "Our gradient sucks! (%1.20f)", obj - new_obj);
/* Now compute the value of each variable (\rho) based on the values of \lambda and \mu. */
XBT_DEBUG("-------------- Check convergence ----------");
overall_modification = 0;
xbt_swag_foreach(_var, var_list) {
var = static_cast<lmm_variable_t>(_var);
if (var->sharing_weight <= 0)
tmp = new_value(var);
overall_modification = MAX(overall_modification, fabs(var->value - tmp));
XBT_DEBUG("New value of var (%p) = %e, overall_modification = %e", var, var->value, overall_modification);
XBT_DEBUG("-------------- Check feasibility ----------");
if (not __check_feasible(cnst_list, var_list, 0))
overall_modification = 1.0;
XBT_DEBUG("Iteration %d: overall_modification : %f", iteration, overall_modification);
/* if(not dual_updated) { */
/* XBT_WARN("Could not improve the convergence at iteration %d. Drop it!",iteration); */
__check_feasible(cnst_list, var_list, 1);
if (overall_modification <= epsilon_min_error) {
XBT_DEBUG("The method converges in %d iterations.", iteration);
if (iteration >= max_iterations) {
XBT_DEBUG("Method reached %d iterations, which is the maximum number of iterations allowed.", iteration);
if (XBT_LOG_ISENABLED(surf_lagrange, xbt_log_priority_debug)) {
/*
 * Returns a double value corresponding to the result of a dichotomy process with respect to a given
 * variable/constraint (\mu in the case of a variable or \lambda in the case of a constraint) and an initial value init.
 *
 * @param init initial value for \mu or \lambda
 * @param diff a function that computes the differential with respect to \mu or \lambda
 * @param var_cnst a pointer to a variable or constraint
 * @param min_error the minimum error tolerated
 *
 * @return a double corresponding to the result of the dichotomy process
 */
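/* The implementation below first grows or shrinks the bracket [min, max] until diff() changes sign over it, then
 * bisects the bracket and returns its midpoint once the error falls below min_error. */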
static double dichotomy(double init, double diff(double, void *), void *var_cnst, double min_error)
double overall_error;
if (fabs(init) < 1e-20) {
diff_0 = diff(1e-16, var_cnst);
XBT_CDEBUG(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)", diff_0);
double min_diff = diff(min, var_cnst);
double max_diff = diff(max, var_cnst);
while (overall_error > min_error) {
XBT_CDEBUG(surf_lagrange_dichotomy, "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f",
           min, max, min_diff, max_diff);
if (min_diff > 0 && max_diff > 0) {
XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing min");
min_diff = diff(min, var_cnst);
XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
} else if (min_diff < 0 && max_diff < 0) {
XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing max");
max_diff = diff(max, var_cnst);
XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
} else if (min_diff < 0 && max_diff > 0) {
middle = (max + min) / 2.0;
XBT_CDEBUG(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f", middle);
if ((fabs(min - middle) < 1e-20) || (fabs(max - middle) < 1e-20)) {
XBT_CWARN(surf_lagrange_dichotomy, "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
          " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
          min, max - min, min_diff, max_diff);
middle_diff = diff(middle, var_cnst);
if (middle_diff < 0) {
XBT_CDEBUG(surf_lagrange_dichotomy, "Increasing min");
overall_error = max_diff - middle_diff;
min_diff = middle_diff;
/* SHOW_EXPR(overall_error); */
} else if (middle_diff > 0) {
XBT_CDEBUG(surf_lagrange_dichotomy, "Decreasing max");
overall_error = max_diff - middle_diff;
max_diff = middle_diff;
/* SHOW_EXPR(overall_error); */
/* SHOW_EXPR(overall_error); */
} else if (fabs(min_diff) < 1e-20) {
/* SHOW_EXPR(overall_error); */
} else if (fabs(max_diff) < 1e-20) {
/* SHOW_EXPR(overall_error); */
} else if (min_diff > 0 && max_diff < 0) {
XBT_CWARN(surf_lagrange_dichotomy, "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
XBT_CWARN(surf_lagrange_dichotomy,
          "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.",
XBT_CDEBUG(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
return ((min + max) / 2.0);
static double partial_diff_lambda(double lambda, void *param_cnst)
lmm_constraint_t cnst = static_cast<lmm_constraint_t>(param_cnst);
XBT_CDEBUG(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", cnst);
xbt_swag_t elem_list = &(cnst->enabled_element_set);
xbt_swag_foreach(_elem, elem_list) {
lmm_element_t elem = static_cast<lmm_element_t>(_elem);
lmm_variable_t var = elem->variable;
xbt_assert(var->sharing_weight > 0);
XBT_CDEBUG(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", var);
// Initialize the summation variable
double sigma_i = 0.0;
for (int j = 0; j < var->cnsts_number; j++) {
sigma_i += (var->cnsts[j].constraint)->lambda;
// add mu_i if this flow has an associated RTT constraint
// replace the value of cnst->lambda by the value of the parameter lambda
sigma_i = (sigma_i - cnst->lambda) + lambda;
diff += -var->func_fpi(var, sigma_i);
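// each enabled flow thus contributes -rho_i = -(f_i')^{-1}(sigma_i) to the partial derivative of the dual
// objective with respect to this constraint's lambda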
XBT_CDEBUG(surf_lagrange_dichotomy, "d D/d lambda for cnst (%p) at %1.20f = %1.20f", cnst, lambda, diff);
/** \brief Set the default protocol functions (f, its derivative f' and the inverse (f')^{-1}).
 *
 * \param func_fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
 *
 * Set the default functions to the ones passed as parameters. This is polymorphism in pure C; enjoy the roots of
 * programming.
 */
void lmm_set_default_protocol_function(double (*func_f) (lmm_variable_t var, double x),
                                       double (*func_fp) (lmm_variable_t var, double x),
                                       double (*func_fpi) (lmm_variable_t var, double x))
func_fp_def = func_fp;
func_fpi_def = func_fpi;
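/* Illustrative sketch (not a call site in this file): a protocol model would typically register one of the
 * families defined below before building its lmm system, e.g.
 *   lmm_set_default_protocol_function(func_reno_f, func_reno_fp, func_reno_fpi);
 * or the func_vegas_* / func_reno2_* triplet. */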
/**************** Vegas and Reno functions *************************/
/* NOTE for Reno: all functions consider the network coefficient (alpha) equal to 1. */
/*
 * For Vegas: $f(x) = \alpha D_f\ln(x)$
 * Therefore: $fp(x) = \frac{\alpha D_f}{x}$
 * Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
 */
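/* Note: fp(x) = \alpha D_f / x is an involution (it is its own inverse), which is why fpi has the same closed
 * form; the implementations below additionally fold in the VEGAS_SCALING factor. */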
#define VEGAS_SCALING 1000.0
double func_vegas_f(lmm_variable_t var, double x)
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return VEGAS_SCALING * var->sharing_weight * log(x);
double func_vegas_fp(lmm_variable_t var, double x)
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return VEGAS_SCALING * var->sharing_weight / x;
double func_vegas_fpi(lmm_variable_t var, double x)
xbt_assert(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
return var->sharing_weight / (x / VEGAS_SCALING);
/*
 * For Reno: $f(x) = \frac{\sqrt{3/2}}{D_f} \arctan(\sqrt{3/2} D_f x)$
 * Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2 + 2}$
 * Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3 {D_f}^2}}$
 */
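/* (fpi is obtained by solving y = 3 / (3 D_f^2 x^2 + 2) for x and keeping the non-negative root.) */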
#define RENO_SCALING 1.0
double func_reno_f(lmm_variable_t var, double x)
xbt_assert(var->sharing_weight > 0.0, "Don't call me with stupid values!");
return RENO_SCALING * sqrt(3.0 / 2.0) / var->sharing_weight * atan(sqrt(3.0 / 2.0) * var->sharing_weight * x);
double func_reno_fp(lmm_variable_t var, double x)
return RENO_SCALING * 3.0 / (3.0 * var->sharing_weight * var->sharing_weight * x * x + 2.0);
double func_reno_fpi(lmm_variable_t var, double x)
xbt_assert(var->sharing_weight > 0.0, "Don't call me with stupid values!");
xbt_assert(x > 0.0, "Don't call me with stupid values!");
res_fpi = 1.0 / (var->sharing_weight * var->sharing_weight * (x / RENO_SCALING)) -
          2.0 / (3.0 * var->sharing_weight * var->sharing_weight);
/* xbt_assert(res_fpi>0.0,"Don't call me with stupid values!"); */
return sqrt(res_fpi);
/* Implementing new Reno-2
 * For Reno-2: $f(x) = U_f(x_f) = \frac{2}{D_f} \ln(2 + x D_f)$
 * Therefore: $fp(x) = 2/(Weight*x + 2)$
 * Therefore: $fpi(x) = (2*Weight)/x - 4$
 */
#define RENO2_SCALING 1.0
double func_reno2_f(lmm_variable_t var, double x)
xbt_assert(var->sharing_weight > 0.0, "Don't call me with stupid values!");
return RENO2_SCALING * (1.0 / var->sharing_weight) *
       log((x * var->sharing_weight) / (2.0 * x * var->sharing_weight + 3.0));
double func_reno2_fp(lmm_variable_t var, double x)
return RENO2_SCALING * 3.0 / (var->sharing_weight * x * (2.0 * var->sharing_weight * x + 3.0));
double func_reno2_fpi(lmm_variable_t var, double x)
xbt_assert(x > 0.0, "Don't call me with stupid values!");
double tmp = x * var->sharing_weight * var->sharing_weight;
double res_fpi = tmp * (9.0 * x + 24.0);
res_fpi = RENO2_SCALING * (-3.0 * tmp + sqrt(res_fpi)) / (4.0 * tmp);