+ CDEBUG1(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
+ XBT_OUT;
+ return ((min + max) / 2.0);
+}
+
+static double partial_diff_lambda(double lambda, void *param_cnst)
+{
+
+ int j;
+ xbt_swag_t elem_list = NULL;
+ lmm_element_t elem = NULL;
+ lmm_variable_t var = NULL;
+ lmm_constraint_t cnst = (lmm_constraint_t) param_cnst;
+ double diff = 0.0;
+ double sigma_i = 0.0;
+
+ XBT_IN;
+ elem_list = &(cnst->element_set);
+
+ CDEBUG1(surf_lagrange_dichotomy, "Computing diff of cnst (%p)", cnst);
+
+ xbt_swag_foreach(elem, elem_list) {
+ var = elem->variable;
+ if (var->weight <= 0)
+ continue;
+
+ CDEBUG1(surf_lagrange_dichotomy, "Computing sigma_i for var (%p)", var);
+ // Initialize the summation variable
+ sigma_i = 0.0;
+
+ // Compute sigma_i
+ for (j = 0; j < var->cnsts_number; j++) {
+ sigma_i += (var->cnsts[j].constraint)->lambda;
+ }
+
+ //add mu_i if this flow has a RTT constraint associated
+ if (var->bound > 0)
+ sigma_i += var->mu;
+
+ //replace value of cnst->lambda by the value of parameter lambda
+ sigma_i = (sigma_i - cnst->lambda) + lambda;
+
+ diff += -var->func_fpi(var, sigma_i);
+ }
+
+
+ diff += cnst->bound;
+
+ CDEBUG3(surf_lagrange_dichotomy,
+ "d D/d lambda for cnst (%p) at %1.20f = %1.20f", cnst, lambda,
+ diff);
+ XBT_OUT;
+ return diff;
+}
+
+/** \brief Attribute the value bound to var->bound.
+ *
+ * \param func_fpi inverse of the partial differential of f (f prime inverse, (f')^{-1})
+ *
+ * Set default functions to the ones passed as parameters. This is a polimorfism in C pure, enjoy the roots of programming.
+ *
+ */
+void lmm_set_default_protocol_function(double (*func_f)
+
+
+
+
+
+
+ (lmm_variable_t var, double x),
+ double (*func_fp) (lmm_variable_t var,
+ double x),
+ double (*func_fpi) (lmm_variable_t var,
+ double x))
+{
+ func_f_def = func_f;
+ func_fp_def = func_fp;
+ func_fpi_def = func_fpi;
+}
+
+
+/**************** Vegas and Reno functions *************************/
+/*
+ * NOTE for Reno: all functions consider the network
+ * coeficient (alpha) equal to 1.
+ */
+
+/*
+ * For Vegas: $f(x) = \alpha D_f\ln(x)$
+ * Therefore: $fp(x) = \frac{\alpha D_f}{x}$
+ * Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
+ */
+#define VEGAS_SCALING 1000.0
+
+double func_vegas_f(lmm_variable_t var, double x)
+{
+ xbt_assert1(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
+ return VEGAS_SCALING * var->weight * log(x);
+}
+
+double func_vegas_fp(lmm_variable_t var, double x)
+{
+ xbt_assert1(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
+ return VEGAS_SCALING * var->weight / x;
+}
+
+double func_vegas_fpi(lmm_variable_t var, double x)
+{
+ xbt_assert1(x > 0.0, "Don't call me with stupid values! (%1.20f)", x);
+ return var->weight / (x / VEGAS_SCALING);
+}
+
+/*
+ * For Reno: $f(x) = \frac{\sqrt{3/2}}{D_f} atan(\sqrt{3/2}D_f x)$
+ * Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2+2}$
+ * Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
+ */
+#define RENO_SCALING 1.0
+double func_reno_f(lmm_variable_t var, double x)
+{
+ xbt_assert0(var->weight > 0.0, "Don't call me with stupid values!");
+
+ return RENO_SCALING * sqrt(3.0 / 2.0) / var->weight * atan(sqrt(3.0 / 2.0) *
+ var->weight * x);
+}
+
+double func_reno_fp(lmm_variable_t var, double x)
+{
+ return RENO_SCALING * 3.0 / (3.0 * var->weight * var->weight * x * x + 2.0);
+}
+
+double func_reno_fpi(lmm_variable_t var, double x)
+{
+ double res_fpi;
+
+ xbt_assert0(var->weight > 0.0, "Don't call me with stupid values!");
+ xbt_assert0(x > 0.0, "Don't call me with stupid values!");
+
+ res_fpi =
+ 1.0 / (var->weight * var->weight * (x / RENO_SCALING)) -
+ 2.0 / (3.0 * var->weight * var->weight);
+ if (res_fpi <= 0.0)
+ return 0.0;
+/* xbt_assert0(res_fpi>0.0,"Don't call me with stupid values!"); */
+ return sqrt(res_fpi);
+}
+
+
/* Implementing new Reno-2
 * For Reno-2: $f(x) = \frac{1}{D_f}\ln\left(\frac{x D_f}{2 x D_f + 3}\right)$
 * Therefore: $fp(x) = \frac{3}{D_f x (2 D_f x + 3)}$
 * Therefore: $fpi(x)$ is the positive root of $fp(t) = x$ in $t$
 */
+#define RENO2_SCALING 1.0
+double func_reno2_f(lmm_variable_t var, double x)
+{
+ xbt_assert0(var->weight > 0.0, "Don't call me with stupid values!");
+ return RENO2_SCALING * (1.0 / var->weight) * log((x * var->weight) /
+ (2.0 * x * var->weight +
+ 3.0));
+}
+
+double func_reno2_fp(lmm_variable_t var, double x)
+{
+ return RENO2_SCALING * 3.0 / (var->weight * x *
+ (2.0 * var->weight * x + 3.0));
+}
+
+double func_reno2_fpi(lmm_variable_t var, double x)
+{
+ double res_fpi;
+ double tmp;
+
+ xbt_assert0(x > 0.0, "Don't call me with stupid values!");
+ tmp = x * var->weight * var->weight;
+ res_fpi = tmp * (9.0 * x + 24.0);