+
/*
 * Returns a double value corresponding to the result of a dichotomy process with
 * respect to a given variable/constraint (\mu in the case of a variable or \lambda in
 * the case of a constraint) and an initial value init.
 *
 * @param init initial value for \mu or \lambda
 * @param diff a function that computes the differential with respect to \mu or \lambda
 * @param var_cnst a pointer to a variable or constraint
 * @param min_error the minimum error tolerated
 *
 * @return a double corresponding to the result of the dichotomy process
 */
static double dichotomy(double init, double diff(double, void *), void *var_cnst,
                        double min_error)
{
  double min, max;                        /* current bracketing interval [min, max] */
  double overall_error;                   /* convergence measure; loop stops when <= min_error */
  double middle;
  double min_diff, max_diff, middle_diff; /* diff() evaluated at min, max and middle */
  double diff_0 = 0.0;
  min = max = init;

  XBT_IN;

  /* No usable seed: start the bracket search from an arbitrary positive point. */
  if (init == 0.0) {
    min = max = 0.5;
  }

  min_diff = max_diff = middle_diff = 0.0;
  overall_error = 1;

  /* If the differential is already non-negative arbitrarily close to zero,
   * the optimum lies on the boundary: answer 0.0 immediately. */
  if ((diff_0 = diff(1e-16, var_cnst)) >= 0) {
    CDEBUG1(surf_lagrange_dichotomy, "returning 0.0 (diff = %e)",
            diff_0);
    XBT_OUT;
    return 0.0;
  }

  min_diff = diff(min, var_cnst);
  max_diff = diff(max, var_cnst);

  while (overall_error > min_error) {
    CDEBUG4(surf_lagrange_dichotomy,
            "[min, max] = [%1.20f, %1.20f] || diffmin, diffmax = %1.20f, %1.20f", min, max,
            min_diff, max_diff);

    if (min_diff > 0 && max_diff > 0) {
      /* Both differentials positive: the zero lies below the bracket. */
      if (min == max) {
        CDEBUG0(surf_lagrange_dichotomy, "Decreasing min");
        min = min / 2.0;
        min_diff = diff(min, var_cnst);
      } else {
        CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
        max = min;
        max_diff = min_diff;
      }
    } else if (min_diff < 0 && max_diff < 0) {
      /* Both differentials negative: the zero lies above the bracket. */
      if (min == max) {
        CDEBUG0(surf_lagrange_dichotomy, "Increasing max");
        max = max * 2.0;
        max_diff = diff(max, var_cnst);
      } else {
        CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
        min = max;
        min_diff = max_diff;
      }
    } else if (min_diff < 0 && max_diff > 0) {
      /* Proper bracket (sign change inside): bisect it. */
      middle = (max + min) / 2.0;
      CDEBUG1(surf_lagrange_dichotomy, "Trying (max+min)/2 : %1.20f", middle);

      /* Bisection hit the resolution limit of double: cannot improve further. */
      if ((min == middle) || (max == middle)) {
        CWARN4(surf_lagrange_dichotomy, "Cannot improve the convergence! min=max=middle=%1.20f, diff = %1.20f."
               " Reaching the 'double' limits. Maybe scaling your function would help ([%1.20f,%1.20f]).",
               min, max - min, min_diff, max_diff);
        break;
      }
      middle_diff = diff(middle, var_cnst);

      if (middle_diff < 0) {
        CDEBUG0(surf_lagrange_dichotomy, "Increasing min");
        min = middle;
        overall_error = max_diff - middle_diff;
        min_diff = middle_diff;
/*         SHOW_EXPR(overall_error); */
      } else if (middle_diff > 0) {
        CDEBUG0(surf_lagrange_dichotomy, "Decreasing max");
        max = middle;
        overall_error = max_diff - middle_diff;
        max_diff = middle_diff;
/*         SHOW_EXPR(overall_error); */
      } else {
        /* Exact zero found at the midpoint. */
        overall_error = 0;
/*         SHOW_EXPR(overall_error); */
      }
    } else if (min_diff == 0) {
      /* min is already a zero of the differential: collapse the interval onto it. */
      max = min;
      overall_error = 0;
/*       SHOW_EXPR(overall_error); */
    } else if (max_diff == 0) {
      /* max is already a zero of the differential: collapse the interval onto it. */
      min = max;
      overall_error = 0;
/*       SHOW_EXPR(overall_error); */
    } else if (min_diff > 0 && max_diff < 0) {
      /* diff is expected to be non-decreasing, so this ordering cannot happen. */
      CWARN0(surf_lagrange_dichotomy,
             "The impossible happened, partial_diff(min) > 0 && partial_diff(max) < 0");
      abort();
    } else {
      /* NOTE(review): only reachable when a differential compares false to
       * everything (e.g. NaN) — treated as a fatal numerical error. */
      CWARN2(surf_lagrange_dichotomy,
             "diffmin (%1.20f) or diffmax (%1.20f) are something I don't know, taking no action.",
             min_diff, max_diff);
      abort();
    }
  }

  CDEBUG1(surf_lagrange_dichotomy, "returning %e", (min + max) / 2.0);
  XBT_OUT;
  return ((min + max) / 2.0);
}
+
+static double partial_diff_lambda(double lambda, void *param_cnst)
+{
+
+ int j;
+ xbt_swag_t elem_list = NULL;
+ lmm_element_t elem = NULL;
+ lmm_variable_t var = NULL;
+ lmm_constraint_t cnst = (lmm_constraint_t) param_cnst;
+ double diff = 0.0;
+ double sigma_i = 0.0;
+
+ XBT_IN;
+ elem_list = &(cnst->element_set);
+
+ CDEBUG1(surf_lagrange_dichotomy,"Computing diff of cnst (%p)", cnst);
+
+ xbt_swag_foreach(elem, elem_list) {
+ var = elem->variable;
+ if (var->weight <= 0)
+ continue;
+
+ CDEBUG1(surf_lagrange_dichotomy,"Computing sigma_i for var (%p)", var);
+ // Initialize the summation variable
+ sigma_i = 0.0;
+
+ // Compute sigma_i
+ for (j = 0; j < var->cnsts_number; j++) {
+ sigma_i += (var->cnsts[j].constraint)->lambda;
+ }
+
+ //add mu_i if this flow has a RTT constraint associated
+ if (var->bound > 0)
+ sigma_i += var->mu;
+
+ //replace value of cnst->lambda by the value of parameter lambda
+ sigma_i = (sigma_i - cnst->lambda) + lambda;
+
+ diff += -var->func_fpi(var, sigma_i);
+ }
+
+
+ diff += cnst->bound;
+
+ CDEBUG3(surf_lagrange_dichotomy,"d D/d lambda for cnst (%p) at %1.20f = %1.20f",
+ cnst, lambda, diff);
+ XBT_OUT;
+ return diff;
+}
+
/** \brief Set the default protocol functions (f, f' and (f')^{-1}).
 *
 * \param func_f the objective function f
 * \param func_fp the partial differential of f (f prime, f')
 * \param func_fpi the inverse of the partial differential of f (f prime inverse, (f')^{-1})
 *
 * Set the default functions to the ones passed as parameters. This is polymorphism in pure C, enjoy the roots of programming.
 *
 */
+void lmm_set_default_protocol_function(double (* func_f) (lmm_variable_t var, double x),
+ double (* func_fp) (lmm_variable_t var, double x),
+ double (* func_fpi) (lmm_variable_t var, double x))
+{
+ func_f_def = func_f;
+ func_fp_def = func_fp;
+ func_fpi_def = func_fpi;
+}
+
+
+/**************** Vegas and Reno functions *************************/
/*
 * NOTE for Reno: all functions consider the network
 * coefficient (alpha) equal to 1.
 */
+
+/*
+ * For Vegas: $f(x) = \alpha D_f\ln(x)$
+ * Therefore: $fp(x) = \frac{\alpha D_f}{x}$
+ * Therefore: $fpi(x) = \frac{\alpha D_f}{x}$
+ */
+#define VEGAS_SCALING 1000.0
+
+double func_vegas_f(lmm_variable_t var, double x){
+ xbt_assert1(x>0.0,"Don't call me with stupid values! (%1.20f)",x);
+ return VEGAS_SCALING*var->df*log(x);
+}
+
+double func_vegas_fp(lmm_variable_t var, double x){
+ xbt_assert1(x>0.0,"Don't call me with stupid values! (%1.20f)",x);
+ return VEGAS_SCALING*var->df/x;
+}
+
+double func_vegas_fpi(lmm_variable_t var, double x){
+ xbt_assert1(x>0.0,"Don't call me with stupid values! (%1.20f)",x);
+ return var->df/(x/VEGAS_SCALING);
+}
+
+/*
+ * For Reno: $f(x) = \frac{\sqrt{3/2}}{D_f} atan(\sqrt{3/2}D_f x)$
+ * Therefore: $fp(x) = \frac{3}{3 D_f^2 x^2+2}$
+ * Therefore: $fpi(x) = \sqrt{\frac{1}{{D_f}^2 x} - \frac{2}{3{D_f}^2}}$
+ */
+#define RENO_SCALING 1.0
+double func_reno_f(lmm_variable_t var, double x){
+ xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
+
+ return RENO_SCALING*sqrt(3.0/2.0)/var->df*atan(sqrt(3.0/2.0)*var->df*x);
+}
+
+double func_reno_fp(lmm_variable_t var, double x){
+ return RENO_SCALING*3.0/(3.0*var->df*var->df*x*x +2.0);
+}
+
+double func_reno_fpi(lmm_variable_t var, double x){
+ double res_fpi;
+
+ xbt_assert0(var->df>0.0,"Don't call me with stupid values!");
+ xbt_assert0(x>0.0,"Don't call me with stupid values!");
+
+ res_fpi = 1.0/(var->df*var->df*(x/RENO_SCALING)) - 2.0/(3.0*var->df*var->df);
+ if(res_fpi<=0.0) return 0.0;
+/* xbt_assert0(res_fpi>0.0,"Don't call me with stupid values!"); */
+ return sqrt(res_fpi);
+}
+
+