Algorithmique Numérique Distribuée (AND): Public Git Repository
Merge branch 'smpi-topo'
author     Stéphane Castelli <stephane.castelli@loria.fr>
           Tue, 22 Apr 2014 14:37:30 +0000 (16:37 +0200)
committer  Stéphane Castelli <stephane.castelli@loria.fr>
           Tue, 22 Apr 2014 14:37:30 +0000 (16:37 +0200)
Conflicts:
src/smpi/smpi_pmpi.c

buildtools/Cmake/AddTests.cmake
buildtools/Cmake/DefinePackages.cmake
include/smpi/smpi.h
src/smpi/private.h
src/smpi/smpi_global.c
src/smpi/smpi_pmpi.c

@@@ -30,10 -30,10 +30,10 @@@ endif(
  
  #some tests may take forever on non futexes systems, using busy_wait with n cores < n workers
  # default to posix for these tests if futexes are not supported
 -if(NOT HAVE_FUTEX_H) 
 +if(NOT HAVE_FUTEX_H)
  SET(CONTEXTS_SYNCHRO --cfg contexts/synchro:posix)
  endif()
 - 
 +
  
  INCLUDE(CTest)
  ENABLE_TESTING()
@@@ -140,7 -140,6 +140,7 @@@ if(NOT enable_memcheck
    IF(enable_debug AND NOT enable_memcheck)
      ADD_TEST(tesh-parser-bogus-symmetric     ${TESH_COMMAND} ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/simdag/platforms --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms bogus_two_hosts_asymetric.tesh)
      ADD_TEST(tesh-parser-bogus-missing-gw ${TESH_COMMAND} ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/simdag/platforms --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms bogus_missing_gateway.tesh)
 +    ADD_TEST(tesh-disk-attachment ${TESH_COMMAND} ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/simdag/platforms --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms bogus_disk_attachment.tesh)
    ENDIF()
  
    ADD_TEST(tesh-simdag-bypass                   ${TESH_COMMAND} ${TESH_OPTION} --setenv bindir=${CMAKE_BINARY_DIR}/teshsuite/simdag/platforms --setenv srcdir=${CMAKE_HOME_DIRECTORY} --cd ${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms basic_parsing_test_bypass.tesh)
    ADD_TEST(msg-masterslave-vivaldi-thread       ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg --cd ${CMAKE_BINARY_DIR}/examples/msg ${CMAKE_HOME_DIRECTORY}/examples/msg/masterslave/masterslave_vivaldi.tesh)
  
    ADD_TEST(msg-cloud-two-tasks-vm-thread ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/cloud/ --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/cloud/ --cd ${CMAKE_BINARY_DIR}/examples/msg/cloud/ ${CMAKE_HOME_DIRECTORY}/examples/msg/cloud/two_tasks_vm.tesh)
 -  
 +
    ADD_TEST(msg-cloud-simple-vm-thread ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv bindir=${CMAKE_BINARY_DIR}/examples/msg/cloud/ --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/msg/cloud/ --cd ${CMAKE_BINARY_DIR}/examples/msg/cloud/ ${CMAKE_HOME_DIRECTORY}/examples/msg/cloud/simple_vm.tesh)
  
    if(CONTEXT_UCONTEXT)
      ADD_TEST(smpi-struct-thread                 ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/struct.tesh)
      ADD_TEST(smpi-pt2pt-thread                  ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/pt2pt.tesh)
      ADD_TEST(smpi-compute-thread                ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/compute.tesh)
 +    
 +    # https://gforge.inria.fr/tracker/index.php?func=detail&aid=17132&group_id=12&atid=165
 +    ADD_TEST(smpi-bug-17132                     ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/bug-17132 --cd ${CMAKE_BINARY_DIR}/teshsuite/bug-17132 ${CMAKE_HOME_DIRECTORY}/teshsuite/bug-17132/bug-17132.tesh)
 +    ADD_TEST(smpi-bug-17132-surf-debug          ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/bug-17132 --cd ${CMAKE_BINARY_DIR}/teshsuite/bug-17132 ${CMAKE_HOME_DIRECTORY}/teshsuite/bug-17132/bug-17132-surf-debug.tesh)
 +    
      if (NOT WIN32)
        ADD_TEST(smpi-shared-thread               ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --cd ${CMAKE_BINARY_DIR}/teshsuite/smpi ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/shared.tesh)
      endif()
        endif()
      endif()
  
 -    ADD_TEST(smpi-energy                        ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
 +    ADD_TEST(smpi-energy-thread                 ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
      if(SMPI_F2C)
 -      ADD_TEST(smpi-energy-f77                  ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
 +      ADD_TEST(smpi-energy-f77-thread           ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
      endif()
      if(SMPI_F90)
 -      ADD_TEST(smpi-energy-f90                  ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
 +      ADD_TEST(smpi-energy-f90-thread           ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
 +    endif()
 +    if(CONTEXT_UCONTEXT)
 +      ADD_TEST(smpi-energy-ucontext             ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
 +      if(SMPI_F2C)
 +        ADD_TEST(smpi-energy-f77-ucontext       ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
 +      endif()
 +      if(SMPI_F90)
 +        ADD_TEST(smpi-energy-f90-ucontext       ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
 +      endif()
 +    endif()
 +    if(HAVE_RAWCTX)
 +      ADD_TEST(smpi-energy-raw                  ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/energy.tesh)
 +      if(SMPI_F2C)
 +        ADD_TEST(smpi-energy-f77-raw            ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f77/energy.tesh)
 +      endif()
 +      if(SMPI_F90)
 +        ADD_TEST(smpi-energy-f90-raw            ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/smpi/energy --cd ${CMAKE_BINARY_DIR}/examples/smpi/energy ${CMAKE_HOME_DIRECTORY}/examples/smpi/energy/f90/energy.tesh)
 +      endif()
      endif()
  
      if(HAVE_TRACING)
    # END TESH TESTS
  
    if(enable_smpi_MPICH3_testsuite)
 -    ADD_TEST(smpi-mpich3-coll-thread            ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread)
 -    ADD_TEST(smpi-mpich3-coll-ompi-thread       ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0)
 -    ADD_TEST(smpi-mpich3-coll-mpich-thread      ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich)
 +    ADD_TEST(smpi-mpich3-coll-thread            ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/privatize_global_variables:yes)
 +    ADD_TEST(smpi-mpich3-coll-ompi-thread       ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0 -execarg=--cfg=smpi/privatize_global_variables:yes)
 +    ADD_TEST(smpi-mpich3-coll-mpich-thread      ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatize_global_variables:yes)
      set_tests_properties(smpi-mpich3-coll-thread smpi-mpich3-coll-ompi-thread smpi-mpich3-coll-mpich-thread PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
+     
+     ADD_TEST(smpi-mpich3-topo-raw            ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/topo perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/topo -tests=testlist -execarg=--cfg=contexts/factory:raw)
+       set_tests_properties(smpi-mpich3-topo-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
      if(CONTEXT_UCONTEXT)
 -      ADD_TEST(smpi-mpich3-coll-ompi-ucontext   ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0)
 +      ADD_TEST(smpi-mpich3-coll-ompi-ucontext   ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:ucontext -execarg=--cfg=smpi/coll_selector:ompi -execarg=--cfg=smpi/send_is_detached_thres:0 -execarg=--cfg=smpi/privatize_global_variables:yes)
        set_tests_properties(smpi-mpich3-coll-ompi-ucontext PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
      endif()
      if(HAVE_RAWCTX)
 -      ADD_TEST(smpi-mpich3-coll-mpich-raw       ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mpich)
 +      ADD_TEST(smpi-mpich3-coll-mpich-raw       ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/coll perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/coll -tests=testlist -execarg=--cfg=contexts/factory:raw -execarg=--cfg=smpi/coll_selector:mpich -execarg=--cfg=smpi/privatize_global_variables:yes)
        set_tests_properties(smpi-mpich3-coll-mpich-raw PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
      endif()
  
      endif()
  
      if(SMPI_F2C)
 -      ADD_TEST(smpi-mpich3-thread-f77           ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f77/ perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f77/ -tests=testlist -execarg=--cfg=contexts/factory:thread -execarg=--cfg=contexts/stack_size:8000)
 +      ADD_TEST(smpi-mpich3-thread-f77           ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f77/ perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f77/ -tests=testlist -execarg=--cfg=contexts/stack_size:8000 -execarg=--cfg=smpi/privatize_global_variables:yes)
        set_tests_properties(smpi-mpich3-thread-f77 PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
      endif()
      if(SMPI_F90)
 -      ADD_TEST(smpi-mpich3-thread-f90           ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f90/ perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f90/ -tests=testlist -execarg=--cfg=contexts/factory:thread)
 +      ADD_TEST(smpi-mpich3-thread-f90           ${CMAKE_COMMAND} -E chdir ${CMAKE_BINARY_DIR}/teshsuite/smpi/mpich3-test/f90/ perl ${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/runtests -mpiexec=${CMAKE_BINARY_DIR}/smpi_script/bin/smpirun -srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite/smpi/mpich3-test/f90/ -tests=testlist -execarg=--cfg=smpi/privatize_global_variables:yes)
        set_tests_properties(smpi-mpich3-thread-f90 PROPERTIES PASS_REGULAR_EXPRESSION "tests passed!")
      endif()
    endif()
      ADD_TEST(java-mutualExclusion               ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/mutualExclusion/mutualexclusion.tesh)
      ADD_TEST(java-pingPong                      ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/pingPong/pingpong.tesh)
      ADD_TEST(java-priority                      ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/priority/priority.tesh)
 +    ADD_TEST(java-reservation-surf-plugin       ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/reservationSurfPlugin/reservation_surf_plugin.tesh)
      ADD_TEST(java-startKillTime                 ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/startKillTime/startKillTime.tesh)
 +    ADD_TEST(java-surf-cpu-model                ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/surfCpuModel/surf_cpu_model.tesh)
 +    ADD_TEST(java-surf-plugin                   ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/surfPlugin/surf_plugin.tesh)
      ADD_TEST(java-suspend                       ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/suspend/suspend.tesh)
      if(HAVE_TRACING)
        ADD_TEST(java-tracing                     ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/java --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/java ${CMAKE_HOME_DIRECTORY}/examples/java/tracing/tracingPingPong.tesh)
      ADD_TEST(scala-masterslave                  ${TESH_COMMAND} ${TESH_OPTION} --setenv srcdir=${CMAKE_HOME_DIRECTORY}/examples/scala --setenv classpath=${TESH_CLASSPATH} --cd ${CMAKE_BINARY_DIR}/examples/scala ${CMAKE_HOME_DIRECTORY}/examples/scala/masterslave/masterslave.tesh)
    endif()
  
 +  ADD_TEST(stack-overflow-thread                ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:thread --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/simix/stack_overflow.tesh)
 +  if(CONTEXT_UCONTEXT)
 +    ADD_TEST(stack-overflow-ucontext            ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:ucontext --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/simix/stack_overflow.tesh)
 +  endif()
 +  if(HAVE_RAWCTX)
 +    ADD_TEST(stack-overflow-raw                 ${TESH_COMMAND} ${TESH_OPTION} --cfg contexts/factory:raw --setenv srcdir=${CMAKE_HOME_DIRECTORY}/teshsuite --cd ${CMAKE_BINARY_DIR}/teshsuite ${CMAKE_HOME_DIRECTORY}/teshsuite/simix/stack_overflow.tesh)
 +  endif()
 +
    # examples/msg/mc
    if(HAVE_MC)
      if(CONTEXT_UCONTEXT)
      endif()
    endif()
  
 +  ###
 +  ### Declare that we know that some tests are broken
 +  ###
 +  if(release)
 +    if(WIN32 OR CMAKE_SYSTEM_NAME MATCHES "Darwin")
 +      # These tests are known to fail on Windows and Mac OS X
 +      # (the expected error message is not shown).
 +      set_tests_properties(stack-overflow-thread PROPERTIES WILL_FAIL true)
 +      if(CONTEXT_UCONTEXT)
 +        set_tests_properties(stack-overflow-ucontext PROPERTIES WILL_FAIL true)
 +      endif()
 +      if(HAVE_RAWCTX)
 +        set_tests_properties(stack-overflow-raw PROPERTIES WILL_FAIL true)
 +      endif()
 +    endif()
 +  endif()
 +
  endif()
  
  ADD_TEST(tesh-simdag-full-links01               ${CMAKE_BINARY_DIR}/teshsuite/simdag/platforms/basic_parsing_test ${CMAKE_HOME_DIRECTORY}/teshsuite/simdag/platforms/two_clusters.xml FULL_LINK)
@@@ -711,11 -662,6 +714,11 @@@ add_test(test-surf-usag
  add_test(test-surf-usage2                       ${CMAKE_BINARY_DIR}/testsuite/surf/surf_usage2 --cfg=path:${CMAKE_HOME_DIRECTORY}/testsuite/surf/ platform.xml)
  add_test(test-surf-trace                        ${CMAKE_BINARY_DIR}/testsuite/surf/trace_usage --cfg=path:${CMAKE_HOME_DIRECTORY}/testsuite/surf/)
  
 +if(HAVE_MC)
 +  add_test(mc-dwarf                             ${CMAKE_BINARY_DIR}/testsuite/mc/dwarf)
 +  add_test(mc-dwarf-expression                  ${CMAKE_BINARY_DIR}/testsuite/mc/dwarf-expression)
 +endif()
 +
  add_test(testall                                ${CMAKE_BINARY_DIR}/src/testall)
  
  if(enable_auto_install)
@@@ -1,6 -1,7 +1,6 @@@
  ### define source packages
  
  set(EXTRA_DIST
 -  include/xbt/win32_ucontext.h
    src/bindings/java/MANIFEST.MF.in
    src/include/instr/instr_interface.h
    src/include/mc/datatypes.h
@@@ -16,7 -17,6 +16,7 @@@
    src/include/surf/surf_resource_lmm.h
    src/include/surf/surfxml_parse_values.h
    src/include/surf/trace_mgr.h
 +  src/include/xbt/win32_ucontext.h
    src/include/xbt/wine_dbghelp.h
    src/include/xbt/xbt_os_time.h
    src/msg/msg_mailbox.h
    src/xbt/win32_ucontext.c
    tools/tesh/run_context.h
    tools/tesh/tesh.h
 +  src/surf/surf_routing_cluster_fat_tree.hpp
    )
  
  set(SMPI_SRC
 +  src/surf/surf_routing_cluster_fat_tree.cpp
    src/smpi/instr_smpi.c
    src/smpi/smpi_base.c
    src/smpi/smpi_bench.c
    src/smpi/smpi_mpi_dt.c
    src/smpi/smpi_pmpi.c
    src/smpi/smpi_replay.c
+   src/smpi/smpi_topo.c
    src/smpi/colls/smpi_openmpi_selector.c
    src/smpi/colls/smpi_mpich_selector.c
    src/smpi/colls/colls_global.c
    src/smpi/colls/allgather-smp-simple.c
    src/smpi/colls/allgather-spreading-simple.c
    src/smpi/colls/allgather-ompi-neighborexchange.c
 -  src/smpi/colls/allgatherv-GB.c  
 +  src/smpi/colls/allgatherv-GB.c
    src/smpi/colls/allgatherv-pair.c
    src/smpi/colls/allgatherv-ring.c
    src/smpi/colls/allgatherv-ompi-neighborexchange.c
    src/smpi/colls/alltoall-ring-light-barrier.c
    src/smpi/colls/alltoall-ring-mpi-barrier.c
    src/smpi/colls/alltoall-ring-one-barrier.c
 -  src/smpi/colls/alltoallv-pair.c   
 +  src/smpi/colls/alltoallv-pair.c
    src/smpi/colls/alltoallv-pair-light-barrier.c
    src/smpi/colls/alltoallv-pair-mpi-barrier.c
 -  src/smpi/colls/alltoallv-pair-one-barrier.c 
 +  src/smpi/colls/alltoallv-pair-one-barrier.c
    src/smpi/colls/alltoallv-ring.c
    src/smpi/colls/alltoallv-ring-light-barrier.c
    src/smpi/colls/alltoallv-ring-mpi-barrier.c
@@@ -321,7 -320,7 +322,7 @@@ set(SURF_SR
    src/surf/maxmin.cpp
    src/surf/network_interface.cpp
    src/surf/network_cm02.cpp
 -  src/surf/network_smpi.cpp  
 +  src/surf/network_smpi.cpp
    src/surf/network_constant.cpp
    src/surf/platf_generator.c
    src/surf/random_mgr.c
    src/surf/storage_n11.cpp
    src/surf/surf_interface.cpp
    src/surf/surf_c_bindings.cpp
 -  src/surf/surf_routing.cpp  
 +  src/surf/surf_routing.cpp
    src/surf/surf_routing_cluster.cpp
    src/surf/surf_routing_cluster_torus.cpp
    src/surf/surf_routing_dijkstra.cpp
@@@ -432,47 -431,6 +433,47 @@@ set(BINDINGS_SR
    src/bindings/lua/simgrid_lua.h
    )
  
 +set(JSURF_SWIG_SRC
 +  src/bindings/java/surf.i
 +)
 +
 +set(JSURF_SWIG_SRC_EXTRA
 +  src/bindings/java/surfdoc.i
 +)
 +
 +set(JSURF_JAVA_C_SRC
 +  src/bindings/java/surf_swig.cpp
 +  src/bindings/java/surf_swig.hpp
 +)
 +
 +set(JSURF_JAVA_GENERATED_SRC
 +  src/bindings/java/org/simgrid/surf/Surf.java
 +  src/bindings/java/org/simgrid/surf/SurfJNI.java
 +  src/bindings/java/org/simgrid/surf/Plugin.java
 +  src/bindings/java/org/simgrid/surf/Model.java
 +  src/bindings/java/org/simgrid/surf/CpuModel.java
 +  src/bindings/java/org/simgrid/surf/Resource.java
 +  src/bindings/java/org/simgrid/surf/Action.java
 +  src/bindings/java/org/simgrid/surf/Cpu.java
 +  src/bindings/java/org/simgrid/surf/CpuAction.java
 +  src/bindings/java/org/simgrid/surf/NetworkLink.java
 +  src/bindings/java/org/simgrid/surf/NetworkAction.java
 +
 +  src/bindings/java/org/simgrid/surf/RoutingEdge.java
 +  src/bindings/java/org/simgrid/surf/TmgrTraceEvent.java
 +  src/bindings/java/org/simgrid/surf/TmgrTrace.java
 +
 +  src/bindings/java/org/simgrid/surf/ActionList.java
 +
 +  #enums
 +  src/bindings/java/org/simgrid/surf/ActionState.java
 +  src/bindings/java/org/simgrid/surf/ResourceState.java
 +
 +  src/bindings/java/org/simgrid/surf/LmmConstraint.java
 +  src/bindings/java/org/simgrid/surf/LmmVariable.java
 +  src/bindings/java/org/simgrid/surf/XbtDict.java
 +)
 +
  set(JMSG_C_SRC
    src/bindings/java/jmsg.c
    src/bindings/java/jmsg.h
    src/bindings/java/smx_context_java.h
  )
  
 +set(JSURF_C_SRC
 +  src/bindings/java/surfJAVA_wrap.cxx
 +  src/bindings/java/surfJAVA_wrap.h
 +  ${JSURF_JAVA_C_SRC}
 +)
 +
  set(JMSG_JAVA_SRC
 -  src/bindings/java/org/simgrid/msg/As.java   
 +  src/bindings/java/org/simgrid/NativeLib.java
 +
 +  src/bindings/java/org/simgrid/msg/As.java
    src/bindings/java/org/simgrid/msg/Comm.java
    src/bindings/java/org/simgrid/msg/File.java
    src/bindings/java/org/simgrid/msg/Host.java
    src/bindings/java/org/simgrid/msg/MsgException.java
    src/bindings/java/org/simgrid/msg/Mutex.java
    src/bindings/java/org/simgrid/msg/NativeException.java
 -  src/bindings/java/org/simgrid/msg/NativeLib.java
    src/bindings/java/org/simgrid/msg/Process.java
    src/bindings/java/org/simgrid/msg/ProcessKilledError.java
    src/bindings/java/org/simgrid/msg/ProcessNotFoundException.java
    src/bindings/java/org/simgrid/msg/TimeoutException.java
    src/bindings/java/org/simgrid/msg/TransferFailureException.java
    src/bindings/java/org/simgrid/msg/VM.java
 +  ${JSURF_JAVA_GENERATED_SRC}
  )
  
  set(JTRACE_C_SRC
@@@ -593,11 -543,6 +594,11 @@@ set(MC_SR
    src/mc/mc_compare.c
    src/mc/mc_dpor.c
    src/mc/mc_global.c
 +  src/mc/mc_dwarf.c
 +  src/mc/mc_member.c
 +  src/mc/mc_dwarf_attrnames.h
 +  src/mc/mc_dwarf_expression.c
 +  src/mc/mc_dwarf_tagnames.h
    src/mc/mc_liveness.c
    src/mc/mc_memory.c
    src/mc/mc_private.h
    src/mc/mc_state.c
    src/mc/memory_map.c
    src/mc/mc_pair.c
 +  src/mc/mc_hash.c
 +  src/mc/mc_set.cpp
    )
  
  set(headers_to_install
@@@ -857,7 -800,7 +858,7 @@@ set(DOC_SOURCE
    doc/msg-tuto-src/platforms/griffon.xml
    doc/msg-tuto-src/platforms/peers.xml
    doc/msg-tuto-src/platforms/platform.xml
 -  
 +
    CITATION.bib
    )
  
@@@ -942,10 -885,7 +943,10 @@@ set(EXAMPLES_CMAKEFILES_TX
    examples/java/mutualExclusion/CMakeLists.txt
    examples/java/pingPong/CMakeLists.txt
    examples/java/priority/CMakeLists.txt
 +  examples/java/reservationSurfPlugin/CMakeLists.txt
    examples/java/startKillTime/CMakeLists.txt
 +  examples/java/surfCpuModel/CMakeLists.txt
 +  examples/java/surfPlugin/CMakeLists.txt
    examples/java/suspend/CMakeLists.txt
    examples/java/tracing/CMakeLists.txt
    examples/lua/CMakeLists.txt
@@@ -1002,7 -942,6 +1003,7 @@@ set(TESHSUITE_CMAKEFILES_TX
    teshsuite/msg/CMakeLists.txt
    teshsuite/msg/storage/CMakeLists.txt
    teshsuite/msg/trace/CMakeLists.txt
 +  teshsuite/bug-17132/CMakeLists.txt
    teshsuite/simdag/CMakeLists.txt
    teshsuite/simdag/availability/CMakeLists.txt
    teshsuite/simdag/network/CMakeLists.txt
@@@ -1046,7 -985,6 +1047,7 @@@ set(TOOLS_CMAKEFILES_TX
  set(TESTSUITE_CMAKEFILES_TXT
    testsuite/surf/CMakeLists.txt
    testsuite/xbt/CMakeLists.txt
 +  testsuite/mc/CMakeLists.txt
    )
  
  set(CMAKE_SOURCE_FILES
    buildtools/Cmake/Modules/FindGraphviz.cmake
    buildtools/Cmake/Modules/FindLibSigc++.cmake
    buildtools/Cmake/Modules/FindLibunwind.cmake
 +  buildtools/Cmake/Modules/FindLibdw.cmake
    buildtools/Cmake/Modules/FindLua51Simgrid.cmake
    buildtools/Cmake/Modules/FindNS3.cmake
    buildtools/Cmake/Modules/FindRngStream.cmake
diff --combined include/smpi/smpi.h
@@@ -64,6 -64,9 +64,9 @@@ SG_BEGIN_DECL(
  #define MPI_ERR_PENDING   14
  #define MPI_ERR_BUFFER    15
  #define MPI_ERR_NAME      16
+ #define MPI_ERR_DIMS      17
+ #define MPI_ERR_TOPOLOGY  18
+ #define MPI_ERR_NO_MEM    19
  #define MPI_ERRCODES_IGNORE (int *)0
  #define MPI_IDENT     0
  #define MPI_SIMILAR   1
  #define MPI_INTEGER8 MPI_DATATYPE_NULL
  #define MPI_COMPLEX MPI_DATATYPE_NULL
  #define MPI_DOUBLE_COMPLEX MPI_DATATYPE_NULL
 +#define MPI_2DOUBLE_PRECISION MPI_DATATYPE_NULL
 +#define MPI_REAL MPI_DATATYPE_NULL
 +#define MPI_LOGICAL MPI_DATATYPE_NULL
 +#define MPI_DOUBLE_PRECISION MPI_DATATYPE_NULL
 +#define MPI_INTEGER MPI_DATATYPE_NULL
  
  #define MPI_DISTRIBUTE_BLOCK 0
  #define MPI_DISTRIBUTE_NONE 1
@@@ -238,6 -236,9 +241,9 @@@ XBT_PUBLIC_DATA( MPI_Op ) MPI_BAND
  XBT_PUBLIC_DATA( MPI_Op ) MPI_BOR;
  XBT_PUBLIC_DATA( MPI_Op ) MPI_BXOR;
  
+ struct s_smpi_mpi_topology;
+ typedef struct s_smpi_mpi_topology *MPI_Topology;
+                           
  struct s_smpi_mpi_group;
  typedef struct s_smpi_mpi_group *MPI_Group;
  
diff --combined src/smpi/private.h
@@@ -110,6 -110,19 +110,19 @@@ int smpi_process_finalized(void)
  int smpi_process_initialized(void);
  void smpi_process_mark_as_initialized(void);
  
+ void smpi_topo_destroy(MPI_Topology topo);
+ MPI_Topology smpi_topo_create(int ndims);
+ int smpi_mpi_cart_create(MPI_Comm comm_old, int ndims, int dims[],
+                          int periodic[], int reorder, MPI_Comm *comm_cart);
+ int smpi_mpi_cart_shift(MPI_Comm comm, int direction, int disp,
+                         int *rank_source, int *rank_dest);
+ int smpi_mpi_cart_rank(MPI_Comm comm, int* coords, int* rank);
+ int smpi_mpi_cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords);
+ int smpi_mpi_cart_coords(MPI_Comm comm, int rank, int maxdims,
+                          int coords[]);
+ int smpi_mpi_cartdim_get(MPI_Comm comm, int *ndims);
+ int smpi_mpi_dims_create(int nnodes, int ndims, int dims[]);
+ int smpi_mpi_cart_sub(MPI_Comm comm, const int remain_dims[], MPI_Comm *newcomm);
  
  smpi_process_data_t smpi_process_data(void);
  smpi_process_data_t smpi_process_remote_data(int index);
@@@ -181,7 -194,8 +194,8 @@@ int smpi_group_unuse(MPI_Group group)
  int smpi_group_size(MPI_Group group);
  int smpi_group_compare(MPI_Group group1, MPI_Group group2);
  
- MPI_Comm smpi_comm_new(MPI_Group group);
+ MPI_Topology smpi_comm_topo(MPI_Comm comm);
+ MPI_Comm smpi_comm_new(MPI_Group group, MPI_Topology topo);
  void smpi_comm_destroy(MPI_Comm comm);
  MPI_Group smpi_comm_group(MPI_Comm comm);
  int smpi_comm_size(MPI_Comm comm);
@@@ -301,20 -315,10 +315,20 @@@ int smpi_coll_basic_alltoallv(void *sen
  // utilities
  extern double smpi_cpu_threshold;
  extern double smpi_running_power;
 +extern int smpi_privatize_global_variables;
 +extern char* start_data_exe; //start of the data+bss segment of the executable
 +extern int size_data_exe; //size of the data+bss segment of the executable
 +
 +
 +void switch_data_segment(int);
 +void smpi_get_executable_global_size(void);
 +void smpi_initialize_global_memory_segments(void);
 +void smpi_destroy_global_memory_segments(void);
  void smpi_bench_destroy(void);
  void smpi_bench_begin(void);
  void smpi_bench_end(void);
  
 +
  // f77 wrappers
  void mpi_init_(int*);
  void mpi_finalize_(int*);
@@@ -589,8 -593,5 +603,8 @@@ void TRACE_smpi_finalize(int rank)
  
  const char* encode_datatype(MPI_Datatype datatype);
  
 +// TODO, make this static and expose it more cleanly
 +extern void** mappings;
 +extern int loaded_page;
  
  #endif
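Example (illustrative, not part of the patch): per the declarations above, every communicator now carries an optional topology, and smpi_comm_new() takes it as a second argument (NULL for plain communicators, as the call sites in smpi_global.c and smpi_pmpi.c below show). A hedged sketch of how the internal helpers presumably compose, derived only from these prototypes and not verbatim SMPI code:

    /* Sketch only: illustrates the shape of the internal API. */
    MPI_Comm cart_from(MPI_Comm comm_old, int ndims, int dims[], int periodic[])
    {
      MPI_Comm comm_cart = MPI_COMM_NULL;
      /* smpi_mpi_cart_create() is expected to allocate the topology
       * (smpi_topo_create) and build the resulting communicator through
       * smpi_comm_new(group, topo); error handling omitted. */
      smpi_mpi_cart_create(comm_old, ndims, dims, periodic, /* reorder */ 0,
                           &comm_cart);
      return comm_cart;
    }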
diff --combined src/smpi/smpi_global.c
@@@ -72,7 -72,6 +72,7 @@@ void smpi_process_init(int *argc, char 
  #ifdef SMPI_F2C
      smpi_current_rank = index;
  #endif
 +
      data = smpi_process_remote_data(index);
      simcall_process_set_data(proc, data);
      if (*argc > 2) {
      // set the process attached to the mailbox
      simcall_rdv_set_receiver(data->mailbox_small, proc);
      XBT_DEBUG("<%d> New process in the game: %p", index, proc);
 +
 +    if(smpi_privatize_global_variables){
 +      switch_data_segment(index);
 +    }
 +
    }
    if (smpi_process_data() == NULL)
      xbt_die("smpi_process_data() returned NULL. You probably gave a NULL parameter to MPI_Init. Although it's required by MPI-2, this is currently not supported by SMPI.");
@@@ -99,9 -93,6 +99,9 @@@
  void smpi_process_destroy(void)
  {
    int index = smpi_process_index();
 +  if(smpi_privatize_global_variables){
 +    switch_data_segment(index);
 +  }
    process_data[index]->state = SMPI_FINALIZED;
    XBT_DEBUG("<%d> Process left the game", index);
  }
   */
  void smpi_process_finalize(void)
  {
 +#if 0
 +  // wait for all pending asynchronous comms to finish
 +  while (SIMIX_process_has_pending_comms(SIMIX_process_self())) {
 +    simcall_process_sleep(0.01);
 +  }
 +#else
    int i;
    int size = smpi_comm_size(MPI_COMM_WORLD);
    int rank = smpi_comm_rank(MPI_COMM_WORLD);
      smpi_mpi_waitall( size-1, requests+1, MPI_STATUSES_IGNORE );
      free( requests );
    }
 -
 +#endif
  }
  
  /**
@@@ -305,7 -290,7 +305,7 @@@ MPI_Comm smpi_process_comm_self(void
    smpi_process_data_t data = smpi_process_data();
    if(data->comm_self==MPI_COMM_NULL){
      MPI_Group group = smpi_group_new(1);
-     data->comm_self = smpi_comm_new(group);
+     data->comm_self = smpi_comm_new(group, NULL);
      smpi_group_set_mapping(group, smpi_process_index(), 0);
    }
  
@@@ -337,29 -322,7 +337,29 @@@ static void smpi_comm_copy_buffer_callb
  {
    XBT_DEBUG("Copy the data over");
    if(_xbt_replay_is_active()) return;
 -  memcpy(comm->comm.dst_buff, buff, buff_size);
 +  void* tmpbuff=buff;
 +
 +  if((smpi_privatize_global_variables)
 +      && ((char*)buff >= start_data_exe)
 +      && ((char*)buff < start_data_exe + size_data_exe )
 +    ){
 +       XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !");
 +       switch_data_segment(((smpi_process_data_t)SIMIX_process_get_data(comm->comm.src_proc))->index);
 +       tmpbuff = (void*)xbt_malloc(buff_size);
 +       memcpy(tmpbuff, buff, buff_size);
 +  }
 +
 +
 +  if((smpi_privatize_global_variables)
 +      && ((char*)comm->comm.dst_buff >= start_data_exe)
 +      && ((char*)comm->comm.dst_buff < start_data_exe + size_data_exe )
 +    ){
 +       XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment");
 +       switch_data_segment(((smpi_process_data_t)SIMIX_process_get_data(comm->comm.dst_proc))->index);
 +  }
 +
 +
 +  memcpy(comm->comm.dst_buff, tmpbuff, buff_size);
    if (comm->comm.detached) {
      // if this is a detached send, the source buffer was duplicated by SMPI
      // sender to make the original buffer available to the application ASAP
      //inside the user data and should be free 
      comm->comm.src_buff = NULL;
    }
 +
 +  if(tmpbuff!=buff)xbt_free(tmpbuff);
 +
  }
  
  void smpi_global_init(void)
      process_data[i]->sampling = 0;
    }
    group = smpi_group_new(process_count);
-   MPI_COMM_WORLD = smpi_comm_new(group);
+   MPI_COMM_WORLD = smpi_comm_new(group, NULL);
    MPI_UNIVERSE_SIZE = smpi_comm_size(MPI_COMM_WORLD);
    for (i = 0; i < process_count; i++) {
      smpi_group_set_mapping(group, i, i);
               "Use the option \"--cfg=smpi/running_power:<flops>\" to set its value."
               "Check http://simgrid.org/simgrid/latest/doc/options.html#options_smpi_bench for more information. ");
    }
 +  if(smpi_privatize_global_variables)
 +    smpi_initialize_global_memory_segments();
  }
  
  void smpi_global_destroy(void)
    }
    xbt_free(process_data);
    process_data = NULL;
 -
 +  if(smpi_privatize_global_variables)
 +    smpi_destroy_global_memory_segments();
    smpi_free_static();
  }
  
@@@ -602,7 -559,6 +602,7 @@@ int smpi_main(int (*realmain) (int argc
  
    smpi_cpu_threshold = sg_cfg_get_double("smpi/cpu_threshold");
    smpi_running_power = sg_cfg_get_double("smpi/running_power");
 +  smpi_privatize_global_variables = sg_cfg_get_boolean("smpi/privatize_global_variables");
    if (smpi_cpu_threshold < 0)
      smpi_cpu_threshold = DBL_MAX;
  
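Example (illustrative, not part of the patch): the smpi/privatize_global_variables option exists because SMPI folds all simulated MPI ranks into threads of a single process, so application globals are shared unless the data segment is switched per rank (the switch_data_segment() calls added above). A minimal program that silently misbehaves without the option, assuming it is launched through smpirun with --cfg=smpi/privatize_global_variables:yes:

    #include <mpi.h>
    #include <stdio.h>

    /* One copy per simulated rank only when privatization is enabled;
     * otherwise every rank (thread) shares and overwrites this variable. */
    static int my_rank = -1;

    int main(int argc, char *argv[])
    {
      MPI_Init(&argc, &argv);
      MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
      MPI_Barrier(MPI_COMM_WORLD);   /* every rank has written my_rank now */
      printf("after the barrier I still believe I am rank %d\n", my_rank);
      MPI_Finalize();
      return 0;
    }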
diff --combined src/smpi/smpi_pmpi.c
@@@ -74,8 -74,8 +74,8 @@@ int PMPI_Get_version (int *version,int 
  int PMPI_Get_library_version (char *version,int *len){
    int retval = MPI_SUCCESS;
    smpi_bench_end();
 -  snprintf(version,MPI_MAX_LIBRARY_VERSION_STRING,"SMPI Version %d.%d. Copyright The Simgrid Team 2007-2013",SIMGRID_VERSION_MAJOR,
 -           SIMGRID_VERSION_MINOR);
 +  snprintf(version,MPI_MAX_LIBRARY_VERSION_STRING,"SMPI Version %d.%d. Copyright The Simgrid Team 2007-2014",SIMGRID_VERSION_MAJOR,
 +          SIMGRID_VERSION_MINOR);
    *len = strlen(version) > MPI_MAX_LIBRARY_VERSION_STRING ? MPI_MAX_LIBRARY_VERSION_STRING : strlen(version);
    smpi_bench_begin();
    return retval;
@@@ -93,12 -93,14 +93,12 @@@ int PMPI_Query_thread(int *provided
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (provided == NULL) {
      retval = MPI_ERR_ARG;
    } else {
      *provided = MPI_THREAD_MULTIPLE;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -106,12 -108,14 +106,12 @@@ int PMPI_Is_thread_main(int *flag
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (flag == NULL) {
      retval = MPI_ERR_ARG;
    } else {
      *flag = smpi_process_index() == 0;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -147,12 -151,14 +147,12 @@@ int PMPI_Address(void *location, MPI_Ai
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (!address) {
      retval = MPI_ERR_ARG;
    } else {
      *address = (MPI_Aint) location;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -165,12 -171,14 +165,12 @@@ int PMPI_Type_free(MPI_Datatype * datat
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (!datatype) {
      retval = MPI_ERR_ARG;
    } else {
      smpi_datatype_free(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -178,6 -186,7 +178,6 @@@ int PMPI_Type_size(MPI_Datatype datatyp
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (size == NULL) {
      *size = (int) smpi_datatype_size(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -193,6 -203,7 +193,6 @@@ int PMPI_Type_get_extent(MPI_Datatype d
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (lb == NULL || extent == NULL) {
    } else {
      retval = smpi_datatype_extent(datatype, lb, extent);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -212,6 -224,7 +212,6 @@@ int PMPI_Type_extent(MPI_Datatype datat
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (extent == NULL) {
      *extent = smpi_datatype_get_extent(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -227,6 -241,7 +227,6 @@@ int PMPI_Type_lb(MPI_Datatype datatype
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (disp == NULL) {
      *disp = smpi_datatype_lb(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -242,6 -258,7 +242,6 @@@ int PMPI_Type_ub(MPI_Datatype datatype
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (disp == NULL) {
      *disp = smpi_datatype_ub(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -257,12 -275,14 +257,12 @@@ int PMPI_Op_create(MPI_User_function * 
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (function == NULL || op == NULL) {
      retval = MPI_ERR_ARG;
    } else {
      *op = smpi_op_new(function, commute);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -270,6 -290,7 +270,6 @@@ int PMPI_Op_free(MPI_Op * op
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (op == NULL) {
      retval = MPI_ERR_ARG;
    } else if (*op == MPI_OP_NULL) {
      *op = MPI_OP_NULL;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -286,6 -308,7 +286,6 @@@ int PMPI_Group_free(MPI_Group * group
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (group == NULL) {
      retval = MPI_ERR_ARG;
    } else {
      *group = MPI_GROUP_NULL;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -300,6 -324,7 +300,6 @@@ int PMPI_Group_size(MPI_Group group, in
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (size == NULL) {
      *size = smpi_group_size(group);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -315,6 -341,7 +315,6 @@@ int PMPI_Group_rank(MPI_Group group, in
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (rank == NULL) {
      *rank = smpi_group_rank(group, smpi_process_index());
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Group_translate_ranks(MPI_Group group1, int n, int *ranks1,
 -                               MPI_Group group2, int *ranks2)
 +                              MPI_Group group2, int *ranks2)
  {
    int retval, i, index;
 -  smpi_bench_end();
    if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -350,6 -380,7 +350,6 @@@ int PMPI_Group_compare(MPI_Group group1
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (result == NULL) {
      *result = smpi_group_compare(group1, group2);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Group_union(MPI_Group group1, MPI_Group group2,
 -                     MPI_Group * newgroup)
 +                    MPI_Group * newgroup)
  {
    int retval, i, proc1, proc2, size, size2;
  
 -  smpi_bench_end();
    if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Group_intersection(MPI_Group group1, MPI_Group group2,
 -                            MPI_Group * newgroup)
 +                           MPI_Group * newgroup)
  {
    int retval, i, proc1, proc2, size;
  
 -  smpi_bench_end();
    if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -440,6 -476,7 +440,6 @@@ int PMPI_Group_difference(MPI_Group gro
  {
    int retval, i, proc1, proc2, size, size2;
  
 -  smpi_bench_end();
    if (group1 == MPI_GROUP_NULL || group2 == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -474,6 -512,7 +474,6 @@@ int PMPI_Group_incl(MPI_Group group, in
  {
    int retval, i, index;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      } else if (n == smpi_group_size(group)) {
        *newgroup = group;
        if(group!= smpi_comm_group(MPI_COMM_WORLD)
 -         && group != MPI_GROUP_NULL
 -         && group != smpi_comm_group(MPI_COMM_SELF)
 -         && group != MPI_GROUP_EMPTY)
 -        smpi_group_use(group);
 +                && group != MPI_GROUP_NULL
 +                && group != smpi_comm_group(MPI_COMM_SELF)
 +                && group != MPI_GROUP_EMPTY)
 +      smpi_group_use(group);
      } else {
        *newgroup = smpi_group_new(n);
        for (i = 0; i < n; i++) {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -504,6 -544,7 +504,6 @@@ int PMPI_Group_excl(MPI_Group group, in
  {
    int retval, i, j, newsize, oldsize, index;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      if (n == 0) {
        *newgroup = group;
        if(group!= smpi_comm_group(MPI_COMM_WORLD)
 -         && group != MPI_GROUP_NULL
 -         && group != smpi_comm_group(MPI_COMM_SELF)
 -         && group != MPI_GROUP_EMPTY)
 -        smpi_group_use(group);
 +                && group != MPI_GROUP_NULL
 +                && group != smpi_comm_group(MPI_COMM_SELF)
 +                && group != MPI_GROUP_EMPTY)
 +      smpi_group_use(group);
      } else if (n == smpi_group_size(group)) {
        *newgroup = MPI_GROUP_EMPTY;
      } else {
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Group_range_incl(MPI_Group group, int n, int ranges[][3],
 -                          MPI_Group * newgroup)
 +                         MPI_Group * newgroup)
  {
    int retval, i, j, rank, size, index;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
        for (i = 0; i < n; i++) {
          for (rank = ranges[i][0];       /* First */
               rank >= 0; /* Last */
 -             ) {
 +              ) {
            size++;
  
            rank += ranges[i][2]; /* Stride */
          if (ranges[i][0]<ranges[i][1]){
 -            if(rank > ranges[i][1])
 -              break;
 +            if(rank > ranges[i][1])
 +              break;
          }else{
 -            if(rank < ranges[i][1])
 -              break;
 +            if(rank < ranges[i][1])
 +              break;
          }
          }
        }
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Group_range_excl(MPI_Group group, int n, int ranges[][3],
 -                          MPI_Group * newgroup)
 +                         MPI_Group * newgroup)
  {
    int retval, i, rank, newrank,oldrank, size, index, add;
  
 -  smpi_bench_end();
    if (group == MPI_GROUP_NULL) {
      retval = MPI_ERR_GROUP;
    } else if (newgroup == NULL) {
      if (n == 0) {
        *newgroup = group;
        if(group!= smpi_comm_group(MPI_COMM_WORLD)
 -         && group != MPI_GROUP_NULL
 -         && group != smpi_comm_group(MPI_COMM_SELF)
 -         && group != MPI_GROUP_EMPTY)
 -        smpi_group_use(group);
 +                && group != MPI_GROUP_NULL
 +                && group != smpi_comm_group(MPI_COMM_SELF)
 +                && group != MPI_GROUP_EMPTY)
 +      smpi_group_use(group);
      } else {
        size = smpi_group_size(group);
        for (i = 0; i < n; i++) {
          for (rank = ranges[i][0];       /* First */
               rank >= 0; /* Last */
 -             ) {
 +              ) {
            size--;
  
            rank += ranges[i][2]; /* Stride */
          if (ranges[i][0]<ranges[i][1]){
 -            if(rank > ranges[i][1])
 -              break;
 +            if(rank > ranges[i][1])
 +              break;
          }else{
 -            if(rank < ranges[i][1])
 -              break;
 +            if(rank < ranges[i][1])
 +              break;
          }
          }
        }
            for (i = 0; i < n; i++) {
              for (rank = ranges[i][0];rank >= 0;){
                if(rank==oldrank){
 -                add=0;
 -                break;
 +                  add=0;
 +                  break;
                }
  
                rank += ranges[i][2]; /* Stride */
  
                if (ranges[i][0]<ranges[i][1]){
 -                if(rank > ranges[i][1])
 -                  break;
 +                  if(rank > ranges[i][1])
 +                    break;
                }else{
 -                if(rank < ranges[i][1])
 -                  break;
 +                  if(rank < ranges[i][1])
 +                    break;
                }
              }
            }
  
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Comm_rank(MPI_Comm comm, int *rank)
  {
    int retval = 0;
 -
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (rank == NULL) {
      *rank = smpi_comm_rank(comm);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Comm_size(MPI_Comm comm, int *size)
  {
    int retval = 0;
 -
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (size == NULL) {
      *size = smpi_comm_size(comm);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -709,6 -761,7 +709,6 @@@ int PMPI_Comm_get_name (MPI_Comm comm, 
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL)  {
      retval = MPI_ERR_COMM;
    } else if (name == NULL || len == NULL)  {
      smpi_comm_get_name(comm, name, len);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -724,6 -778,7 +724,6 @@@ int PMPI_Comm_group(MPI_Comm comm, MPI_
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (group == NULL) {
    } else {
      *group = smpi_comm_group(comm);
      if(*group!= smpi_comm_group(MPI_COMM_WORLD)
 -       && *group != MPI_GROUP_NULL
 -       && *group != smpi_comm_group(MPI_COMM_SELF)
 -       && *group != MPI_GROUP_EMPTY)
 -      smpi_group_use(*group);
 +              && *group != MPI_GROUP_NULL
 +              && *group != smpi_comm_group(MPI_COMM_SELF)
 +              && *group != MPI_GROUP_EMPTY)
 +    smpi_group_use(*group);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -744,6 -800,7 +744,6 @@@ int PMPI_Comm_compare(MPI_Comm comm1, M
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm1 == MPI_COMM_NULL || comm2 == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (result == NULL) {
        *result = MPI_IDENT;
      } else {
        *result =
 -        smpi_group_compare(smpi_comm_group(comm1),
 -                           smpi_comm_group(comm2));
 +          smpi_group_compare(smpi_comm_group(comm1),
 +                             smpi_comm_group(comm2));
        if (*result == MPI_IDENT) {
          *result = MPI_CONGRUENT;
        }
      }
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -768,14 -826,16 +768,14 @@@ int PMPI_Comm_dup(MPI_Comm comm, MPI_Co
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (newcomm == NULL) {
      retval = MPI_ERR_ARG;
    } else {
-     *newcomm = smpi_comm_new(smpi_comm_group(comm));
+     *newcomm = smpi_comm_new(smpi_comm_group(comm), smpi_comm_topo(comm));
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -783,6 -843,7 +783,6 @@@ int PMPI_Comm_create(MPI_Comm comm, MPI
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if (group == MPI_GROUP_NULL) {
      retval = MPI_SUCCESS;
    }else{
  
-     *newcomm = smpi_comm_new(group);
+     *newcomm = smpi_comm_new(group, NULL);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -804,6 -866,7 +804,6 @@@ int PMPI_Comm_free(MPI_Comm * comm
  {
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == NULL) {
      retval = MPI_ERR_ARG;
    } else if (*comm == MPI_COMM_NULL) {
      *comm = MPI_COMM_NULL;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -821,6 -885,7 +821,6 @@@ int PMPI_Comm_disconnect(MPI_Comm * com
    /* TODO: wait until all communication in comm are done */
    int retval = 0;
  
 -  smpi_bench_end();
    if (comm == NULL) {
      retval = MPI_ERR_ARG;
    } else if (*comm == MPI_COMM_NULL) {
      *comm = MPI_COMM_NULL;
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm* comm_out)
  {
    int retval = 0;
 -
    smpi_bench_end();
 +
    if (comm_out == NULL) {
      retval = MPI_ERR_ARG;
    } else if (comm == MPI_COMM_NULL) {
      retval = MPI_SUCCESS;
    }
    smpi_bench_begin();
 +
    return retval;
  }
  
@@@ -1125,7 -1190,7 +1125,7 @@@ int PMPI_Issend(void* buf, int count, M
  }
  
  int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag,
 -              MPI_Comm comm, MPI_Status * status)
 +             MPI_Comm comm, MPI_Status * status)
  {
    int retval = 0;
  
      retval = MPI_ERR_TAG;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int src_traced = smpi_group_index(smpi_comm_group(comm), src);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_RECV;
 -    extra->send_size = count;
 -    extra->src = src_traced;
 -    extra->dst = rank;
 -    extra->datatype1 = encode_datatype(datatype);
 -    TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int src_traced = smpi_group_index(smpi_comm_group(comm), src);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_RECV;
 +  extra->send_size = count;
 +  extra->src = src_traced;
 +  extra->dst = rank;
 +  extra->datatype1 = encode_datatype(datatype);
 +  TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra);
  #endif
  
      smpi_mpi_recv(buf, count, datatype, src, tag, comm, status);
      retval = MPI_SUCCESS;
  
  #ifdef HAVE_TRACING
 -    //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
 -    if(status!=MPI_STATUS_IGNORE){
 -      src_traced = smpi_group_index(smpi_comm_group(comm), status->MPI_SOURCE);
 -      TRACE_smpi_recv(rank, src_traced, rank);
 -    }
 -    TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
 +  //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
 +  if(status!=MPI_STATUS_IGNORE){
 +    src_traced = smpi_group_index(smpi_comm_group(comm), status->MPI_SOURCE);
 +    TRACE_smpi_recv(rank, src_traced, rank);
 +  }
 +  TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag,
 -              MPI_Comm comm)
 +             MPI_Comm comm)
  {
    int retval = 0;
  
    } else {
  
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SEND;
 -    extra->send_size = count;
 -    extra->src = rank;
 -    extra->dst = dst_traced;
 -    extra->datatype1 = encode_datatype(datatype);
 -    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
 -    TRACE_smpi_send(rank, rank, dst_traced,count*smpi_datatype_size(datatype));
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_SEND;
 +  extra->send_size = count;
 +  extra->src = rank;
 +  extra->dst = dst_traced;
 +  extra->datatype1 = encode_datatype(datatype);
 +  TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
 +  TRACE_smpi_send(rank, rank, dst_traced,count*smpi_datatype_size(datatype));
  #endif
  
      smpi_mpi_send(buf, count, datatype, dst, tag, comm);
      retval = MPI_SUCCESS;
  
  #ifdef HAVE_TRACING
 -    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
 +  TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
  #endif
    }
  
  int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MPI_Comm comm) {
    int retval = 0;
  
 -  smpi_bench_end();
 -
 -  if (comm == MPI_COMM_NULL) {
 -    retval = MPI_ERR_COMM;
 -  } else if (dst == MPI_PROC_NULL) {
 -    retval = MPI_SUCCESS;
 -  } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){
 -    retval = MPI_ERR_RANK;
 -  } else if (count < 0) {
 -    retval = MPI_ERR_COUNT;
 -  } else if (buf==NULL && count > 0) {
 -    retval = MPI_ERR_COUNT;
 -  } else if (datatype == MPI_DATATYPE_NULL){
 -    retval = MPI_ERR_TYPE;
 -  } else if(tag<0 && tag !=  MPI_ANY_TAG){
 -    retval = MPI_ERR_TAG;
 -  } else {
 -
 -#ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SSEND;
 -    extra->send_size = count;
 -    extra->src = rank;
 -    extra->dst = dst_traced;
 -    extra->datatype1 = encode_datatype(datatype);
 -    TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);   TRACE_smpi_send(rank, rank, dst_traced,count*smpi_datatype_size(datatype));
 -#endif
 -
 -    smpi_mpi_ssend(buf, count, datatype, dst, tag, comm);
 -    retval = MPI_SUCCESS;
 -
 -#ifdef HAVE_TRACING
 -    TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
 -#endif
 -  }
 -
 -  smpi_bench_begin();
 -  return retval;}
 +   smpi_bench_end();
 +
 +   if (comm == MPI_COMM_NULL) {
 +     retval = MPI_ERR_COMM;
 +   } else if (dst == MPI_PROC_NULL) {
 +     retval = MPI_SUCCESS;
 +   } else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0){
 +     retval = MPI_ERR_RANK;
 +   } else if (count < 0) {
 +     retval = MPI_ERR_COUNT;
 +   } else if (buf==NULL && count > 0) {
 +     retval = MPI_ERR_COUNT;
 +   } else if (datatype == MPI_DATATYPE_NULL){
 +     retval = MPI_ERR_TYPE;
 +   } else if(tag<0 && tag !=  MPI_ANY_TAG){
 +     retval = MPI_ERR_TAG;
 +   } else {
 +
 + #ifdef HAVE_TRACING
 +   int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +   int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 +   instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +   extra->type = TRACING_SSEND;
 +   extra->send_size = count;
 +   extra->src = rank;
 +   extra->dst = dst_traced;
 +   extra->datatype1 = encode_datatype(datatype);
 +   TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra);
 +   TRACE_smpi_send(rank, rank, dst_traced, count*smpi_datatype_size(datatype));
 + #endif
 +
 +     smpi_mpi_ssend(buf, count, datatype, dst, tag, comm);
 +     retval = MPI_SUCCESS;
 +
 + #ifdef HAVE_TRACING
 +   TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__);
 + #endif
 +   }
 +
 +   smpi_bench_begin();
 +   return retval;
 + }
  
  
  int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                  int dst, int sendtag, void *recvbuf, int recvcount,
 -                  MPI_Datatype recvtype, int src, int recvtag,
 -                  MPI_Comm comm, MPI_Status * status)
 +                 int dst, int sendtag, void *recvbuf, int recvcount,
 +                 MPI_Datatype recvtype, int src, int recvtag,
 +                 MPI_Comm comm, MPI_Status * status)
  {
    int retval = 0;
  
               || recvtype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) {
 -    smpi_empty_status(status);
 -    status->MPI_SOURCE = MPI_PROC_NULL;
 -    retval = MPI_SUCCESS;
 +      smpi_empty_status(status);
 +      status->MPI_SOURCE = MPI_PROC_NULL;
 +      retval = MPI_SUCCESS;
    }else if (dst >= smpi_group_size(smpi_comm_group(comm)) || dst <0 ||
 -            (src!=MPI_ANY_SOURCE && (src >= smpi_group_size(smpi_comm_group(comm)) || src <0))){
 +      (src!=MPI_ANY_SOURCE && (src >= smpi_group_size(smpi_comm_group(comm)) || src <0))){
      retval = MPI_ERR_RANK;
    } else if (sendcount < 0 || recvcount<0) {
 -    retval = MPI_ERR_COUNT;
 +      retval = MPI_ERR_COUNT;
    } else if ((sendbuf==NULL && sendcount > 0)||(recvbuf==NULL && recvcount>0)) {
      retval = MPI_ERR_COUNT;
    } else if((sendtag<0 && sendtag !=  MPI_ANY_TAG)||(recvtag<0 && recvtag != MPI_ANY_TAG)){
    } else {
  
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 -    int src_traced = smpi_group_index(smpi_comm_group(comm), src);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SENDRECV;
 -    extra->send_size = sendcount;
 -    extra->recv_size = recvcount;
 -    extra->src = src_traced;
 -    extra->dst = dst_traced;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
 -    TRACE_smpi_send(rank, rank, dst_traced,sendcount*smpi_datatype_size(sendtype));
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int dst_traced = smpi_group_index(smpi_comm_group(comm), dst);
 +  int src_traced = smpi_group_index(smpi_comm_group(comm), src);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_SENDRECV;
 +  extra->send_size = sendcount;
 +  extra->recv_size = recvcount;
 +  extra->src = src_traced;
 +  extra->dst = dst_traced;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra);
 +  TRACE_smpi_send(rank, rank, dst_traced,sendcount*smpi_datatype_size(sendtype));
  #endif
  
  
      retval = MPI_SUCCESS;
  
  #ifdef HAVE_TRACING
 -    TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
 -    TRACE_smpi_recv(rank, src_traced, rank);
 +  TRACE_smpi_ptp_out(rank, src_traced, dst_traced, __FUNCTION__);
 +  TRACE_smpi_recv(rank, src_traced, rank);
  #endif
  
    }
  }
  
  int PMPI_Sendrecv_replace(void *buf, int count, MPI_Datatype datatype,
 -                          int dst, int sendtag, int src, int recvtag,
 -                          MPI_Comm comm, MPI_Status * status)
 +                         int dst, int sendtag, int src, int recvtag,
 +                         MPI_Comm comm, MPI_Status * status)
  {
    //TODO: suboptimal implementation
    void *recvbuf;
    int retval = 0;
    if (datatype == MPI_DATATYPE_NULL) {
 -    retval = MPI_ERR_TYPE;
 +      retval = MPI_ERR_TYPE;
    } else if (count < 0) {
 -    retval = MPI_ERR_COUNT;
 +      retval = MPI_ERR_COUNT;
    } else {
      int size = smpi_datatype_get_extent(datatype) * count;
      recvbuf = xbt_new0(char, size);
      retval =
 -      MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count,
 -                   datatype, src, recvtag, comm, status);
 +        MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count,
 +                     datatype, src, recvtag, comm, status);
      if(retval==MPI_SUCCESS){
 -      smpi_datatype_copy(recvbuf, count, datatype, buf, count, datatype);
 +        smpi_datatype_copy(recvbuf, count, datatype, buf, count, datatype);
      }
      xbt_free(recvbuf);
  
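
The wrapper above implements the _replace variant with a temporary buffer and a plain MPI_Sendrecv (hence the TODO). An illustrative use, assuming rank and size were obtained earlier with MPI_Comm_rank/MPI_Comm_size: a ring shift that reuses a single buffer.

    int token = rank;
    MPI_Status status;
    /* send the buffer to the right neighbour while receiving into it from the left one */
    MPI_Sendrecv_replace(&token, 1, MPI_INT, (rank + 1) % size, 0,
                         (rank + size - 1) % size, 0, MPI_COMM_WORLD, &status);
    /* token now holds the value sent by the left neighbour */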
@@@ -1379,7 -1444,7 +1379,7 @@@ int PMPI_Test(MPI_Request * request, in
  }
  
  int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag,
 -                 MPI_Status * status)
 +                MPI_Status * status)
  {
    int retval = 0;
  
@@@ -1542,8 -1607,8 +1542,8 @@@ int PMPI_Waitany(int count, MPI_Reques
      if (is_wait_for_receive) {
        if(srcs[*index]==MPI_ANY_SOURCE)
        src_traced = (status!=MPI_STATUS_IGNORE) ?
 -          smpi_group_rank(smpi_comm_group(comms[*index]), status->MPI_SOURCE) :
 -          srcs[*index];
 +                      smpi_group_rank(smpi_comm_group(comms[*index]), status->MPI_SOURCE) :
 +                      srcs[*index];
        TRACE_smpi_recv(rank_traced, src_traced, dst_traced);
      }
      TRACE_smpi_ptp_out(rank_traced, src_traced, dst_traced, __FUNCTION__);
@@@ -1594,16 -1659,16 +1594,16 @@@ int PMPI_Waitall(int count, MPI_Reques
  #ifdef HAVE_TRACING
    for (i = 0; i < count; i++) {
      if(valid[i]){
 -      //int src_traced = srcs[*index];
 -      //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
 +      //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE)
        int src_traced = srcs[i];
        int dst_traced = dsts[i];
        int is_wait_for_receive = recvs[i];
        if (is_wait_for_receive) {
          if(src_traced==MPI_ANY_SOURCE)
 -          src_traced = (status!=MPI_STATUSES_IGNORE) ?
 -            smpi_group_rank(smpi_comm_group(comms[i]), status[i].MPI_SOURCE) :
 -            srcs[i];
 +        src_traced = (status!=MPI_STATUSES_IGNORE) ?
 +                          smpi_group_rank(smpi_comm_group(comms[i]), status[i].MPI_SOURCE) :
 +                          srcs[i];
          TRACE_smpi_recv(rank_traced, src_traced, dst_traced);
        }
      }
  }
  
  int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount,
 -                  int *indices, MPI_Status status[])
 +                 int *indices, MPI_Status status[])
  {
    int retval = 0;
  
  }
  
  int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount,
 -                  int* indices, MPI_Status status[])
 +                 int* indices, MPI_Status status[])
  {
    int retval = 0;
  
 -  smpi_bench_end();
 -  if (outcount == NULL) {
 -    retval = MPI_ERR_ARG;
 -  } else {
 -    *outcount = smpi_mpi_testsome(incount, requests, indices, status);
 -    retval = MPI_SUCCESS;
 -  }
 -  smpi_bench_begin();
 -  return retval;
 +   smpi_bench_end();
 +   if (outcount == NULL) {
 +     retval = MPI_ERR_ARG;
 +   } else {
 +     *outcount = smpi_mpi_testsome(incount, requests, indices, status);
 +     retval = MPI_SUCCESS;
 +   }
 +   smpi_bench_begin();
 +   return retval;
  }
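
A typical polling loop around MPI_Testsome, for illustration only (NREQ and the requests array, assumed to have been filled by earlier MPI_Isend/MPI_Irecv calls, are placeholders):

    int indices[NREQ], outcount, done = 0;
    MPI_Status statuses[NREQ];
    while (done < NREQ) {
      MPI_Testsome(NREQ, requests, &outcount, indices, statuses);
      if (outcount != MPI_UNDEFINED)   /* MPI_UNDEFINED: no active request left */
        done += outcount;
      /* useful computation can overlap here while requests are in flight */
    }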
  
  
@@@ -1663,21 -1728,21 +1663,21 @@@ int PMPI_Bcast(void *buf, int count, MP
      retval = MPI_ERR_COMM;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
  
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_BCAST;
 -    extra->send_size = count;
 -    extra->root = root_traced;
 -    extra->datatype1 = encode_datatype(datatype);
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_BCAST;
 +  extra->send_size = count;
 +  extra->root = root_traced;
 +  extra->datatype1 = encode_datatype(datatype);
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
  
  #endif
      mpi_coll_bcast_fun(buf, count, datatype, root, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
@@@ -1695,15 -1760,15 +1695,15 @@@ int PMPI_Barrier(MPI_Comm comm
      retval = MPI_ERR_COMM;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_BARRIER;
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_BARRIER;
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
  #endif
      mpi_coll_barrier_fun(comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                void *recvbuf, int recvcount, MPI_Datatype recvtype,
 -                int root, MPI_Comm comm)
 +               void *recvbuf, int recvcount, MPI_Datatype recvtype,
 +               int root, MPI_Comm comm)
  {
    int retval = 0;
  
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
 -             ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){
 +            ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){
      retval = MPI_ERR_TYPE;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) ||
 -             ((smpi_comm_rank(comm) == root) && (recvcount <0))){
 +            ((smpi_comm_rank(comm) == root) && (recvcount <0))){
      retval = MPI_ERR_COUNT;
    } else {
  
        sendtmptype=recvtype;
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_GATHER;
 -    extra->send_size = sendtmpcount;
 -    extra->recv_size = recvcount;
 -    extra->root = root_traced;
 -    extra->datatype1 = encode_datatype(sendtmptype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_GATHER;
 +  extra->send_size = sendtmpcount;
 +  extra->recv_size = recvcount;
 +  extra->root = root_traced;
 +  extra->datatype1 = encode_datatype(sendtmptype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra);
  #endif
      mpi_coll_gather_fun(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount,
 -                        recvtype, root, comm);
 +                    recvtype, root, comm);
  
  
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                 void *recvbuf, int *recvcounts, int *displs,
 -                 MPI_Datatype recvtype, int root, MPI_Comm comm)
 +                void *recvbuf, int *recvcounts, int *displs,
 +                MPI_Datatype recvtype, int root, MPI_Comm comm)
  {
    int retval = 0;
  
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
 -             ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){
 +            ((smpi_comm_rank(comm) == root) && (recvtype == MPI_DATATYPE_NULL))){
      retval = MPI_ERR_TYPE;
    } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
      retval = MPI_ERR_COUNT;
      }
  
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 -    int i=0;
 -    int size = smpi_comm_size(comm);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_GATHERV;
 -    extra->send_size = sendtmpcount;
 -    extra->recvcounts= xbt_malloc(size*sizeof(int));
 -    for(i=0; i< size; i++)//copy data to avoid bad free
 -      extra->recvcounts[i] = recvcounts[i];
 -    extra->num_processes = size;
 -    extra->root = root_traced;
 -    extra->datatype1 = encode_datatype(sendtmptype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  int i=0;
 +  int size = smpi_comm_size(comm);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_GATHERV;
 +  extra->send_size = sendtmpcount;
 +  extra->recvcounts= xbt_malloc(size*sizeof(int));
 +  for(i=0; i< size; i++) // copy the data to avoid a bad free later
 +    extra->recvcounts[i] = recvcounts[i];
 +  extra->num_processes = size;
 +  extra->root = root_traced;
 +  extra->datatype1 = encode_datatype(sendtmptype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
  #endif
      smpi_mpi_gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts,
                       displs, recvtype, root, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                   void *recvbuf, int recvcount, MPI_Datatype recvtype,
 -                   MPI_Comm comm)
 +                  void *recvbuf, int recvcount, MPI_Datatype recvtype,
 +                  MPI_Comm comm)
  {
    int retval = 0;
  
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
 -             (recvtype == MPI_DATATYPE_NULL)){
 +            (recvtype == MPI_DATATYPE_NULL)){
      retval = MPI_ERR_TYPE;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendcount <0)) ||
 -             (recvcount <0)){
 +            (recvcount <0)){
      retval = MPI_ERR_COUNT;
    } else {
      if(sendbuf == MPI_IN_PLACE) {
        sendtype=recvtype;
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_ALLGATHER;
 -    extra->send_size = sendcount;
 -    extra->recv_size = recvcount;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_ALLGATHER;
 +  extra->send_size = sendcount;
 +  extra->recv_size = recvcount;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra);
  #endif
      mpi_coll_allgather_fun(sendbuf, sendcount, sendtype, recvbuf, recvcount,
                             recvtype, comm);
      retval = MPI_SUCCESS;
  
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
    smpi_bench_begin();
  }
  
  int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                    void *recvbuf, int *recvcounts, int *displs,
 -                    MPI_Datatype recvtype, MPI_Comm comm)
 +                   void *recvbuf, int *recvcounts, int *displs,
 +                   MPI_Datatype recvtype, MPI_Comm comm)
  {
    int retval = 0;
  
    if (comm == MPI_COMM_NULL) {
      retval = MPI_ERR_COMM;
    } else if ((( sendbuf != MPI_IN_PLACE) && (sendtype == MPI_DATATYPE_NULL)) ||
 -             (recvtype == MPI_DATATYPE_NULL)){
 +            (recvtype == MPI_DATATYPE_NULL)){
      retval = MPI_ERR_TYPE;
    } else if (( sendbuf != MPI_IN_PLACE) && (sendcount <0)){
      retval = MPI_ERR_COUNT;
        sendtype=recvtype;
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int i=0;
 -    int size = smpi_comm_size(comm);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_ALLGATHERV;
 -    extra->send_size = sendcount;
 -    extra->recvcounts= xbt_malloc(size*sizeof(int));
 -    for(i=0; i< size; i++)//copy data to avoid bad free
 -      extra->recvcounts[i] = recvcounts[i];
 -    extra->num_processes = size;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int i=0;
 +  int size = smpi_comm_size(comm);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_ALLGATHERV;
 +  extra->send_size = sendcount;
 +  extra->recvcounts= xbt_malloc(size*sizeof(int));
 +  for(i=0; i< size; i++) // copy the data to avoid a bad free later
 +    extra->recvcounts[i] = recvcounts[i];
 +  extra->num_processes = size;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      mpi_coll_allgatherv_fun(sendbuf, sendcount, sendtype, recvbuf, recvcounts,
 -                            displs, recvtype, comm);
 +                        displs, recvtype, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
 -                 int root, MPI_Comm comm)
 +                void *recvbuf, int recvcount, MPI_Datatype recvtype,
 +                int root, MPI_Comm comm)
  {
    int retval = 0;
  
    } else {
  
      if (recvbuf == MPI_IN_PLACE) {
 -      recvtype=sendtype;
 -      recvcount=sendcount;
 +        recvtype=sendtype;
 +        recvcount=sendcount;
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SCATTER;
 -    extra->send_size = sendcount;
 -    extra->recv_size= recvcount;
 -    extra->root = root_traced;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_SCATTER;
 +  extra->send_size = sendcount;
 +  extra->recv_size= recvcount;
 +  extra->root = root_traced;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
  #endif
      mpi_coll_scatter_fun(sendbuf, sendcount, sendtype, recvbuf, recvcount,
 -                         recvtype, root, comm);
 +                     recvtype, root, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
 -                  MPI_Datatype sendtype, void *recvbuf, int recvcount,
 -                  MPI_Datatype recvtype, int root, MPI_Comm comm)
 +                 MPI_Datatype sendtype, void *recvbuf, int recvcount,
 +                 MPI_Datatype recvtype, int root, MPI_Comm comm)
  {
    int retval = 0;
  
      retval = MPI_ERR_TYPE;
    } else {
      if (recvbuf == MPI_IN_PLACE) {
 -      recvtype=sendtype;
 -      recvcount=sendcounts[smpi_comm_rank(comm)];
 +        recvtype=sendtype;
 +        recvcount=sendcounts[smpi_comm_rank(comm)];
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 -    int i=0;
 -    int size = smpi_comm_size(comm);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SCATTERV;
 -    extra->recv_size = recvcount;
 -    extra->sendcounts= xbt_malloc(size*sizeof(int));
 -    for(i=0; i< size; i++)//copy data to avoid bad free
 -      extra->sendcounts[i] = sendcounts[i];
 -    extra->num_processes = size;
 -    extra->root = root_traced;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 -
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  int i=0;
 +  int size = smpi_comm_size(comm);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_SCATTERV;
 +  extra->recv_size = recvcount;
 +  extra->sendcounts= xbt_malloc(size*sizeof(int));
 +  for(i=0; i< size; i++) // copy the data to avoid a bad free later
 +    extra->sendcounts[i] = sendcounts[i];
 +  extra->num_processes = size;
 +  extra->root = root_traced;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
  
  #endif
      smpi_mpi_scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf,
                        recvcount, recvtype, root, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Reduce(void *sendbuf, void *recvbuf, int count,
 -                MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
 +               MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm)
  {
    int retval = 0;
  
      retval = MPI_ERR_ARG;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_REDUCE;
 -    extra->send_size = count;
 -    extra->datatype1 = encode_datatype(datatype);
 -    extra->root = root_traced;
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int root_traced = smpi_group_index(smpi_comm_group(comm), root);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_REDUCE;
 +  extra->send_size = count;
 +  extra->datatype1 = encode_datatype(datatype);
 +  extra->root = root_traced;
  
 -    TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
 +  TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__,extra);
  #endif
      mpi_coll_reduce_fun(sendbuf, recvbuf, count, datatype, op, root, comm);
  
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Reduce_local(void *inbuf, void *inoutbuf, int count,
 -                      MPI_Datatype datatype, MPI_Op op){
 +    MPI_Datatype datatype, MPI_Op op){
    int retval = 0;
  
 -  smpi_bench_end();
 -  if (datatype == MPI_DATATYPE_NULL || op == MPI_OP_NULL) {
 -    retval = MPI_ERR_ARG;
 -  } else {
 -    smpi_op_apply(op, inbuf, inoutbuf, &count, &datatype);
 -    retval=MPI_SUCCESS;
 -  }
 -  smpi_bench_begin();
 -  return retval;
 +    smpi_bench_end();
 +    if (datatype == MPI_DATATYPE_NULL || op == MPI_OP_NULL) {
 +      retval = MPI_ERR_ARG;
 +    } else {
 +      smpi_op_apply(op, inbuf, inoutbuf, &count, &datatype);
 +      retval=MPI_SUCCESS;
 +    }
 +    smpi_bench_begin();
 +    return retval;
  }
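
MPI_Reduce_local combines two local buffers without any communication, which is why the wrapper above is a direct call to smpi_op_apply. A quick sketch (assumes mpi.h is included):

    double in[4]    = {1, 2, 3, 4};
    double inout[4] = {10, 20, 30, 40};
    /* element-wise inout[i] = in[i] + inout[i]; no message is exchanged */
    MPI_Reduce_local(in, inout, 4, MPI_DOUBLE, MPI_SUM);
    /* inout is now {11, 22, 33, 44} */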
  
  int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count,
 -                   MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 +                  MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
    int retval = 0;
  
        smpi_datatype_copy(recvbuf, count, datatype,sendtmpbuf, count, datatype);
      }
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_ALLREDUCE;
 -    extra->send_size = count;
 -    extra->datatype1 = encode_datatype(datatype);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_ALLREDUCE;
 +  extra->send_size = count;
 +  extra->datatype1 = encode_datatype(datatype);
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      mpi_coll_allreduce_fun(sendtmpbuf, recvbuf, count, datatype, op, comm);
  
  
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Scan(void *sendbuf, void *recvbuf, int count,
 -              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 +             MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
    int retval = 0;
  
      retval = MPI_ERR_OP;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_SCAN;
 -    extra->send_size = count;
 -    extra->datatype1 = encode_datatype(datatype);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_SCAN;
 +  extra->send_size = count;
 +  extra->datatype1 = encode_datatype(datatype);
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      smpi_mpi_scan(sendbuf, recvbuf, count, datatype, op, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
@@@ -2154,18 -2219,18 +2154,18 @@@ int PMPI_Exscan(void *sendbuf, void *re
      retval = MPI_ERR_OP;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_EXSCAN;
 -    extra->send_size = count;
 -    extra->datatype1 = encode_datatype(datatype);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_EXSCAN;
 +  extra->send_size = count;
 +  extra->datatype1 = encode_datatype(datatype);
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      smpi_mpi_exscan(sendbuf, recvbuf, count, datatype, op, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts,
 -                        MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 +                       MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
    int retval = 0;
    smpi_bench_end();
      retval = MPI_ERR_ARG;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int i=0;
 -    int size = smpi_comm_size(comm);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_REDUCE_SCATTER;
 -    extra->send_size = 0;
 -    extra->recvcounts= xbt_malloc(size*sizeof(int));
 -    for(i=0; i< size; i++)//copy data to avoid bad free
 -      extra->recvcounts[i] = recvcounts[i];
 -    extra->num_processes = size;
 -    extra->datatype1 = encode_datatype(datatype);
 -
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int i=0;
 +  int size = smpi_comm_size(comm);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_REDUCE_SCATTER;
 +  extra->send_size = 0;
 +  extra->recvcounts= xbt_malloc(size*sizeof(int));
 +  for(i=0; i< size; i++) // copy the data to avoid a bad free later
 +    extra->recvcounts[i] = recvcounts[i];
 +  extra->num_processes = size;
 +  extra->datatype1 = encode_datatype(datatype);
 +
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      void* sendtmpbuf=sendbuf;
      if(sendbuf==MPI_IN_PLACE){
      }
  
      mpi_coll_reduce_scatter_fun(sendtmpbuf, recvbuf, recvcounts,
 -                                datatype,  op, comm);
 +                       datatype,  op, comm);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount,
 -                              MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
 +                       MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
    int retval,i;
    smpi_bench_end();
      int count=smpi_comm_size(comm);
  
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_REDUCE_SCATTER;
 -    extra->send_size = 0;
 -    extra->recvcounts= xbt_malloc(count*sizeof(int));
 -    for(i=0; i< count; i++)//copy data to avoid bad free
 -      extra->recvcounts[i] = recvcount;
 -    extra->num_processes = count;
 -    extra->datatype1 = encode_datatype(datatype);
 -
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_REDUCE_SCATTER;
 +  extra->send_size = 0;
 +  extra->recvcounts= xbt_malloc(count*sizeof(int));
 +  for(i=0; i< count; i++) // copy the data to avoid a bad free later
 +    extra->recvcounts[i] = recvcount;
 +  extra->num_processes = count;
 +  extra->datatype1 = encode_datatype(datatype);
 +
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      int* recvcounts = (int*)xbt_malloc(count * sizeof(int));
      for (i = 0; i < count; i++) recvcounts[i] = recvcount;
      mpi_coll_reduce_scatter_fun(sendbuf, recvbuf, recvcounts,
 -                                datatype,  op, comm);
 +                       datatype,  op, comm);
      xbt_free(recvcounts);
      retval = MPI_SUCCESS;
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
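
As the wrapper shows, MPI_Reduce_scatter_block is the uniform-count special case of MPI_Reduce_scatter: every rank contributes size*recvcount elements and receives recvcount reduced elements back. A usage sketch (assumes mpi.h and stdlib.h are included):

    int size, rank, recv, i;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int *sendbuf = (int *)malloc(size * sizeof(int));
    for (i = 0; i < size; i++) sendbuf[i] = rank;
    /* each rank sends 'size' ints and receives a single reduced int */
    MPI_Reduce_scatter_block(sendbuf, &recv, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    /* on every rank, recv == 0 + 1 + ... + (size - 1) */
    free(sendbuf);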
  
  int PMPI_Alltoall(void *sendbuf, int sendcount, MPI_Datatype sendtype,
 -                  void *recvbuf, int recvcount, MPI_Datatype recvtype,
 -                  MPI_Comm comm)
 +                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
 +                 MPI_Comm comm)
  {
    int retval = 0;
  
      retval = MPI_ERR_TYPE;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_ALLTOALL;
 -    extra->send_size = sendcount;
 -    extra->recv_size = recvcount;
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_ALLTOALL;
 +  extra->send_size = sendcount;
 +  extra->recv_size = recvcount;
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      retval = mpi_coll_alltoall_fun(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
  }
  
  int PMPI_Alltoallv(void *sendbuf, int *sendcounts, int *senddisps,
 -                   MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
 -                   int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
 +                  MPI_Datatype sendtype, void *recvbuf, int *recvcounts,
 +                  int *recvdisps, MPI_Datatype recvtype, MPI_Comm comm)
  {
    int retval = 0;
  
      retval = MPI_ERR_ARG;
    } else {
  #ifdef HAVE_TRACING
 -    int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 -    int i=0;
 -    int size = smpi_comm_size(comm);
 -    instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 -    extra->type = TRACING_ALLTOALLV;
 -    extra->send_size = 0;
 -    extra->recv_size = 0;
 -    extra->recvcounts= xbt_malloc(size*sizeof(int));
 -    extra->sendcounts= xbt_malloc(size*sizeof(int));
 -
 -    for(i=0; i< size; i++){//copy data to avoid bad free
 -      extra->send_size += sendcounts[i];
 -      extra->recv_size += recvcounts[i];
 -
 -      extra->sendcounts[i] = sendcounts[i];
 -      extra->recvcounts[i] = recvcounts[i];
 -    }
 -    extra->num_processes = size;
 +  int rank = comm != MPI_COMM_NULL ? smpi_process_index() : -1;
 +  int i=0;
 +  int size = smpi_comm_size(comm);
 +  instr_extra_data extra = xbt_new0(s_instr_extra_data_t,1);
 +  extra->type = TRACING_ALLTOALLV;
 +  extra->send_size = 0;
 +  extra->recv_size = 0;
 +  extra->recvcounts= xbt_malloc(size*sizeof(int));
 +  extra->sendcounts= xbt_malloc(size*sizeof(int));
  
 -    extra->datatype1 = encode_datatype(sendtype);
 -    extra->datatype2 = encode_datatype(recvtype);
 +  for(i=0; i< size; i++){ // copy the data to avoid a bad free later
 +    extra->send_size += sendcounts[i];
 +    extra->recv_size += recvcounts[i];
  
 -    TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
 +    extra->sendcounts[i] = sendcounts[i];
 +    extra->recvcounts[i] = recvcounts[i];
 +  }
 +  extra->num_processes = size;
 +
 +  extra->datatype1 = encode_datatype(sendtype);
 +  extra->datatype2 = encode_datatype(recvtype);
 +
 +  TRACE_smpi_collective_in(rank, -1, __FUNCTION__,extra);
  #endif
      retval =
 -      mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, sendtype,
 -                             recvbuf, recvcounts, recvdisps, recvtype,
 -                             comm);
 +        mpi_coll_alltoallv_fun(sendbuf, sendcounts, senddisps, sendtype,
 +                                  recvbuf, recvcounts, recvdisps, recvtype,
 +                                  comm);
  #ifdef HAVE_TRACING
 -    TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
 +  TRACE_smpi_collective_out(rank, -1, __FUNCTION__);
  #endif
    }
  
@@@ -2360,14 -2425,16 +2360,14 @@@ int PMPI_Get_processor_name(char *name
  {
    int retval = MPI_SUCCESS;
  
 -  smpi_bench_end();
    strncpy(name, SIMIX_host_get_name(SIMIX_host_self()),
            strlen(SIMIX_host_get_name(SIMIX_host_self())) < MPI_MAX_PROCESSOR_NAME - 1 ?
            strlen(SIMIX_host_get_name(SIMIX_host_self())) +1 :
            MPI_MAX_PROCESSOR_NAME - 1 );
    *resultlen =
 -    strlen(name) >
 -    MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
 +      strlen(name) >
 +      MPI_MAX_PROCESSOR_NAME ? MPI_MAX_PROCESSOR_NAME : strlen(name);
  
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -2376,6 -2443,7 +2376,6 @@@ int PMPI_Get_count(MPI_Status * status
    int retval = MPI_SUCCESS;
    size_t size;
  
 -  smpi_bench_end();
    if (status == NULL || count == NULL) {
      retval = MPI_ERR_ARG;
    } else if (datatype == MPI_DATATYPE_NULL) {
        *count = smpi_mpi_get_count(status, datatype);
      }
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
    } else {
      retval = smpi_datatype_contiguous(count, old_type, new_type, 0);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_commit(MPI_Datatype* datatype) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (datatype == NULL || *datatype == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else {
      smpi_datatype_commit(datatype);
      retval = MPI_SUCCESS;
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0 || blocklen<0){
    } else {
      retval = smpi_datatype_vector(count, blocklen, stride, old_type, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0 || blocklen<0){
    } else {
      retval = smpi_datatype_hvector(count, blocklen, stride, old_type, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -2452,6 -2529,7 +2452,6 @@@ int PMPI_Type_create_hvector(int count
  int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
    } else {
      retval = smpi_datatype_indexed(count, blocklens, indices, old_type, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
    } else {
      retval = smpi_datatype_indexed(count, blocklens, indices, old_type, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval,i;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
      retval = smpi_datatype_indexed(count, blocklens, indices, old_type, new_type);
      xbt_free(blocklens);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
    } else {
      retval = smpi_datatype_hindexed(count, blocklens, indices, old_type, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -2512,6 -2597,7 +2512,6 @@@ int PMPI_Type_create_hindexed(int count
  int PMPI_Type_create_hindexed_block(int count, int blocklength, MPI_Aint* indices, MPI_Datatype old_type, MPI_Datatype* new_type) {
    int retval,i;
  
 -  smpi_bench_end();
    if (old_type == MPI_DATATYPE_NULL) {
      retval = MPI_ERR_TYPE;
    } else if (count<0){
      retval = smpi_datatype_hindexed(count, blocklens, indices, old_type, new_type);
      xbt_free(blocklens);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
  int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* old_types, MPI_Datatype* new_type) {
    int retval = 0;
  
 -  smpi_bench_end();
    if (count<0){
      retval = MPI_ERR_COUNT;
    } else {
      retval = smpi_datatype_struct(count, blocklens, indices, old_types, new_type);
    }
 -  smpi_bench_begin();
    return retval;
  }
  
@@@ -2550,476 -2639,548 +2550,548 @@@ int PMPI_Error_class(int errorcode, int
  
  
  int PMPI_Initialized(int* flag) {
 -  *flag=smpi_process_initialized();
 -  return MPI_SUCCESS;
 +   *flag=smpi_process_initialized();
 +   return MPI_SUCCESS;
  }
  
- /* The following calls are not yet implemented and will fail at runtime. */
- /* Once implemented, please move them above this notice. */
+ /* The topo part of MPI_COMM_WORLD should always be NULL. Once other topologies
+  * are implemented, we should not only check whether the topology is NULL, but
+  * also whether it is of the right topology type (i.e. we will have to add a
+  * MPIR_Topo_Type field, and replace the MPI_Topology field with a union). */
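
Purely as an illustration of the layout the comment above suggests (all type and field names below are placeholders, not SimGrid API):

    typedef enum { MPIR_CART, MPIR_GRAPH } MPIR_Topo_Type;  /* hypothetical tag */

    typedef struct s_smpi_topology {
      MPIR_Topo_Type kind;              /* discriminates the union below */
      union {
        struct s_cart_topology  *cart;  /* placeholder member types */
        struct s_graph_topology *graph;
      } topo;
    } s_smpi_topology_t;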
  
- #define NOT_YET_IMPLEMENTED {\
-       XBT_WARN("Not yet implemented : %s. Please contact the Simgrid team if support is needed", __FUNCTION__);\
-       return MPI_SUCCESS;\
-         }
+ int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int reorder, MPI_Comm* comm_cart) {
+   int retval = 0;
+   smpi_bench_end();
+   if (comm_old == MPI_COMM_NULL){
+     retval = MPI_ERR_COMM;
+   }
+   else if (ndims < 0 ||
+            (ndims > 0 && (dims == NULL ||
+                           periodic == NULL)) ||
+            comm_cart == NULL) {
+     retval = MPI_ERR_ARG;
+   }
+   else {
+     retval = smpi_mpi_cart_create(comm_old, ndims, dims, periodic, reorder, comm_cart);
+   }
  
+   smpi_bench_begin();
  
- int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
-   NOT_YET_IMPLEMENTED
+   return retval;
  }
  
- int PMPI_Type_set_name(MPI_Datatype  datatype, char * name)
- {
-   NOT_YET_IMPLEMENTED
+ int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
+   if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if (coords == NULL) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cart_rank(comm, coords, rank);
  }
  
- int PMPI_Type_get_name(MPI_Datatype  datatype, char * name, int* len)
- {
-   NOT_YET_IMPLEMENTED
+ int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
+   if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if (source == NULL || dest == NULL || direction < 0 ) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cart_shift(comm, direction, displ, source, dest);
  }
  
- int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
+   if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if (rank < 0 || rank >= smpi_comm_size(comm)) {
+     return MPI_ERR_RANK;
+   }
+   if (maxdims <= 0) {
+     return MPI_ERR_ARG;
+   }
+   if(coords == NULL) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cart_coords(comm, rank, maxdims, coords);
  }
  
- int PMPI_Cart_coords(MPI_Comm comm, int rank, int maxdims, int* coords) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
+   if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if(maxdims <= 0 || dims == NULL || periods == NULL || coords == NULL) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cart_get(comm, maxdims, dims, periods, coords);
  }
  
- int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periods, int reorder, MPI_Comm* comm_cart) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
+   if (comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if (ndims == NULL) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cartdim_get(comm, ndims);
  }
  
- int PMPI_Cart_get(MPI_Comm comm, int maxdims, int* dims, int* periods, int* coords) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
+   if(dims == NULL) {
+     return MPI_ERR_ARG;
+   }
+   if (ndims < 1 || nnodes < 1) {
+     return MPI_ERR_DIMS;
+   }
+   return smpi_mpi_dims_create(nnodes, ndims, dims);
  }
  
- int PMPI_Cart_map(MPI_Comm comm_old, int ndims, int* dims, int* periods, int* newrank) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
+   if(comm == MPI_COMM_NULL || smpi_comm_topo(comm) == NULL) {
+     return MPI_ERR_TOPOLOGY;
+   }
+   if (comm_new == NULL) {
+     return MPI_ERR_ARG;
+   }
+   return smpi_mpi_cart_sub(comm, remain_dims, comm_new);
  }
  
- int PMPI_Cart_rank(MPI_Comm comm, int* coords, int* rank) {
-    NOT_YET_IMPLEMENTED
+ /* The following calls are not yet implemented and will fail at runtime. */
+ /* Once implemented, please move them above this notice. */
+ #define NOT_YET_IMPLEMENTED {                                           \
+     XBT_WARN("Not yet implemented: %s. Please contact the SimGrid team if support is needed", __FUNCTION__); \
+     return MPI_SUCCESS;                                                 \
+   }
+ int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
- int PMPI_Cart_shift(MPI_Comm comm, int direction, int displ, int* source, int* dest) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Type_set_name(MPI_Datatype  datatype, char * name)
+ {
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
- int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Type_get_name(MPI_Datatype  datatype, char * name, int* len)
+ {
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
- int PMPI_Cartdim_get(MPI_Comm comm, int* ndims) {
-    NOT_YET_IMPLEMENTED
+ int PMPI_Pack_size(int incount, MPI_Datatype datatype, MPI_Comm comm, int* size) {
+   NOT_YET_IMPLEMENTED
 -    }
++}
+ int PMPI_Cart_map(MPI_Comm comm_old, int ndims, int* dims, int* periods, int* newrank) {
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graph_create(MPI_Comm comm_old, int nnodes, int* index, int* edges, int reorder, MPI_Comm* comm_graph) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graph_get(MPI_Comm comm, int maxindex, int maxedges, int* index, int* edges) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graph_map(MPI_Comm comm_old, int nnodes, int* index, int* edges, int* newrank) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graph_neighbors(MPI_Comm comm, int rank, int maxneighbors, int* neighbors) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graph_neighbors_count(MPI_Comm comm, int rank, int* nneighbors) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Graphdims_get(MPI_Comm comm, int* nnodes, int* nedges) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Topo_test(MPI_Comm comm, int* top_type) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Errhandler_create(MPI_Handler_function* function, MPI_Errhandler* errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Errhandler_free(MPI_Errhandler* errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Errhandler_get(MPI_Comm comm, MPI_Errhandler* errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Error_string(int errorcode, char* string, int* resultlen) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Errhandler_set(MPI_Comm comm, MPI_Errhandler errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_set_errhandler(MPI_Comm comm, MPI_Errhandler errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_get_errhandler(MPI_Comm comm, MPI_Errhandler* errhandler) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Cancel(MPI_Request* request) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Buffer_attach(void* buffer, int size) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Buffer_detach(void* buffer, int* size) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_test_inter(MPI_Comm comm, int* flag) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag)
  {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_set_attr (MPI_Comm comm, int comm_keyval, void *attribute_val)
  {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_delete_attr (MPI_Comm comm, int comm_keyval)
  {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_create_keyval(MPI_Comm_copy_attr_function* copy_fn, MPI_Comm_delete_attr_function* delete_fn, int* keyval, void* extra_state)
  {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_free_keyval(int* keyval) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Pcontrol(const int level )
  {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Unpack(void* inbuf, int insize, int* position, void* outbuf, int outcount, MPI_Datatype type, MPI_Comm comm) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, int* flag)
  {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val)
  {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_delete_attr (MPI_Datatype type, int comm_keyval)
  {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval, void* extra_state)
  {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_free_keyval(int* keyval) {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm* comm_out) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Intercomm_merge(MPI_Comm comm, int high, MPI_Comm* comm_out) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Bsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Bsend_init(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Ibsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_remote_group(MPI_Comm comm, MPI_Group* group) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_remote_size(MPI_Comm comm, int* size) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Attr_delete(MPI_Comm comm, int keyval) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Rsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Rsend_init(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Irsend(void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request* request) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Keyval_free(int* keyval) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Test_cancelled(MPI_Status* status, int* flag) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Pack(void* inbuf, int incount, MPI_Datatype type, void* outbuf, int outcount, int* position, MPI_Comm comm) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Pack_external_size(char *datarep, int incount, MPI_Datatype datatype, MPI_Aint *size){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Pack_external(char *datarep, void *inbuf, int incount, MPI_Datatype datatype, void *outbuf, MPI_Aint outcount, MPI_Aint *position){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Unpack_external( char *datarep, void *inbuf, MPI_Aint insize, MPI_Aint *position, void *outbuf, int outcount, MPI_Datatype datatype){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Get_elements(MPI_Status* status, MPI_Datatype datatype, int* elements) {
-    NOT_YET_IMPLEMENTED
- }
- int PMPI_Dims_create(int nnodes, int ndims, int* dims) {
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
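
/* Note: the stub for PMPI_Dims_create disappears in this merge -- consistent
 * with the smpi-topo branch supplying a real implementation elsewhere. */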
  
  int PMPI_Win_fence( int assert,  MPI_Win win){
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Win_free( MPI_Win* win){
-    NOT_YET_IMPLEMENTED
+   NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_create( MPI_Info *info){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_set( MPI_Info info, char *key, char *value){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_free( MPI_Info *info){
    NOT_YET_IMPLEMENTED
 -    }
 +}
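
/* Illustrative only, not part of this commit: the MPI_Info stubs above could
 * later be backed by a plain string dictionary. A hypothetical sketch,
 * assuming MPI_Info were made to wrap SimGrid's xbt_dict_t (all names below
 * are made up for illustration): */
#include <stdlib.h>
#include "xbt/dict.h"
#include "xbt/sysdep.h"   /* xbt_strdup */

typedef struct hypothetical_info { xbt_dict_t entries; } hypothetical_info_t;

static int info_create_sketch(hypothetical_info_t **info) {
  *info = malloc(sizeof **info);
  (*info)->entries = xbt_dict_new_homogeneous(free); /* values freed with free() */
  return 0; /* MPI_SUCCESS */
}

static int info_set_sketch(hypothetical_info_t *info, char *key, char *value) {
  xbt_dict_set(info->entries, key, xbt_strdup(value), NULL); /* store a copy */
  return 0;
}

static int info_free_sketch(hypothetical_info_t **info) {
  xbt_dict_free(&(*info)->entries);
  free(*info);
  *info = NULL;
  return 0;
}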
  
  int PMPI_Get( void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank,
-     MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
+               MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_get_envelope( MPI_Datatype datatype, int *num_integers,
-                           int *num_addresses, int *num_datatypes, int *combiner){
+                             int *num_addresses, int *num_datatypes, int *combiner){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_get_contents(MPI_Datatype datatype, int max_integers, int max_addresses,
-                           int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
-                           MPI_Datatype* array_of_datatypes){
+                            int max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses,
+                            MPI_Datatype* array_of_datatypes){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_create_darray(int size, int rank, int ndims, int* array_of_gsizes,
                              int* array_of_distribs, int* array_of_dargs, int* array_of_psizes,
                              int order, MPI_Datatype oldtype, MPI_Datatype *newtype) {
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, MPI_Datatype *newtype){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_create_subarray(int ndims,int *array_of_sizes, int *array_of_subsizes, int *array_of_starts, int order, MPI_Datatype oldtype, MPI_Datatype *newtype){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Type_match_size(int typeclass,int size,MPI_Datatype *datatype){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Alltoallw( void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype *sendtypes,
-                    void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype *recvtypes,
-                    MPI_Comm comm){
+                     void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype *recvtypes,
+                     MPI_Comm comm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
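
/* Context note, illustrative only: MPI_Alltoallw generalizes MPI_Alltoallv by
 * allowing one datatype per peer and counting displacements in bytes rather
 * than in elements. A sketch of that relationship (helper name is made up,
 * and it relies on MPI_Alltoallw, which the stub above does not provide yet): */
#include <stdlib.h>
#include <mpi.h>

static int alltoallv_via_alltoallw(void *sendbuf, int *sendcnts, int *sdispls, MPI_Datatype sendtype,
                                   void *recvbuf, int *recvcnts, int *rdispls, MPI_Datatype recvtype,
                                   MPI_Comm comm)
{
  int i, size, ret;
  MPI_Aint lb, sext, rext;
  MPI_Comm_size(comm, &size);
  MPI_Type_get_extent(sendtype, &lb, &sext);
  MPI_Type_get_extent(recvtype, &lb, &rext);

  int *sdispls_b = malloc(size * sizeof(int));      /* byte displacements */
  int *rdispls_b = malloc(size * sizeof(int));
  MPI_Datatype *stypes = malloc(size * sizeof(MPI_Datatype));
  MPI_Datatype *rtypes = malloc(size * sizeof(MPI_Datatype));
  for (i = 0; i < size; i++) {
    sdispls_b[i] = (int)(sdispls[i] * sext);        /* elements -> bytes */
    rdispls_b[i] = (int)(rdispls[i] * rext);
    stypes[i] = sendtype;                           /* same type for every peer */
    rtypes[i] = recvtype;
  }
  ret = MPI_Alltoallw(sendbuf, sendcnts, sdispls_b, stypes,
                      recvbuf, recvcnts, rdispls_b, rtypes, comm);
  free(sdispls_b); free(rdispls_b); free(stypes); free(rtypes);
  return ret;
}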
  
  int PMPI_Comm_set_name(MPI_Comm comm, char* name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_dup_with_info(MPI_Comm comm, MPI_Info info, MPI_Comm * newcomm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_split_type(MPI_Comm comm, int split_type, int key, MPI_Info info, MPI_Comm *newcomm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_set_info (MPI_Comm comm, MPI_Info info){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_get_info (MPI_Comm comm, MPI_Info* info){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_get(MPI_Info info,char *key,int valuelen, char *value, int *flag){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_create_errhandler( MPI_Comm_errhandler_fn *function, MPI_Errhandler *errhandler){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Add_error_class( int *errorclass){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Add_error_code(  int errorclass, int *errorcode){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Add_error_string( int errorcode, char *string){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_call_errhandler(MPI_Comm comm,int errorcode){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_delete(MPI_Info info, char *key){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_get_nkeys( MPI_Info info, int *nkeys){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_get_nthkey( MPI_Info info, int n, char *key){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Info_get_valuelen( MPI_Info info, char *key, int *valuelen, int *flag){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Request_get_status( MPI_Request request, int *flag, MPI_Status *status){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Grequest_start( MPI_Grequest_query_function *query_fn, MPI_Grequest_free_function *free_fn, MPI_Grequest_cancel_function *cancel_fn, void *extra_state, MPI_Request *request){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Grequest_complete( MPI_Request request){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Status_set_cancelled(MPI_Status *status,int flag){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Status_set_elements( MPI_Status *status, MPI_Datatype datatype, int count){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_connect( char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Publish_name( char *service_name, MPI_Info info, char *port_name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Unpublish_name( char *service_name, MPI_Info info, char *port_name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Lookup_name( char *service_name, MPI_Info info, char *port_name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_join( int fd, MPI_Comm *intercomm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Open_port( MPI_Info info, char *port_name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Close_port(char *port_name){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_accept( char *port_name, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *newcomm){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_spawn( char *command, char **argv, int maxprocs, MPI_Info info, int root, MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_spawn_multiple( int count, char **array_of_commands, char*** array_of_argv,
-                              int* array_of_maxprocs, MPI_Info* array_of_info, int root,
-                              MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes){
+                               int* array_of_maxprocs, MPI_Info* array_of_info, int root,
+                               MPI_Comm comm, MPI_Comm *intercomm, int* array_of_errcodes){
    NOT_YET_IMPLEMENTED
 -    }
 +}
  
  int PMPI_Comm_get_parent( MPI_Comm *parent){
    NOT_YET_IMPLEMENTED
 -    }
 +}
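
/* For reference, a toy caller hitting one of the stubbed entry points. With a
 * NOT_YET_IMPLEMENTED macro like the sketch near the top of this block, the
 * call would log a warning and return MPI_SUCCESS instead of aborting (actual
 * SMPI behavior may differ). Build with smpicc and run under smpirun: */
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
  MPI_Info info;
  MPI_Init(&argc, &argv);
  if (MPI_Info_create(&info) == MPI_SUCCESS)   /* stubbed above */
    printf("MPI_Info_create: stub returned MPI_SUCCESS\n");
  MPI_Finalize();
  return 0;
}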