From f35db13138682b1be530a509d1eeadeafff84ca7 Mon Sep 17 00:00:00 2001 From: degomme Date: Mon, 20 Mar 2017 15:45:39 +0100 Subject: [PATCH] ok, I stop trying to please sonar. That's only to remove ONE "using" directive. --- include/smpi/forward.hpp | 2 + src/smpi/colls/allgather/allgather-GB.cpp | 5 + .../colls/allgather/allgather-mvapich-smp.cpp | 5 + src/smpi/colls/allreduce/allreduce-lr.cpp | 5 +- .../colls/allreduce/allreduce-mvapich-rs.cpp | 6 +- .../allreduce/allreduce-mvapich-two-level.cpp | 7 +- .../allreduce-ompi-ring-segmented.cpp | 4 + .../colls/allreduce/allreduce-rab-rdb.cpp | 5 +- src/smpi/colls/allreduce/allreduce-rab1.cpp | 5 +- src/smpi/colls/allreduce/allreduce-rab2.cpp | 4 + src/smpi/colls/allreduce/allreduce-rdb.cpp | 5 +- .../colls/allreduce/allreduce-redbcast.cpp | 5 +- .../allreduce-smp-binomial-pipeline.cpp | 4 + .../allreduce/allreduce-smp-binomial.cpp | 4 + .../colls/allreduce/allreduce-smp-rdb.cpp | 4 + .../colls/allreduce/allreduce-smp-rsag-lr.cpp | 4 + .../allreduce/allreduce-smp-rsag-rab.cpp | 4 + .../colls/allreduce/allreduce-smp-rsag.cpp | 4 + src/smpi/colls/alltoall/alltoall-2dmesh.cpp | 4 + src/smpi/colls/alltoall/alltoall-3dmesh.cpp | 5 +- .../alltoall-mvapich-scatter-dest.cpp | 5 +- .../alltoall/alltoall-pair-light-barrier.cpp | 4 + .../alltoall/alltoall-pair-mpi-barrier.cpp | 4 + .../alltoall/alltoall-pair-one-barrier.cpp | 4 + src/smpi/colls/alltoall/alltoall-pair.cpp | 5 +- src/smpi/colls/alltoall/alltoall-rdb.cpp | 4 + .../alltoall/alltoall-ring-light-barrier.cpp | 4 + .../alltoall/alltoall-ring-mpi-barrier.cpp | 4 + .../alltoall/alltoall-ring-one-barrier.cpp | 4 + src/smpi/colls/alltoall/alltoall-ring.cpp | 4 + src/smpi/colls/alltoallv/alltoallv-bruck.cpp | 4 + .../alltoallv/alltoallv-ompi-basic-linear.cpp | 4 + .../alltoallv-pair-light-barrier.cpp | 4 + .../alltoallv/alltoallv-pair-mpi-barrier.cpp | 4 + .../alltoallv/alltoallv-pair-one-barrier.cpp | 4 + src/smpi/colls/alltoallv/alltoallv-pair.cpp | 4 + .../alltoallv-ring-light-barrier.cpp | 4 + .../alltoallv/alltoallv-ring-mpi-barrier.cpp | 4 + .../alltoallv/alltoallv-ring-one-barrier.cpp | 4 + src/smpi/colls/alltoallv/alltoallv-ring.cpp | 4 + .../colls/barrier/barrier-mvapich2-pair.cpp | 6 +- src/smpi/colls/barrier/barrier-ompi.cpp | 5 + src/smpi/colls/bcast/bcast-NTSB.cpp | 6 +- src/smpi/colls/bcast/bcast-NTSL-Isend.cpp | 5 + src/smpi/colls/bcast/bcast-NTSL.cpp | 5 + src/smpi/colls/bcast/bcast-SMP-binary.cpp | 6 +- src/smpi/colls/bcast/bcast-SMP-binomial.cpp | 6 +- src/smpi/colls/bcast/bcast-SMP-linear.cpp | 6 +- .../bcast-arrival-pattern-aware-wait.cpp | 6 +- .../bcast/bcast-arrival-pattern-aware.cpp | 5 + .../colls/bcast/bcast-arrival-scatter.cpp | 6 +- src/smpi/colls/bcast/bcast-binomial-tree.cpp | 6 +- .../colls/bcast/bcast-flattree-pipeline.cpp | 6 +- src/smpi/colls/bcast/bcast-flattree.cpp | 6 +- src/smpi/colls/bcast/bcast-mvapich-smp.cpp | 5 + src/smpi/colls/bcast/bcast-ompi-pipeline.cpp | 6 +- .../colls/bcast/bcast-ompi-split-bintree.cpp | 5 + .../bcast/bcast-scatter-LR-allgather.cpp | 5 + .../bcast/bcast-scatter-rdb-allgather.cpp | 6 + src/smpi/colls/reduce/reduce-NTSL.cpp | 4 + .../reduce/reduce-arrival-pattern-aware.cpp | 5 +- src/smpi/colls/reduce/reduce-binomial.cpp | 5 +- src/smpi/colls/reduce/reduce-flat-tree.cpp | 5 +- .../colls/reduce/reduce-mvapich-knomial.cpp | 4 + .../colls/reduce/reduce-mvapich-two-level.cpp | 5 +- src/smpi/colls/reduce/reduce-ompi.cpp | 6 +- src/smpi/colls/reduce/reduce-rab.cpp | 5 +- .../colls/reduce/reduce-scatter-gather.cpp | 5 +- 
.../reduce_scatter/reduce_scatter-mpich.cpp | 6 +- .../reduce_scatter/reduce_scatter-ompi.cpp | 4 + .../scatter/scatter-mvapich-two-level.cpp | 6 + src/smpi/colls/scatter/scatter-ompi.cpp | 6 + src/smpi/colls/smpi_intel_mpi_selector.cpp | 4 +- src/smpi/colls/smpi_mpich_selector.cpp | 9 +- src/smpi/colls/smpi_mvapich2_selector.cpp | 3 +- .../colls/smpi_mvapich2_selector_stampede.h | 126 +++--- src/smpi/private.h | 7 +- src/smpi/smpi_comm.cpp | 2 +- src/smpi/smpi_datatype.cpp | 4 +- src/smpi/smpi_deployment.cpp | 4 +- src/smpi/smpi_f77.cpp | 428 +++++++++--------- src/smpi/smpi_global.cpp | 38 +- src/smpi/smpi_group.cpp | 3 +- src/smpi/smpi_keyvals.hpp | 8 +- src/smpi/smpi_pmpi.cpp | 188 ++++---- src/smpi/smpi_process.cpp | 2 +- src/smpi/smpi_status.cpp | 6 + src/smpi/smpi_topo.cpp | 2 +- src/smpi/smpi_topo.hpp | 6 +- 89 files changed, 736 insertions(+), 450 deletions(-) diff --git a/include/smpi/forward.hpp b/include/smpi/forward.hpp index 436c9b302f..6713e4b96a 100644 --- a/include/smpi/forward.hpp +++ b/include/smpi/forward.hpp @@ -39,6 +39,7 @@ typedef simgrid::smpi::Datatype SMPI_Datatype; typedef simgrid::smpi::Group SMPI_Group; typedef simgrid::smpi::Info SMPI_Info; typedef simgrid::smpi::Op SMPI_Op; +typedef simgrid::smpi::Process SMPI_Process; typedef simgrid::smpi::Request SMPI_Request; typedef simgrid::smpi::Topo SMPI_Topology; typedef simgrid::smpi::Topo_Cart SMPI_Cart_topology; @@ -53,6 +54,7 @@ typedef struct SMPI_Datatype SMPI_Datatype; typedef struct SMPI_Group SMPI_Group; typedef struct SMPI_Info SMPI_Info; typedef struct SMPI_Op SMPI_Op; +typedef struct SMPI_Process SMPI_Process; typedef struct SMPI_Request SMPI_Request; typedef struct SMPI_Topology SMPI_Topology; typedef struct SMPI_Win SMPI_Win; diff --git a/src/smpi/colls/allgather/allgather-GB.cpp b/src/smpi/colls/allgather/allgather-GB.cpp index 88bc100e85..ada4ae360d 100644 --- a/src/smpi/colls/allgather/allgather-GB.cpp +++ b/src/smpi/colls/allgather/allgather-GB.cpp @@ -6,6 +6,8 @@ #include "../colls_private.h" +namespace simgrid{ +namespace smpi{ // Allgather - gather/bcast algorithm int Coll_allgather_GB::allgather(void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, @@ -20,3 +22,6 @@ int Coll_allgather_GB::allgather(void *send_buff, int send_count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/allgather/allgather-mvapich-smp.cpp b/src/smpi/colls/allgather/allgather-mvapich-smp.cpp index 1a0fdcb623..25cae7f339 100644 --- a/src/smpi/colls/allgather/allgather-mvapich-smp.cpp +++ b/src/smpi/colls/allgather/allgather-mvapich-smp.cpp @@ -35,6 +35,8 @@ * See COPYRIGHT in top-level directory. 
*/ #include "../colls_private.h" +namespace simgrid{ +namespace smpi{ int Coll_allgather_mvapich2_smp::allgather(void *sendbuf,int sendcnt, MPI_Datatype sendtype, void *recvbuf, int recvcnt,MPI_Datatype recvtype, @@ -152,3 +154,6 @@ int Coll_allgather_mvapich2_smp::allgather(void *sendbuf,int sendcnt, MPI_Dataty mpi_errno = Colls::bcast (recvbuf, recvcnt * size, recvtype, 0, shmem_comm); return mpi_errno; } + +} +} diff --git a/src/smpi/colls/allreduce/allreduce-lr.cpp b/src/smpi/colls/allreduce/allreduce-lr.cpp index 5ff1b88ff5..5eb4c2d96b 100644 --- a/src/smpi/colls/allreduce/allreduce-lr.cpp +++ b/src/smpi/colls/allreduce/allreduce-lr.cpp @@ -18,7 +18,8 @@ */ //#include - +namespace simgrid{ +namespace smpi{ int Coll_allreduce_lr::allreduce(void *sbuf, void *rbuf, int rcount, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -104,3 +105,5 @@ Coll_allreduce_lr::allreduce(void *sbuf, void *rbuf, int rcount, return 0; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-mvapich-rs.cpp b/src/smpi/colls/allreduce/allreduce-mvapich-rs.cpp index c94bd90758..5187221089 100644 --- a/src/smpi/colls/allreduce/allreduce-mvapich-rs.cpp +++ b/src/smpi/colls/allreduce/allreduce-mvapich-rs.cpp @@ -22,7 +22,8 @@ */ #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf, void *recvbuf, int count, @@ -288,3 +289,6 @@ int Coll_allreduce_mvapich2_rs::allreduce(void *sendbuf, return (mpi_errno); } + +} +} diff --git a/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp b/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp index 123de2ce44..9b8c0ad2f6 100644 --- a/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp +++ b/src/smpi/colls/allreduce/allreduce-mvapich-two-level.cpp @@ -53,6 +53,9 @@ extern int (*MV2_Allreduce_intra_function)( void *sendbuf, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); + +namespace simgrid{ +namespace smpi{ static int MPIR_Allreduce_reduce_p2p_MV2( void *sendbuf, void *recvbuf, int count, @@ -72,7 +75,7 @@ static int MPIR_Allreduce_reduce_shmem_MV2( void *sendbuf, Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm); return MPI_SUCCESS; } - + /* general two level allreduce helper function */ int Coll_allreduce_mvapich2_two_level::allreduce(void *sendbuf, @@ -168,3 +171,5 @@ int Coll_allreduce_mvapich2_two_level::allreduce(void *sendbuf, return (mpi_errno); } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp index 8a88359791..bb3251b1fb 100644 --- a/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp +++ b/src/smpi/colls/allreduce/allreduce-ompi-ring-segmented.cpp @@ -154,6 +154,8 @@ } \ #include "../colls_private.h" +namespace simgrid{ +namespace smpi{ int Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count, MPI_Datatype dtype, @@ -387,3 +389,5 @@ Coll_allreduce_ompi_ring_segmented::allreduce(void *sbuf, void *rbuf, int count, if (NULL != inbuf[1]) smpi_free_tmp_buffer(inbuf[1]); return ret; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-rab-rdb.cpp b/src/smpi/colls/allreduce/allreduce-rab-rdb.cpp index def46333ee..0ad64f2227 100644 --- a/src/smpi/colls/allreduce/allreduce-rab-rdb.cpp +++ b/src/smpi/colls/allreduce/allreduce-rab-rdb.cpp @@ -5,7 +5,8 @@ * under the terms of the license (GNU LGPL) which comes with this package. 
*/ #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_allreduce_rab_rdb::allreduce(void *sbuff, void *rbuff, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -194,3 +195,5 @@ int Coll_allreduce_rab_rdb::allreduce(void *sbuff, void *rbuff, int count, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-rab1.cpp b/src/smpi/colls/allreduce/allreduce-rab1.cpp index 3e255ba57a..3564a46319 100644 --- a/src/smpi/colls/allreduce/allreduce-rab1.cpp +++ b/src/smpi/colls/allreduce/allreduce-rab1.cpp @@ -6,7 +6,8 @@ #include "../colls_private.h" //#include - +namespace simgrid{ +namespace smpi{ // NP pow of 2 for now int Coll_allreduce_rab1::allreduce(void *sbuff, void *rbuff, int count, MPI_Datatype dtype, @@ -108,3 +109,5 @@ int Coll_allreduce_rab1::allreduce(void *sbuff, void *rbuff, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-rab2.cpp b/src/smpi/colls/allreduce/allreduce-rab2.cpp index 62f8d9794e..81ab6b49dc 100644 --- a/src/smpi/colls/allreduce/allreduce-rab2.cpp +++ b/src/smpi/colls/allreduce/allreduce-rab2.cpp @@ -7,6 +7,8 @@ #include "../colls_private.h" //#include +namespace simgrid{ +namespace smpi{ // this requires that count >= NP int Coll_allreduce_rab2::allreduce(void *sbuff, void *rbuff, int count, MPI_Datatype dtype, @@ -82,3 +84,5 @@ int Coll_allreduce_rab2::allreduce(void *sbuff, void *rbuff, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-rdb.cpp b/src/smpi/colls/allreduce/allreduce-rdb.cpp index 82fc3fe9c8..bf9351d520 100644 --- a/src/smpi/colls/allreduce/allreduce-rdb.cpp +++ b/src/smpi/colls/allreduce/allreduce-rdb.cpp @@ -6,7 +6,8 @@ #include "../colls_private.h" //#include - +namespace simgrid{ +namespace smpi{ int Coll_allreduce_rdb::allreduce(void *sbuff, void *rbuff, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) { @@ -128,3 +129,5 @@ int Coll_allreduce_rdb::allreduce(void *sbuff, void *rbuff, int count, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-redbcast.cpp b/src/smpi/colls/allreduce/allreduce-redbcast.cpp index 35e40f17d9..2cb49ee718 100644 --- a/src/smpi/colls/allreduce/allreduce-redbcast.cpp +++ b/src/smpi/colls/allreduce/allreduce-redbcast.cpp @@ -5,7 +5,8 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_allreduce_redbcast::allreduce(void *buf, void *buf2, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) @@ -14,3 +15,5 @@ int Coll_allreduce_redbcast::allreduce(void *buf, void *buf2, int count, Colls::bcast(buf2, count, datatype, 0, comm); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp b/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp index 9ac2c82281..c558d84e06 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-binomial-pipeline.cpp @@ -38,6 +38,8 @@ This fucntion performs all-reduce operation as follow. 
** in a pipeline fashion 3) binomial_tree bcast intra-communication between root of each SMP node 4) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_binomial_pipeline::allreduce(void *send_buf, void *recv_buf, int count, MPI_Datatype dtype, @@ -200,3 +202,5 @@ int Coll_allreduce_smp_binomial_pipeline::allreduce(void *send_buf, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp b/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp index 7b93bf60ab..42af04d0a6 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-binomial.cpp @@ -27,6 +27,8 @@ This fucntion performs all-reduce operation as follow. 3) binomial_tree bcast intra-communication between root of each SMP node 4) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_binomial::allreduce(void *send_buf, void *recv_buf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -153,3 +155,5 @@ int Coll_allreduce_smp_binomial::allreduce(void *send_buf, void *recv_buf, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp index 285d354b26..dd71407c39 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rdb.cpp @@ -26,6 +26,8 @@ This fucntion performs all-reduce operation as follow. 2) Recursive doubling intra-communication between root of each SMP node 3) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_rdb::allreduce(void *send_buf, void *recv_buf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -185,3 +187,5 @@ int Coll_allreduce_smp_rdb::allreduce(void *send_buf, void *recv_buf, int count, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp index 7e62a835ee..9d6ec10c7a 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag-lr.cpp @@ -14,6 +14,8 @@ This fucntion performs all-reduce operation as follow. 3) allgather - inter between root of each SMP node 4) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_rsag_lr::allreduce(void *send_buf, void *recv_buf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -255,3 +257,5 @@ int Coll_allreduce_smp_rsag_lr::allreduce(void *send_buf, void *recv_buf, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp index 53cde44a07..51071e63fc 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag-rab.cpp @@ -18,6 +18,8 @@ This fucntion performs all-reduce operation as follow. 
3) allgather - inter between root of each SMP node 4) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_rsag_rab::allreduce(void *sbuf, void *rbuf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -209,3 +211,5 @@ int Coll_allreduce_smp_rsag_rab::allreduce(void *sbuf, void *rbuf, int count, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp b/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp index f5f5b75bf1..d35a140e44 100644 --- a/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp +++ b/src/smpi/colls/allreduce/allreduce-smp-rsag.cpp @@ -13,6 +13,8 @@ This fucntion performs all-reduce operation as follow. 3) allgather - inter between root of each SMP node 4) binomial_tree bcast inside each SMP node */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_smp_rsag::allreduce(void *send_buf, void *recv_buf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) @@ -223,3 +225,5 @@ int Coll_allreduce_smp_rsag::allreduce(void *send_buf, void *recv_buf, smpi_free_tmp_buffer(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-2dmesh.cpp b/src/smpi/colls/alltoall/alltoall-2dmesh.cpp index bfc1bc455b..5194886cef 100644 --- a/src/smpi/colls/alltoall/alltoall-2dmesh.cpp +++ b/src/smpi/colls/alltoall/alltoall-2dmesh.cpp @@ -52,6 +52,8 @@ static int alltoall_check_is_2dmesh(int num, int *i, int *j) } return 0; } +namespace simgrid{ +namespace smpi{ int Coll_alltoall_2dmesh::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -172,3 +174,5 @@ int Coll_alltoall_2dmesh::alltoall(void *send_buff, int send_count, smpi_free_tmp_buffer(tmp_buff2); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-3dmesh.cpp b/src/smpi/colls/alltoall/alltoall-3dmesh.cpp index 85415517f4..705be7e9cd 100644 --- a/src/smpi/colls/alltoall/alltoall-3dmesh.cpp +++ b/src/smpi/colls/alltoall/alltoall-3dmesh.cpp @@ -45,7 +45,8 @@ static int alltoall_check_is_3dmesh(int num, int *i, int *j, int *k) } return 0; } - +namespace simgrid{ +namespace smpi{ int Coll_alltoall_3dmesh::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, int recv_count, @@ -184,3 +185,5 @@ int Coll_alltoall_3dmesh::alltoall(void *send_buff, int send_count, smpi_free_tmp_buffer(tmp_buff2); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp index e812ea33d1..77baf42248 100644 --- a/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp +++ b/src/smpi/colls/alltoall/alltoall-mvapich-scatter-dest.cpp @@ -42,7 +42,8 @@ #define MV2_ALLTOALL_THROTTLE_FACTOR 4 #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_alltoall_mvapich2_scatter_dest::alltoall( void *sendbuf, int sendcount, @@ -135,3 +136,5 @@ int Coll_alltoall_mvapich2_scatter_dest::alltoall( return (mpi_errno); } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-pair-light-barrier.cpp b/src/smpi/colls/alltoall/alltoall-pair-light-barrier.cpp index 2299e719d6..7da9727353 100644 --- a/src/smpi/colls/alltoall/alltoall-pair-light-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-pair-light-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_pair_light_barrier::alltoall(void *send_buff, int send_count, 
MPI_Datatype send_type, @@ -74,3 +76,5 @@ Coll_alltoall_pair_light_barrier::alltoall(void *send_buff, int send_count, } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-pair-mpi-barrier.cpp b/src/smpi/colls/alltoall/alltoall-pair-mpi-barrier.cpp index 9c04f878f3..3d95ed3d76 100644 --- a/src/smpi/colls/alltoall/alltoall-pair-mpi-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-pair-mpi-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_pair_mpi_barrier::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -61,3 +63,5 @@ Coll_alltoall_pair_mpi_barrier::alltoall(void *send_buff, int send_count, } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-pair-one-barrier.cpp b/src/smpi/colls/alltoall/alltoall-pair-one-barrier.cpp index c18406e5ef..dc36d746c7 100644 --- a/src/smpi/colls/alltoall/alltoall-pair-one-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-pair-one-barrier.cpp @@ -26,6 +26,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_pair_one_barrier::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -63,3 +65,5 @@ Coll_alltoall_pair_one_barrier::alltoall(void *send_buff, int send_count, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-pair.cpp b/src/smpi/colls/alltoall/alltoall-pair.cpp index 1f1f5ef9ba..68d53e9480 100644 --- a/src/smpi/colls/alltoall/alltoall-pair.cpp +++ b/src/smpi/colls/alltoall/alltoall-pair.cpp @@ -27,7 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ - +namespace simgrid{ +namespace smpi{ int Coll_alltoall_pair_rma::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, int recv_count, MPI_Datatype recv_type, MPI_Comm comm) @@ -96,3 +97,5 @@ int Coll_alltoall_pair::alltoall(void *send_buff, int send_count, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-rdb.cpp b/src/smpi/colls/alltoall/alltoall-rdb.cpp index 2e9aa0e5d3..77a74f3da8 100644 --- a/src/smpi/colls/alltoall/alltoall-rdb.cpp +++ b/src/smpi/colls/alltoall/alltoall-rdb.cpp @@ -27,6 +27,8 @@ * Auther: MPICH / slightly modified by Ahmad Faraj. 
****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_rdb::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, int recv_count, @@ -152,3 +154,5 @@ int Coll_alltoall_rdb::alltoall(void *send_buff, int send_count, smpi_free_tmp_buffer(tmp_buff); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-ring-light-barrier.cpp b/src/smpi/colls/alltoall/alltoall-ring-light-barrier.cpp index 5292eeff23..f17170174d 100644 --- a/src/smpi/colls/alltoall/alltoall-ring-light-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-ring-light-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_ring_light_barrier::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -74,3 +76,5 @@ Coll_alltoall_ring_light_barrier::alltoall(void *send_buff, int send_count, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-ring-mpi-barrier.cpp b/src/smpi/colls/alltoall/alltoall-ring-mpi-barrier.cpp index 9145378770..ac7dcfa151 100644 --- a/src/smpi/colls/alltoall/alltoall-ring-mpi-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-ring-mpi-barrier.cpp @@ -26,6 +26,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_ring_mpi_barrier::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -60,3 +62,5 @@ Coll_alltoall_ring_mpi_barrier::alltoall(void *send_buff, int send_count, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-ring-one-barrier.cpp b/src/smpi/colls/alltoall/alltoall-ring-one-barrier.cpp index 7f168b9dab..8dca244ad2 100644 --- a/src/smpi/colls/alltoall/alltoall-ring-one-barrier.cpp +++ b/src/smpi/colls/alltoall/alltoall-ring-one-barrier.cpp @@ -25,6 +25,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_ring_one_barrier::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, @@ -58,3 +60,5 @@ Coll_alltoall_ring_one_barrier::alltoall(void *send_buff, int send_count, } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoall/alltoall-ring.cpp b/src/smpi/colls/alltoall/alltoall-ring.cpp index 9029408630..b4dda86adc 100644 --- a/src/smpi/colls/alltoall/alltoall-ring.cpp +++ b/src/smpi/colls/alltoall/alltoall-ring.cpp @@ -25,6 +25,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoall_ring::alltoall(void *send_buff, int send_count, MPI_Datatype send_type, void *recv_buff, @@ -57,3 +59,5 @@ Coll_alltoall_ring::alltoall(void *send_buff, int send_count, } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-bruck.cpp b/src/smpi/colls/alltoallv/alltoallv-bruck.cpp index 56d750d7e4..5e85a1bc23 100644 --- a/src/smpi/colls/alltoallv/alltoallv-bruck.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-bruck.cpp @@ -13,6 +13,8 @@ * FIXME: uh, check smpi_pmpi again, but this routine is called for > 12, not * less... 
**/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_bruck::alltoallv(void *sendbuf, int *sendcounts, int *senddisps, MPI_Datatype sendtype, void *recvbuf, int *recvcounts, int *recvdisps, MPI_Datatype recvtype, @@ -90,3 +92,5 @@ int Coll_alltoallv_bruck::alltoallv(void *sendbuf, int *sendcounts, int *senddis } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp b/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp index 0ef5fde063..a383a21888 100644 --- a/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-ompi-basic-linear.cpp @@ -14,6 +14,8 @@ * differently and so will not have to duplicate code. * GEF Oct05 after asking Jeff. */ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_ompi_basic_linear::alltoallv(void *sbuf, int *scounts, int *sdisps, MPI_Datatype sdtype, @@ -103,4 +105,6 @@ Coll_alltoallv_ompi_basic_linear::alltoallv(void *sbuf, int *scounts, int *sdisp return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-pair-light-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-pair-light-barrier.cpp index 4159fdea7c..1fdbc8934f 100644 --- a/src/smpi/colls/alltoallv/alltoallv-pair-light-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-pair-light-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_pair_light_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -71,3 +73,5 @@ Coll_alltoallv_pair_light_barrier::alltoallv(void *send_buff, int *send_counts, } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-pair-mpi-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-pair-mpi-barrier.cpp index 76b88726db..78d5276992 100644 --- a/src/smpi/colls/alltoallv/alltoallv-pair-mpi-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-pair-mpi-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_pair_mpi_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -58,3 +60,5 @@ Coll_alltoallv_pair_mpi_barrier::alltoallv(void *send_buff, int *send_counts, in } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-pair-one-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-pair-one-barrier.cpp index 77187552ec..ad0db49664 100644 --- a/src/smpi/colls/alltoallv/alltoallv-pair-one-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-pair-one-barrier.cpp @@ -26,6 +26,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_pair_one_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -59,3 +61,5 @@ Coll_alltoallv_pair_one_barrier::alltoallv(void *send_buff, int *send_counts, in return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-pair.cpp b/src/smpi/colls/alltoallv/alltoallv-pair.cpp index b2ebef67f0..48602ec784 100644 --- a/src/smpi/colls/alltoallv/alltoallv-pair.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-pair.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_pair::alltoallv(void *send_buff, 
int *send_counts, int *send_disps, MPI_Datatype send_type, void *recv_buff, int *recv_counts, int *recv_disps, @@ -57,3 +59,5 @@ int Coll_alltoallv_pair::alltoallv(void *send_buff, int *send_counts, int *send_ } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-ring-light-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-ring-light-barrier.cpp index 076771ae17..5c587b5042 100644 --- a/src/smpi/colls/alltoallv/alltoallv-ring-light-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-ring-light-barrier.cpp @@ -27,6 +27,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_ring_light_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -71,3 +73,5 @@ Coll_alltoallv_ring_light_barrier::alltoallv(void *send_buff, int *send_counts, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-ring-mpi-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-ring-mpi-barrier.cpp index c4d616927f..c61969d10a 100644 --- a/src/smpi/colls/alltoallv/alltoallv-ring-mpi-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-ring-mpi-barrier.cpp @@ -26,6 +26,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_ring_mpi_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -57,3 +59,5 @@ Coll_alltoallv_ring_mpi_barrier::alltoallv(void *send_buff, int *send_counts, in return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-ring-one-barrier.cpp b/src/smpi/colls/alltoallv/alltoallv-ring-one-barrier.cpp index a73f029cf9..52f5474dd9 100644 --- a/src/smpi/colls/alltoallv/alltoallv-ring-one-barrier.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-ring-one-barrier.cpp @@ -25,6 +25,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_ring_one_barrier::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -55,3 +57,5 @@ Coll_alltoallv_ring_one_barrier::alltoallv(void *send_buff, int *send_counts, in } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/alltoallv/alltoallv-ring.cpp b/src/smpi/colls/alltoallv/alltoallv-ring.cpp index 09efe30da5..48a8101fc2 100644 --- a/src/smpi/colls/alltoallv/alltoallv-ring.cpp +++ b/src/smpi/colls/alltoallv/alltoallv-ring.cpp @@ -25,6 +25,8 @@ * Auther: Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_alltoallv_ring::alltoallv(void *send_buff, int *send_counts, int *send_disps, MPI_Datatype send_type, @@ -62,3 +64,5 @@ Coll_alltoallv_ring::alltoallv(void *send_buff, int *send_counts, int *send_disp } return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/barrier/barrier-mvapich2-pair.cpp b/src/smpi/colls/barrier/barrier-mvapich2-pair.cpp index 51bc6b6f04..1dba99ada7 100644 --- a/src/smpi/colls/barrier/barrier-mvapich2-pair.cpp +++ b/src/smpi/colls/barrier/barrier-mvapich2-pair.cpp @@ -41,7 +41,8 @@ #include "../colls_private.h" #include "../coll_tuned_topo.h" - +namespace simgrid{ +namespace smpi{ int Coll_barrier_mvapich2_pair::barrier(MPI_Comm comm) { @@ -96,3 +97,6 @@ int Coll_barrier_mvapich2_pair::barrier(MPI_Comm comm) return mpi_errno; } + +} +} diff --git a/src/smpi/colls/barrier/barrier-ompi.cpp 
b/src/smpi/colls/barrier/barrier-ompi.cpp index f57b2307ed..606fc67989 100644 --- a/src/smpi/colls/barrier/barrier-ompi.cpp +++ b/src/smpi/colls/barrier/barrier-ompi.cpp @@ -43,6 +43,8 @@ * synchronous gurantee made by last ring of sends are synchronous * */ +namespace simgrid{ +namespace smpi{ int Coll_barrier_ompi_doublering::barrier(MPI_Comm comm ) { @@ -344,3 +346,6 @@ int Coll_barrier_ompi_tree::barrier(MPI_Comm comm) return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-NTSB.cpp b/src/smpi/colls/bcast/bcast-NTSB.cpp index e7746da2cd..6e46a261db 100644 --- a/src/smpi/colls/bcast/bcast-NTSB.cpp +++ b/src/smpi/colls/bcast/bcast-NTSB.cpp @@ -7,7 +7,8 @@ #include "../colls_private.h" int bcast_NTSB_segment_size_in_byte = 8192; - +namespace simgrid{ +namespace smpi{ int Coll_bcast_NTSB::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) { @@ -183,3 +184,6 @@ int Coll_bcast_NTSB::bcast(void *buf, int count, MPI_Datatype datatype, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-NTSL-Isend.cpp b/src/smpi/colls/bcast/bcast-NTSL-Isend.cpp index ee37369f75..5f30173c2d 100644 --- a/src/smpi/colls/bcast/bcast-NTSL-Isend.cpp +++ b/src/smpi/colls/bcast/bcast-NTSL-Isend.cpp @@ -11,6 +11,8 @@ static int bcast_NTSL_segment_size_in_byte = 8192; /* Non-topology-specific pipelined linear-bcast function 0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion */ +namespace simgrid{ +namespace smpi{ int Coll_bcast_NTSL_Isend::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) { @@ -130,3 +132,6 @@ int Coll_bcast_NTSL_Isend::bcast(void *buf, int count, MPI_Datatype datatype, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-NTSL.cpp b/src/smpi/colls/bcast/bcast-NTSL.cpp index 587e8da7d7..c318f5d025 100644 --- a/src/smpi/colls/bcast/bcast-NTSL.cpp +++ b/src/smpi/colls/bcast/bcast-NTSL.cpp @@ -11,6 +11,8 @@ static int bcast_NTSL_segment_size_in_byte = 8192; /* Non-topology-specific pipelined linear-bcast function 0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion */ +namespace simgrid{ +namespace smpi{ int Coll_bcast_NTSL::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) { @@ -130,3 +132,6 @@ int Coll_bcast_NTSL::bcast(void *buf, int count, MPI_Datatype datatype, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-SMP-binary.cpp b/src/smpi/colls/bcast/bcast-SMP-binary.cpp index 02971613fd..61807870b1 100644 --- a/src/smpi/colls/bcast/bcast-SMP-binary.cpp +++ b/src/smpi/colls/bcast/bcast-SMP-binary.cpp @@ -8,7 +8,8 @@ int bcast_SMP_binary_segment_byte = 8192; - +namespace simgrid{ +namespace smpi{ int Coll_bcast_SMP_binary::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) @@ -228,3 +229,6 @@ int Coll_bcast_SMP_binary::bcast(void *buf, int count, return 1; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-SMP-binomial.cpp b/src/smpi/colls/bcast/bcast-SMP-binomial.cpp index 6dfed6a42f..5eaca0db39 100644 --- a/src/smpi/colls/bcast/bcast-SMP-binomial.cpp +++ b/src/smpi/colls/bcast/bcast-SMP-binomial.cpp @@ -5,7 +5,8 @@ * under the terms of the license (GNU LGPL) which comes with this package. 
*/ #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_bcast_SMP_binomial::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) @@ -108,3 +109,6 @@ int Coll_bcast_SMP_binomial::bcast(void *buf, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-SMP-linear.cpp b/src/smpi/colls/bcast/bcast-SMP-linear.cpp index 2b3aee4023..5a5193d172 100644 --- a/src/smpi/colls/bcast/bcast-SMP-linear.cpp +++ b/src/smpi/colls/bcast/bcast-SMP-linear.cpp @@ -7,7 +7,8 @@ #include "../colls_private.h" int bcast_SMP_linear_segment_byte = 8192; - +namespace simgrid{ +namespace smpi{ int Coll_bcast_SMP_linear::bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm comm) @@ -176,3 +177,6 @@ int Coll_bcast_SMP_linear::bcast(void *buf, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp index 5baa7cf3e9..ba84236b2f 100644 --- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp +++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware-wait.cpp @@ -15,7 +15,8 @@ int bcast_arrival_pattern_aware_wait_segment_size_in_byte = 8192; #ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE #define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128 #endif - +namespace simgrid{ +namespace smpi{ /* Non-topology-specific pipelined linear-bcast function */ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count, MPI_Datatype datatype, @@ -252,3 +253,6 @@ int Coll_bcast_arrival_pattern_aware_wait::bcast(void *buf, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp b/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp index 9138d9290d..564644ba45 100644 --- a/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp +++ b/src/smpi/colls/bcast/bcast-arrival-pattern-aware.cpp @@ -11,6 +11,8 @@ static int bcast_NTSL_segment_size_in_byte = 8192; #define HEADER_SIZE 1024 #define MAX_NODE 1024 +namespace simgrid{ +namespace smpi{ /* Non-topology-specific pipelined linear-bcast function */ int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count, MPI_Datatype datatype, int root, @@ -362,3 +364,6 @@ int Coll_bcast_arrival_pattern_aware::bcast(void *buf, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-arrival-scatter.cpp b/src/smpi/colls/bcast/bcast-arrival-scatter.cpp index 578b0f988a..76905f41d1 100644 --- a/src/smpi/colls/bcast/bcast-arrival-scatter.cpp +++ b/src/smpi/colls/bcast/bcast-arrival-scatter.cpp @@ -13,7 +13,8 @@ #ifndef BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE #define BCAST_ARRIVAL_PATTERN_AWARE_MAX_NODE 128 #endif - +namespace simgrid{ +namespace smpi{ /* Non-topology-specific pipelined linear-bcast function */ int Coll_bcast_arrival_scatter::bcast(void *buf, int count, MPI_Datatype datatype, int root, @@ -231,3 +232,6 @@ int Coll_bcast_arrival_scatter::bcast(void *buf, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-binomial-tree.cpp b/src/smpi/colls/bcast/bcast-binomial-tree.cpp index d73682e7fd..232c3dfcda 100644 --- a/src/smpi/colls/bcast/bcast-binomial-tree.cpp +++ b/src/smpi/colls/bcast/bcast-binomial-tree.cpp @@ -67,7 +67,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* Auther: MPIH / modified by Ahmad Faraj ****************************************************************************/ - +namespace simgrid{ +namespace smpi{ int Coll_bcast_binomial_tree::bcast(void *buff, int count, MPI_Datatype data_type, int root, @@ -106,3 +107,6 @@ Coll_bcast_binomial_tree::bcast(void *buff, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp b/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp index 3f283e91bd..845070831f 100644 --- a/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp +++ b/src/smpi/colls/bcast/bcast-flattree-pipeline.cpp @@ -7,7 +7,8 @@ #include "../colls_private.h" int flattree_segment_in_byte = 8192; - +namespace simgrid{ +namespace smpi{ int Coll_bcast_flattree_pipeline::bcast(void *buff, int count, MPI_Datatype data_type, int root, @@ -61,3 +62,6 @@ Coll_bcast_flattree_pipeline::bcast(void *buff, int count, free(status_array); return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-flattree.cpp b/src/smpi/colls/bcast/bcast-flattree.cpp index b77a1864fa..cdcc85f3de 100644 --- a/src/smpi/colls/bcast/bcast-flattree.cpp +++ b/src/smpi/colls/bcast/bcast-flattree.cpp @@ -5,7 +5,8 @@ * under the terms of the license (GNU LGPL) which comes with this package. */ #include "../colls_private.h" - +namespace simgrid{ +namespace smpi{ int Coll_bcast_flattree::bcast(void *buff, int count, MPI_Datatype data_type, int root, MPI_Comm comm) @@ -41,3 +42,6 @@ Coll_bcast_flattree::bcast(void *buff, int count, MPI_Datatype data_type, } return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-mvapich-smp.cpp b/src/smpi/colls/bcast/bcast-mvapich-smp.cpp index 73e5f60d61..a033eea78b 100644 --- a/src/smpi/colls/bcast/bcast-mvapich-smp.cpp +++ b/src/smpi/colls/bcast/bcast-mvapich-smp.cpp @@ -73,6 +73,8 @@ extern int mv2_intra_node_knomial_factor; #define mv2_bcast_large_msg 512*1024 #define mv2_knomial_intra_node_threshold 131072 #define mv2_scatter_rd_inter_leader_bcast 1 +namespace simgrid{ +namespace smpi{ int Coll_bcast_mvapich2_inter_node::bcast(void *buffer, int count, MPI_Datatype datatype, @@ -388,3 +390,6 @@ int Coll_bcast_mvapich2_intra_node::bcast(void *buffer, return mpi_errno; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-ompi-pipeline.cpp b/src/smpi/colls/bcast/bcast-ompi-pipeline.cpp index fe0ef548b6..66ba5b927c 100644 --- a/src/smpi/colls/bcast/bcast-ompi-pipeline.cpp +++ b/src/smpi/colls/bcast/bcast-ompi-pipeline.cpp @@ -9,7 +9,8 @@ #define MAXTREEFANOUT 32 - +namespace simgrid{ +namespace smpi{ int Coll_bcast_ompi_pipeline::bcast( void* buffer, int original_count, MPI_Datatype datatype, @@ -209,3 +210,6 @@ int Coll_bcast_ompi_pipeline::bcast( void* buffer, return (MPI_SUCCESS); } + +} +} diff --git a/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp b/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp index e5dffb31dc..e0f36b4600 100644 --- a/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp +++ b/src/smpi/colls/bcast/bcast-ompi-split-bintree.cpp @@ -58,6 +58,8 @@ #include "../colls_private.h" #include "../coll_tuned_topo.h" #define MAXTREEFANOUT 32 +namespace simgrid{ +namespace smpi{ int Coll_bcast_ompi_split_bintree::bcast ( void* buffer, @@ -300,3 +302,6 @@ Coll_bcast_ompi_split_bintree::bcast ( void* buffer, } +} +} + diff --git a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp index 4adaa0029d..3d8c6e5616 100644 --- a/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp +++ 
b/src/smpi/colls/bcast/bcast-scatter-LR-allgather.cpp @@ -67,6 +67,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * Auther: MPIH / modified by Ahmad Faraj ****************************************************************************/ +namespace simgrid{ +namespace smpi{ int Coll_bcast_scatter_LR_allgather::bcast(void *buff, int count, MPI_Datatype data_type, int root, @@ -176,3 +178,6 @@ Coll_bcast_scatter_LR_allgather::bcast(void *buff, int count, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp index cb88efc67b..f4a7d4b3bd 100644 --- a/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp +++ b/src/smpi/colls/bcast/bcast-scatter-rdb-allgather.cpp @@ -1,4 +1,6 @@ #include "../colls_private.h" +namespace simgrid{ +namespace smpi{ static int scatter_for_bcast( int root, @@ -92,6 +94,7 @@ static int scatter_for_bcast( return mpi_errno; } + int Coll_bcast_scatter_rdb_allgather::bcast ( void *buffer, @@ -329,3 +332,6 @@ fn_exit: /* xbt_free(tmp_buf);*/ return mpi_errno; } + +} +} diff --git a/src/smpi/colls/reduce/reduce-NTSL.cpp b/src/smpi/colls/reduce/reduce-NTSL.cpp index 167a2c326e..8f9590bc5b 100644 --- a/src/smpi/colls/reduce/reduce-NTSL.cpp +++ b/src/smpi/colls/reduce/reduce-NTSL.cpp @@ -12,6 +12,8 @@ int reduce_NTSL_segment_size_in_byte = 8192; /* Non-topology-specific pipelined linear-bcast function 0->1, 1->2 ,2->3, ....., ->last node : in a pipeline fashion */ +namespace simgrid{ +namespace smpi{ int Coll_reduce_NTSL::reduce(void *buf, void *rbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) @@ -150,3 +152,5 @@ int Coll_reduce_NTSL::reduce(void *buf, void *rbuf, int count, free(tmp_buf); return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp index 277034fdce..d51b07be77 100644 --- a/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp +++ b/src/smpi/colls/reduce/reduce-arrival-pattern-aware.cpp @@ -16,7 +16,8 @@ int reduce_arrival_pattern_aware_segment_size_in_byte = 8192; #ifndef MAX_NODE #define MAX_NODE 1024 #endif - +namespace simgrid{ +namespace smpi{ /* Non-topology-specific pipelined linear-reduce function */ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf, int count, @@ -353,3 +354,5 @@ int Coll_reduce_arrival_pattern_aware::reduce(void *buf, void *rbuf, return MPI_SUCCESS; } +} +} diff --git a/src/smpi/colls/reduce/reduce-binomial.cpp b/src/smpi/colls/reduce/reduce-binomial.cpp index ac0b789fb4..de8c197ce3 100644 --- a/src/smpi/colls/reduce/reduce-binomial.cpp +++ b/src/smpi/colls/reduce/reduce-binomial.cpp @@ -7,7 +7,8 @@ #include "../colls_private.h" //#include - +namespace simgrid{ +namespace smpi{ int Coll_reduce_binomial::reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) @@ -91,3 +92,5 @@ int Coll_reduce_binomial::reduce(void *sendbuf, void *recvbuf, int count, return 0; } +} +} diff --git a/src/smpi/colls/reduce/reduce-flat-tree.cpp b/src/smpi/colls/reduce/reduce-flat-tree.cpp index 76f3c5bc36..3aa6d1c0d1 100644 --- a/src/smpi/colls/reduce/reduce-flat-tree.cpp +++ b/src/smpi/colls/reduce/reduce-flat-tree.cpp @@ -6,7 +6,8 @@ #include "../colls_private.h" //#include - +namespace simgrid{ +namespace smpi{ int Coll_reduce_flat_tree::reduce(void *sbuf, void *rbuf, int count, MPI_Datatype dtype, MPI_Op op, @@ -66,3 +67,5 @@ Coll_reduce_flat_tree::reduce(void 
*sbuf, void *rbuf, int count, /* All done */ return 0; } +} +} diff --git a/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp b/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp index cbcc1e4b1d..47bc0f28d6 100644 --- a/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp +++ b/src/smpi/colls/reduce/reduce-mvapich-knomial.cpp @@ -114,6 +114,8 @@ static int MPIR_Reduce_knomial_trace(int root, int reduce_knomial_factor, return 0; } +namespace simgrid{ +namespace smpi{ int Coll_reduce_mvapich2_knomial::reduce ( void *sendbuf, void *recvbuf, @@ -223,3 +225,5 @@ int Coll_reduce_mvapich2_knomial::reduce ( return mpi_errno; } +} +} diff --git a/src/smpi/colls/reduce/reduce-mvapich-two-level.cpp b/src/smpi/colls/reduce/reduce-mvapich-two-level.cpp index d2c5b63820..6bcfd58ec5 100644 --- a/src/smpi/colls/reduce/reduce-mvapich-two-level.cpp +++ b/src/smpi/colls/reduce/reduce-mvapich-two-level.cpp @@ -71,7 +71,8 @@ static int (*reduce_fn)(void *sendbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm); - +namespace simgrid{ +namespace smpi{ int Coll_reduce_mvapich2_two_level::reduce( void *sendbuf, void *recvbuf, int count, @@ -309,3 +310,5 @@ int Coll_reduce_mvapich2_two_level::reduce( void *sendbuf, fn_exit: return mpi_errno; } +} +} diff --git a/src/smpi/colls/reduce/reduce-ompi.cpp b/src/smpi/colls/reduce/reduce-ompi.cpp index e7c465ca01..8bccf0d150 100644 --- a/src/smpi/colls/reduce/reduce-ompi.cpp +++ b/src/smpi/colls/reduce/reduce-ompi.cpp @@ -22,7 +22,8 @@ #include "../colls_private.h" #include "../coll_tuned_topo.h" - +namespace simgrid{ +namespace smpi{ int smpi_coll_tuned_ompi_reduce_generic( void* sendbuf, void* recvbuf, int original_count, MPI_Datatype datatype, MPI_Op op, @@ -324,6 +325,7 @@ int smpi_coll_tuned_ompi_reduce_generic( void* sendbuf, void* recvbuf, int origi meaning that at least one datatype must fit in the segment ! 
*/ + int Coll_reduce_ompi_chain::reduce( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, @@ -681,3 +683,5 @@ Coll_reduce_ompi_basic_linear::reduce(void *sbuf, void *rbuf, int count, /* copied function (with appropriate renaming) ends here */ +} +} diff --git a/src/smpi/colls/reduce/reduce-rab.cpp b/src/smpi/colls/reduce/reduce-rab.cpp index 22f34a05d3..ac19cd515d 100644 --- a/src/smpi/colls/reduce/reduce-rab.cpp +++ b/src/smpi/colls/reduce/reduce-rab.cpp @@ -497,7 +497,8 @@ static void MPI_I_do_op(void* b1, void* b2, void* rslt, int cnt, } REDUCE_LIMITS - +namespace simgrid{ +namespace smpi{ static int MPI_I_anyReduce(void* Sendbuf, void* Recvbuf, int count, MPI_Datatype mpi_datatype, MPI_Op mpi_op, int root, MPI_Comm comm, int is_all) { char *scr1buf, *scr2buf, *scr3buf, *xxx, *sendbuf, *recvbuf; @@ -937,3 +938,5 @@ int Coll_allreduce_rab::allreduce(void* Sendbuf, void* Recvbuf, int count, MPI_D { return( MPI_I_anyReduce(Sendbuf, Recvbuf, count, datatype, op, -1, comm, 1) ); } +} +} diff --git a/src/smpi/colls/reduce/reduce-scatter-gather.cpp b/src/smpi/colls/reduce/reduce-scatter-gather.cpp index b8ac14235c..f25b6347bc 100644 --- a/src/smpi/colls/reduce/reduce-scatter-gather.cpp +++ b/src/smpi/colls/reduce/reduce-scatter-gather.cpp @@ -10,7 +10,8 @@ reduce Author: MPICH */ - +namespace simgrid{ +namespace smpi{ int Coll_reduce_scatter_gather::reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) @@ -410,3 +411,5 @@ int Coll_reduce_scatter_gather::reduce(void *sendbuf, void *recvbuf, return 0; } +} +} diff --git a/src/smpi/colls/reduce_scatter/reduce_scatter-mpich.cpp b/src/smpi/colls/reduce_scatter/reduce_scatter-mpich.cpp index e10e5b63a7..8481ab78e6 100644 --- a/src/smpi/colls/reduce_scatter/reduce_scatter-mpich.cpp +++ b/src/smpi/colls/reduce_scatter/reduce_scatter-mpich.cpp @@ -20,7 +20,8 @@ static inline int MPIU_Mirror_permutation(unsigned int x, int bits) return retval; } - +namespace simgrid{ +namespace smpi{ int Coll_reduce_scatter_mpich_pair::reduce_scatter(void *sendbuf, void *recvbuf, int recvcounts[], MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) @@ -498,5 +499,6 @@ int Coll_reduce_scatter_mpich_rdb::reduce_scatter(void *sendbuf, void *recvbuf, smpi_free_tmp_buffer(tmp_results); return MPI_SUCCESS; } - +} +} diff --git a/src/smpi/colls/reduce_scatter/reduce_scatter-ompi.cpp b/src/smpi/colls/reduce_scatter/reduce_scatter-ompi.cpp index c77f125b37..fd42cb85cb 100644 --- a/src/smpi/colls/reduce_scatter/reduce_scatter-ompi.cpp +++ b/src/smpi/colls/reduce_scatter/reduce_scatter-ompi.cpp @@ -41,6 +41,8 @@ * Returns: - MPI_SUCCESS or error code * Limitation: - Works only for commutative operations. 
*/ +namespace simgrid{ +namespace smpi{ int Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter(void *sbuf, void *rbuf, @@ -517,4 +519,6 @@ Coll_reduce_scatter_ompi_ring::reduce_scatter(void *sbuf, void *rbuf, int *rcoun if (NULL != inbuf_free[1]) smpi_free_tmp_buffer(inbuf_free[1]); return ret; } +} +} diff --git a/src/smpi/colls/scatter/scatter-mvapich-two-level.cpp b/src/smpi/colls/scatter/scatter-mvapich-two-level.cpp index d2cb88f90b..40e9f78868 100644 --- a/src/smpi/colls/scatter/scatter-mvapich-two-level.cpp +++ b/src/smpi/colls/scatter/scatter-mvapich-two-level.cpp @@ -43,6 +43,9 @@ extern int (*MV2_Scatter_intra_function) (void *sendbuf, int sendcount, MPI_Data void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm); +namespace simgrid{ +namespace smpi{ + int Coll_scatter_mvapich2_two_level_direct::scatter(void *sendbuf, int sendcnt, MPI_Datatype sendtype, @@ -408,3 +411,6 @@ int Coll_scatter_mvapich2_two_level_binomial::scatter(void *sendbuf, return (mpi_errno); } +} +} + diff --git a/src/smpi/colls/scatter/scatter-ompi.cpp b/src/smpi/colls/scatter/scatter-ompi.cpp index 04bf1aefec..06fe0759bb 100644 --- a/src/smpi/colls/scatter/scatter-ompi.cpp +++ b/src/smpi/colls/scatter/scatter-ompi.cpp @@ -23,6 +23,9 @@ #include "../colls_private.h" #include "../coll_tuned_topo.h" +namespace simgrid{ +namespace smpi{ + int Coll_scatter_ompi_binomial::scatter(void *sbuf, int scount, @@ -249,3 +252,6 @@ Coll_scatter_ompi_basic_linear::scatter(void *sbuf, int scount, return MPI_SUCCESS; } + +} +} diff --git a/src/smpi/colls/smpi_intel_mpi_selector.cpp b/src/smpi/colls/smpi_intel_mpi_selector.cpp index 350f2a56ce..8af1d07374 100644 --- a/src/smpi/colls/smpi_intel_mpi_selector.cpp +++ b/src/smpi/colls/smpi_intel_mpi_selector.cpp @@ -47,6 +47,8 @@ MPI_Allreduce as Shumilin's ring algorithm is unknown, default to ring' */ +namespace simgrid{ +namespace smpi{ int (*intel_allreduce_functions_table[])(void *sendbuf, void *recvbuf, @@ -2289,8 +2291,6 @@ ret Coll_ ## cat ## _impi:: cat (COLL_UNPAREN args)\ args2);\ } -namespace simgrid{ -namespace smpi{ COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLGATHERV_SIG, (send_buff, send_count, send_type, recv_buff, recv_count, recv_disps, recv_type, comm)); COLL_APPLY(IMPI_COLL_SELECT, COLL_ALLREDUCE_SIG, (sbuf, rbuf, rcount, dtype, op, comm)); diff --git a/src/smpi/colls/smpi_mpich_selector.cpp b/src/smpi/colls/smpi_mpich_selector.cpp index 8a9ffcd40e..550023941c 100644 --- a/src/smpi/colls/smpi_mpich_selector.cpp +++ b/src/smpi/colls/smpi_mpich_selector.cpp @@ -56,6 +56,8 @@ End Algorithm: MPI_Allreduce */ +namespace simgrid{ +namespace smpi{ int Coll_allreduce_mpich::allreduce(void *sbuf, void *rbuf, int count, MPI_Datatype dtype, MPI_Op op, MPI_Comm comm) { @@ -646,8 +648,6 @@ int Coll_allgatherv_mpich::allgatherv(void *sbuf, int scount, End Algorithm: MPI_Gather */ -namespace simgrid{ -namespace smpi{ int Coll_gather_mpich::gather(void *sbuf, int scount, MPI_Datatype sdtype, @@ -662,9 +662,6 @@ int Coll_gather_mpich::gather(void *sbuf, int scount, root, comm); } -} -} - /* This is the default implementation of scatter. 
The algorithm is: Algorithm: MPI_Scatter @@ -707,4 +704,6 @@ int Coll_scatter_mpich::scatter(void *sbuf, int scount, } return ret; } +} +} diff --git a/src/smpi/colls/smpi_mvapich2_selector.cpp b/src/smpi/colls/smpi_mvapich2_selector.cpp index bda8db4287..2837e97464 100644 --- a/src/smpi/colls/smpi_mvapich2_selector.cpp +++ b/src/smpi/colls/smpi_mvapich2_selector.cpp @@ -10,7 +10,6 @@ #include "smpi_mvapich2_selector_stampede.h" - namespace simgrid{ namespace smpi{ @@ -1007,6 +1006,8 @@ int Coll_scatter_mvapich2::scatter(void *sendbuf, } } + + void smpi_coll_cleanup_mvapich2(void){ int i=0; if(mv2_alltoall_thresholds_table) diff --git a/src/smpi/colls/smpi_mvapich2_selector_stampede.h b/src/smpi/colls/smpi_mvapich2_selector_stampede.h index 1b9560195c..e8a89b5185 100644 --- a/src/smpi/colls/smpi_mvapich2_selector_stampede.h +++ b/src/smpi/colls/smpi_mvapich2_selector_stampede.h @@ -38,11 +38,11 @@ int *mv2_size_alltoall_tuning_table = NULL; mv2_alltoall_tuning_table **mv2_alltoall_thresholds_table = NULL; -#define MPIR_Alltoall_bruck_MV2 Coll_alltoall_bruck::alltoall -#define MPIR_Alltoall_RD_MV2 Coll_alltoall_rdb::alltoall -#define MPIR_Alltoall_Scatter_dest_MV2 Coll_alltoall_mvapich2_scatter_dest::alltoall -#define MPIR_Alltoall_pairwise_MV2 Coll_alltoall_pair::alltoall -#define MPIR_Alltoall_inplace_MV2 Coll_alltoall_ring::alltoall +#define MPIR_Alltoall_bruck_MV2 simgrid::smpi::Coll_alltoall_bruck::alltoall +#define MPIR_Alltoall_RD_MV2 simgrid::smpi::Coll_alltoall_rdb::alltoall +#define MPIR_Alltoall_Scatter_dest_MV2 simgrid::smpi::Coll_alltoall_mvapich2_scatter_dest::alltoall +#define MPIR_Alltoall_pairwise_MV2 simgrid::smpi::Coll_alltoall_pair::alltoall +#define MPIR_Alltoall_inplace_MV2 simgrid::smpi::Coll_alltoall_ring::alltoall static void init_mv2_alltoall_tables_stampede(){ @@ -50,8 +50,8 @@ static void init_mv2_alltoall_tables_stampede(){ int agg_table_sum = 0; mv2_alltoall_tuning_table **table_ptrs = NULL; mv2_alltoall_num_ppn_conf = 3; - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_alltoall_thresholds_table = static_cast(xbt_malloc(sizeof(mv2_alltoall_tuning_table *) * mv2_alltoall_num_ppn_conf)); table_ptrs = static_cast(xbt_malloc(sizeof(mv2_alltoall_tuning_table *) @@ -333,17 +333,17 @@ static int MPIR_Allgather_RD_Allgather_Comm_MV2( void *sendbuf, return 0; } -#define MPIR_Allgather_Bruck_MV2 Coll_allgather_bruck::allgather -#define MPIR_Allgather_RD_MV2 Coll_allgather_rdb::allgather -#define MPIR_Allgather_Ring_MV2 Coll_allgather_ring::allgather -#define MPIR_2lvl_Allgather_MV2 Coll_allgather_mvapich2_smp::allgather +#define MPIR_Allgather_Bruck_MV2 simgrid::smpi::Coll_allgather_bruck::allgather +#define MPIR_Allgather_RD_MV2 simgrid::smpi::Coll_allgather_rdb::allgather +#define MPIR_Allgather_Ring_MV2 simgrid::smpi::Coll_allgather_ring::allgather +#define MPIR_2lvl_Allgather_MV2 simgrid::smpi::Coll_allgather_mvapich2_smp::allgather static void init_mv2_allgather_tables_stampede(){ int i; int agg_table_sum = 0; - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_allgather_tuning_table **table_ptrs = NULL; mv2_allgather_num_ppn_conf = 3; 
mv2_allgather_thresholds_table @@ -593,15 +593,15 @@ MV2_Gather_function_ptr MV2_Gather_intra_node_function = NULL; -#define MPIR_Gather_MV2_Direct Coll_gather_ompi_basic_linear::gather -#define MPIR_Gather_MV2_two_level_Direct Coll_gather_mvapich2_two_level::gather -#define MPIR_Gather_intra Coll_gather_mpich::gather +#define MPIR_Gather_MV2_Direct simgrid::smpi::Coll_gather_ompi_basic_linear::gather +#define MPIR_Gather_MV2_two_level_Direct simgrid::smpi::Coll_gather_mvapich2_two_level::gather +#define MPIR_Gather_intra simgrid::smpi::Coll_gather_mpich::gather static void init_mv2_gather_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_gather_tuning_table=7; mv2_gather_thresholds_table = static_cast(xbt_malloc(mv2_size_gather_tuning_table* sizeof (mv2_gather_tuning_table))); @@ -681,14 +681,14 @@ int (*MV2_Allgatherv_function)(void *sendbuf, int mv2_size_allgatherv_tuning_table = 0; mv2_allgatherv_tuning_table *mv2_allgatherv_thresholds_table = NULL; -#define MPIR_Allgatherv_Rec_Doubling_MV2 Coll_allgatherv_mpich_rdb::allgatherv -#define MPIR_Allgatherv_Bruck_MV2 Coll_allgatherv_ompi_bruck::allgatherv -#define MPIR_Allgatherv_Ring_MV2 Coll_allgatherv_mpich_ring::allgatherv +#define MPIR_Allgatherv_Rec_Doubling_MV2 simgrid::smpi::Coll_allgatherv_mpich_rdb::allgatherv +#define MPIR_Allgatherv_Bruck_MV2 simgrid::smpi::Coll_allgatherv_ompi_bruck::allgatherv +#define MPIR_Allgatherv_Ring_MV2 simgrid::smpi::Coll_allgatherv_mpich_ring::allgatherv static void init_mv2_allgatherv_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_allgatherv_tuning_table = 6; mv2_allgatherv_thresholds_table = static_cast(xbt_malloc(mv2_size_allgatherv_tuning_table * sizeof (mv2_allgatherv_tuning_table))); @@ -815,7 +815,7 @@ static int MPIR_Allreduce_reduce_p2p_MV2( void *sendbuf, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { - Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm); + simgrid::smpi::Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm); return MPI_SUCCESS; } @@ -825,18 +825,18 @@ static int MPIR_Allreduce_reduce_shmem_MV2( void *sendbuf, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) { - Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm); + simgrid::smpi::Colls::reduce(sendbuf,recvbuf,count,datatype,op,0,comm); return MPI_SUCCESS; } -#define MPIR_Allreduce_pt2pt_rd_MV2 Coll_allreduce_rdb::allreduce -#define MPIR_Allreduce_pt2pt_rs_MV2 Coll_allreduce_mvapich2_rs::allreduce -#define MPIR_Allreduce_two_level_MV2 Coll_allreduce_mvapich2_two_level::allreduce +#define MPIR_Allreduce_pt2pt_rd_MV2 simgrid::smpi::Coll_allreduce_rdb::allreduce +#define MPIR_Allreduce_pt2pt_rs_MV2 simgrid::smpi::Coll_allreduce_mvapich2_rs::allreduce +#define MPIR_Allreduce_two_level_MV2 simgrid::smpi::Coll_allreduce_mvapich2_two_level::allreduce static void init_mv2_allreduce_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; 
mv2_size_allreduce_tuning_table = 8; mv2_allreduce_thresholds_table = static_cast(xbt_malloc(mv2_size_allreduce_tuning_table * sizeof (mv2_allreduce_tuning_table))); @@ -1021,22 +1021,22 @@ int mv2_intra_node_knomial_factor = 4; #define INTRA_NODE_ROOT 0 -#define MPIR_Pipelined_Bcast_Zcpy_MV2 Coll_bcast_mpich::bcast -#define MPIR_Pipelined_Bcast_MV2 Coll_bcast_mpich::bcast -#define MPIR_Bcast_binomial_MV2 Coll_bcast_binomial_tree::bcast -#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 Coll_bcast_scatter_LR_allgather::bcast -#define MPIR_Bcast_scatter_doubling_allgather_MV2 Coll_bcast_scatter_rdb_allgather::bcast -#define MPIR_Bcast_scatter_ring_allgather_MV2 Coll_bcast_scatter_LR_allgather::bcast -#define MPIR_Shmem_Bcast_MV2 Coll_bcast_mpich::bcast -#define MPIR_Bcast_tune_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast -#define MPIR_Bcast_inter_node_helper_MV2 Coll_bcast_mvapich2_inter_node::bcast -#define MPIR_Knomial_Bcast_intra_node_MV2 Coll_bcast_mvapich2_knomial_intra_node::bcast -#define MPIR_Bcast_intra_MV2 Coll_bcast_mvapich2_intra_node::bcast +#define MPIR_Pipelined_Bcast_Zcpy_MV2 simgrid::smpi::Coll_bcast_mpich::bcast +#define MPIR_Pipelined_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast +#define MPIR_Bcast_binomial_MV2 simgrid::smpi::Coll_bcast_binomial_tree::bcast +#define MPIR_Bcast_scatter_ring_allgather_shm_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast +#define MPIR_Bcast_scatter_doubling_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_rdb_allgather::bcast +#define MPIR_Bcast_scatter_ring_allgather_MV2 simgrid::smpi::Coll_bcast_scatter_LR_allgather::bcast +#define MPIR_Shmem_Bcast_MV2 simgrid::smpi::Coll_bcast_mpich::bcast +#define MPIR_Bcast_tune_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast +#define MPIR_Bcast_inter_node_helper_MV2 simgrid::smpi::Coll_bcast_mvapich2_inter_node::bcast +#define MPIR_Knomial_Bcast_intra_node_MV2 simgrid::smpi::Coll_bcast_mvapich2_knomial_intra_node::bcast +#define MPIR_Bcast_intra_MV2 simgrid::smpi::Coll_bcast_mvapich2_intra_node::bcast static void init_mv2_bcast_tables_stampede(){ //Stampede, - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_bcast_tuning_table=8; mv2_bcast_thresholds_table = static_cast(xbt_malloc(mv2_size_bcast_tuning_table * sizeof (mv2_bcast_tuning_table))); @@ -1294,17 +1294,17 @@ int (*MV2_Reduce_intra_function)( void *sendbuf, MPI_Comm comm_ptr)=NULL; -#define MPIR_Reduce_inter_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce -#define MPIR_Reduce_intra_knomial_wrapper_MV2 Coll_reduce_mvapich2_knomial::reduce -#define MPIR_Reduce_binomial_MV2 Coll_reduce_binomial::reduce -#define MPIR_Reduce_redscat_gather_MV2 Coll_reduce_scatter_gather::reduce -#define MPIR_Reduce_shmem_MV2 Coll_reduce_ompi_basic_linear::reduce -#define MPIR_Reduce_two_level_helper_MV2 Coll_reduce_mvapich2_two_level::reduce +#define MPIR_Reduce_inter_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce +#define MPIR_Reduce_intra_knomial_wrapper_MV2 simgrid::smpi::Coll_reduce_mvapich2_knomial::reduce +#define MPIR_Reduce_binomial_MV2 simgrid::smpi::Coll_reduce_binomial::reduce +#define MPIR_Reduce_redscat_gather_MV2 simgrid::smpi::Coll_reduce_scatter_gather::reduce +#define MPIR_Reduce_shmem_MV2 simgrid::smpi::Coll_reduce_ompi_basic_linear::reduce +#define 
MPIR_Reduce_two_level_helper_MV2 simgrid::smpi::Coll_reduce_mvapich2_two_level::reduce static void init_mv2_reduce_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; /*Stampede*/ mv2_size_reduce_tuning_table = 8; mv2_reduce_thresholds_table = static_cast(xbt_malloc(mv2_size_reduce_tuning_table * @@ -1536,19 +1536,19 @@ static int MPIR_Reduce_Scatter_Basic_MV2(void *sendbuf, MPI_Op op, MPI_Comm comm) { - Coll_reduce_scatter_default::reduce_scatter(sendbuf,recvbuf,recvcnts,datatype,op,comm); + simgrid::smpi::Coll_reduce_scatter_default::reduce_scatter(sendbuf,recvbuf,recvcnts,datatype,op,comm); return MPI_SUCCESS; } -#define MPIR_Reduce_scatter_non_comm_MV2 Coll_reduce_scatter_mpich_noncomm::reduce_scatter -#define MPIR_Reduce_scatter_Rec_Halving_MV2 Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter -#define MPIR_Reduce_scatter_Pair_Wise_MV2 Coll_reduce_scatter_mpich_pair::reduce_scatter +#define MPIR_Reduce_scatter_non_comm_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_noncomm::reduce_scatter +#define MPIR_Reduce_scatter_Rec_Halving_MV2 simgrid::smpi::Coll_reduce_scatter_ompi_basic_recursivehalving::reduce_scatter +#define MPIR_Reduce_scatter_Pair_Wise_MV2 simgrid::smpi::Coll_reduce_scatter_mpich_pair::reduce_scatter static void init_mv2_reduce_scatter_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; mv2_size_red_scat_tuning_table = 6; mv2_red_scat_thresholds_table = static_cast(xbt_malloc(mv2_size_red_scat_tuning_table * sizeof (mv2_red_scat_tuning_table))); @@ -1664,17 +1664,17 @@ int MPIR_Scatter_mcst_wrap_MV2(void *sendbuf, return 0; } -#define MPIR_Scatter_MV2_Binomial Coll_scatter_ompi_binomial::scatter -#define MPIR_Scatter_MV2_Direct Coll_scatter_ompi_basic_linear::scatter -#define MPIR_Scatter_MV2_two_level_Binomial Coll_scatter_mvapich2_two_level_binomial::scatter -#define MPIR_Scatter_MV2_two_level_Direct Coll_scatter_mvapich2_two_level_direct::scatter +#define MPIR_Scatter_MV2_Binomial simgrid::smpi::Coll_scatter_ompi_binomial::scatter +#define MPIR_Scatter_MV2_Direct simgrid::smpi::Coll_scatter_ompi_basic_linear::scatter +#define MPIR_Scatter_MV2_two_level_Binomial simgrid::smpi::Coll_scatter_mvapich2_two_level_binomial::scatter +#define MPIR_Scatter_MV2_two_level_Direct simgrid::smpi::Coll_scatter_mvapich2_two_level_direct::scatter static void init_mv2_scatter_tables_stampede(){ - if(Colls::smpi_coll_cleanup_callback==NULL) - Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback==NULL) + simgrid::smpi::Colls::smpi_coll_cleanup_callback=&smpi_coll_cleanup_mvapich2; int agg_table_sum = 0; int i; diff --git a/src/smpi/private.h b/src/smpi/private.h index cfb141c510..8d23af3f4d 100644 --- a/src/smpi/private.h +++ b/src/smpi/private.h @@ -31,9 +31,6 @@ SG_BEGIN_DECL() -using namespace simgrid::smpi; - - #define PERSISTENT 0x1 #define NON_PERSISTENT 0x2 #define SEND 0x4 @@ -75,8 +72,8 @@ typedef SMPI_Graph_topology *MPIR_Graph_Topology; typedef SMPI_Dist_Graph_topology *MPIR_Dist_Graph_Topology; -XBT_PRIVATE Process* smpi_process(); -XBT_PRIVATE Process* 
smpi_process_remote(int index); +XBT_PRIVATE SMPI_Process* smpi_process(); +XBT_PRIVATE SMPI_Process* smpi_process_remote(int index); XBT_PRIVATE int smpi_process_count(); XBT_PRIVATE void smpi_deployment_register_process(const char* instance_id, int rank, int index); diff --git a/src/smpi/smpi_comm.cpp b/src/smpi/smpi_comm.cpp index 97997d46fb..50e6f84372 100644 --- a/src/smpi/smpi_comm.cpp +++ b/src/smpi/smpi_comm.cpp @@ -18,7 +18,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_comm, smpi, "Logging specific to SMPI (comm)"); - Comm mpi_MPI_COMM_UNINITIALIZED; + simgrid::smpi::Comm mpi_MPI_COMM_UNINITIALIZED; MPI_Comm MPI_COMM_UNINITIALIZED=&mpi_MPI_COMM_UNINITIALIZED; /* Support for cartesian topology was added, but there are 2 other types of topology, graph et dist graph. In order to diff --git a/src/smpi/smpi_datatype.cpp b/src/smpi/smpi_datatype.cpp index 8c2fc31bd3..db186c3a53 100644 --- a/src/smpi/smpi_datatype.cpp +++ b/src/smpi/smpi_datatype.cpp @@ -19,7 +19,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI (datatype)"); #define CREATE_MPI_DATATYPE(name, type) \ - static Datatype mpi_##name ( \ + static simgrid::smpi::Datatype mpi_##name ( \ (char*) # name, \ sizeof(type), /* size */ \ 0, /* lb */ \ @@ -29,7 +29,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_datatype, smpi, "Logging specific to SMPI ( const MPI_Datatype name = &mpi_##name; #define CREATE_MPI_DATATYPE_NULL(name) \ - static Datatype mpi_##name ( \ + static simgrid::smpi::Datatype mpi_##name ( \ (char*) # name, \ 0, /* size */ \ 0, /* lb */ \ diff --git a/src/smpi/smpi_deployment.cpp b/src/smpi/smpi_deployment.cpp index df0686c95d..ec2e32c7e0 100644 --- a/src/smpi/smpi_deployment.cpp +++ b/src/smpi/smpi_deployment.cpp @@ -67,8 +67,8 @@ void smpi_deployment_register_process(const char* instance_id, int rank, int ind xbt_assert(instance, "Error, unknown instance %s", instance_id); if(instance->comm_world == MPI_COMM_NULL){ - MPI_Group group = new Group(instance->size); - instance->comm_world = new Comm(group, nullptr); + MPI_Group group = new simgrid::smpi::Group(instance->size); + instance->comm_world = new simgrid::smpi::Comm(group, nullptr); } instance->present_processes++; index_to_process_data[index]=instance->index+rank; diff --git a/src/smpi/smpi_f77.cpp b/src/smpi/smpi_f77.cpp index b5ab90f8bb..36ef41f59f 100644 --- a/src/smpi/smpi_f77.cpp +++ b/src/smpi/smpi_f77.cpp @@ -37,7 +37,7 @@ static char* get_key_id(char* key, int id) { } static void smpi_init_fortran_types(){ - if(F2C::lookup() == nullptr){ + if(simgrid::smpi::F2C::lookup() == nullptr){ MPI_COMM_WORLD->add_f(); MPI_BYTE->add_f();//MPI_BYTE MPI_CHAR->add_f();//MPI_CHARACTER @@ -103,20 +103,20 @@ void mpi_finalize_(int* ierr) { *ierr = MPI_Finalize(); running_processes--; if(running_processes==0){ - F2C::delete_lookup(); + simgrid::smpi::F2C::delete_lookup(); } } void mpi_abort_(int* comm, int* errorcode, int* ierr) { - *ierr = MPI_Abort(Comm::f2c(*comm), *errorcode); + *ierr = MPI_Abort(simgrid::smpi::Comm::f2c(*comm), *errorcode); } void mpi_comm_rank_(int* comm, int* rank, int* ierr) { - *ierr = MPI_Comm_rank(Comm::f2c(*comm), rank); + *ierr = MPI_Comm_rank(simgrid::smpi::Comm::f2c(*comm), rank); } void mpi_comm_size_(int* comm, int* size, int* ierr) { - *ierr = MPI_Comm_size(Comm::f2c(*comm), size); + *ierr = MPI_Comm_size(simgrid::smpi::Comm::f2c(*comm), size); } double mpi_wtime_() { @@ -130,7 +130,7 @@ double mpi_wtick_() { void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) { MPI_Comm tmp; - *ierr = 
MPI_Comm_dup(Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_dup(simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } @@ -139,26 +139,26 @@ void mpi_comm_dup_(int* comm, int* newcomm, int* ierr) { void mpi_comm_create_(int* comm, int* group, int* newcomm, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Comm_create(Comm::f2c(*comm),Group::f2c(*group), &tmp); + *ierr = MPI_Comm_create(simgrid::smpi::Comm::f2c(*comm),simgrid::smpi::Group::f2c(*group), &tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } } void mpi_comm_free_(int* comm, int* ierr) { - MPI_Comm tmp = Comm::f2c(*comm); + MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm); *ierr = MPI_Comm_free(&tmp); if(*ierr == MPI_SUCCESS) { - Comm::free_f(*comm); + simgrid::smpi::Comm::free_f(*comm); } } void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Comm_split(Comm::f2c(*comm), *color, *key, &tmp); + *ierr = MPI_Comm_split(simgrid::smpi::Comm::f2c(*comm), *color, *key, &tmp); if(*ierr == MPI_SUCCESS) { *comm_out = tmp->add_f(); } @@ -167,7 +167,7 @@ void mpi_comm_split_(int* comm, int* color, int* key, int* comm_out, int* ierr) void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) { MPI_Group tmp; - *ierr = MPI_Group_incl(Group::f2c(*group), *n, ranks, &tmp); + *ierr = MPI_Group_incl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp); if(*ierr == MPI_SUCCESS) { *group_out = tmp->add_f(); } @@ -176,7 +176,7 @@ void mpi_group_incl_(int* group, int* n, int* ranks, int* group_out, int* ierr) void mpi_comm_group_(int* comm, int* group_out, int* ierr) { MPI_Group tmp; - *ierr = MPI_Comm_group(Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_group(simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *group_out = tmp->c2f(); } @@ -189,7 +189,7 @@ void mpi_initialized_(int* flag, int* ierr){ void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { MPI_Request req; buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Send_init(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req); + *ierr = MPI_Send_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); if(*ierr == MPI_SUCCESS) { *request = req->add_f(); } @@ -198,7 +198,7 @@ void mpi_send_init_(void *buf, int* count, int* datatype, int* dst, int* tag, in void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { MPI_Request req; buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Isend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req); + *ierr = MPI_Isend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); if(*ierr == MPI_SUCCESS) { *request = req->add_f(); } @@ -207,7 +207,7 @@ void mpi_isend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* c void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* request, int* ierr) { MPI_Request req; buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Irsend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm), &req); + *ierr = MPI_Irsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm), &req); if(*ierr == MPI_SUCCESS) { *request = req->add_f(); } @@ -215,26 +215,26 @@ void mpi_irsend_(void *buf, int* count, int* datatype, int* dst, int* tag, int* void 
mpi_send_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) { buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Send(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm)); + *ierr = MPI_Send(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm)); } void mpi_rsend_(void* buf, int* count, int* datatype, int* dst, int* tag, int* comm, int* ierr) { buf = static_cast(FORT_BOTTOM(buf)); - *ierr = MPI_Rsend(buf, *count, Datatype::f2c(*datatype), *dst, *tag, Comm::f2c(*comm)); + *ierr = MPI_Rsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *tag, simgrid::smpi::Comm::f2c(*comm)); } void mpi_sendrecv_(void* sendbuf, int* sendcount, int* sendtype, int* dst, int* sendtag, void *recvbuf, int* recvcount, int* recvtype, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) { sendbuf = static_cast( FORT_BOTTOM(sendbuf)); recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Sendrecv(sendbuf, *sendcount, Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount, - Datatype::f2c(*recvtype), *src, *recvtag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); + *ierr = MPI_Sendrecv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), *dst, *sendtag, recvbuf, *recvcount, + simgrid::smpi::Datatype::f2c(*recvtype), *src, *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); } void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) { MPI_Request req; buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Recv_init(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), &req); + *ierr = MPI_Recv_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req); if(*ierr == MPI_SUCCESS) { *request = req->add_f(); } @@ -243,7 +243,7 @@ void mpi_recv_init_(void *buf, int* count, int* datatype, int* src, int* tag, in void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* comm, int* request, int* ierr) { MPI_Request req; buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Irecv(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), &req); + *ierr = MPI_Irecv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), &req); if(*ierr == MPI_SUCCESS) { *request = req->add_f(); } @@ -251,11 +251,11 @@ void mpi_irecv_(void *buf, int* count, int* datatype, int* src, int* tag, int* c void mpi_recv_(void* buf, int* count, int* datatype, int* src, int* tag, int* comm, MPI_Status* status, int* ierr) { buf = static_cast( FORT_BOTTOM(buf)); - *ierr = MPI_Recv(buf, *count, Datatype::f2c(*datatype), *src, *tag, Comm::f2c(*comm), status); + *ierr = MPI_Recv(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *src, *tag, simgrid::smpi::Comm::f2c(*comm), status); } void mpi_start_(int* request, int* ierr) { - MPI_Request req = Request::f2c(*request); + MPI_Request req = simgrid::smpi::Request::f2c(*request); *ierr = MPI_Start(&req); } @@ -266,18 +266,18 @@ void mpi_startall_(int* count, int* requests, int* ierr) { reqs = xbt_new(MPI_Request, *count); for(i = 0; i < *count; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr = MPI_Startall(*count, reqs); xbt_free(reqs); } void mpi_wait_(int* request, MPI_Status* status, int* ierr) { - MPI_Request req = Request::f2c(*request); + MPI_Request req = simgrid::smpi::Request::f2c(*request); *ierr = 
MPI_Wait(&req, FORT_STATUS_IGNORE(status)); if(req==MPI_REQUEST_NULL){ - Request::free_f(*request); + simgrid::smpi::Request::free_f(*request); *request=MPI_FORTRAN_REQUEST_NULL; } } @@ -288,11 +288,11 @@ void mpi_waitany_(int* count, int* requests, int* index, MPI_Status* status, int reqs = xbt_new(MPI_Request, *count); for(i = 0; i < *count; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr = MPI_Waitany(*count, reqs, index, status); if(reqs[*index]==MPI_REQUEST_NULL){ - Request::free_f(requests[*index]); + simgrid::smpi::Request::free_f(requests[*index]); requests[*index]=MPI_FORTRAN_REQUEST_NULL; } xbt_free(reqs); @@ -304,12 +304,12 @@ void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) { reqs = xbt_new(MPI_Request, *count); for(i = 0; i < *count; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr = MPI_Waitall(*count, reqs, FORT_STATUSES_IGNORE(status)); for(i = 0; i < *count; i++) { if(reqs[i]==MPI_REQUEST_NULL){ - Request::free_f(requests[i]); + simgrid::smpi::Request::free_f(requests[i]); requests[i]=MPI_FORTRAN_REQUEST_NULL; } } @@ -318,43 +318,43 @@ void mpi_waitall_(int* count, int* requests, MPI_Status* status, int* ierr) { } void mpi_barrier_(int* comm, int* ierr) { - *ierr = MPI_Barrier(Comm::f2c(*comm)); + *ierr = MPI_Barrier(simgrid::smpi::Comm::f2c(*comm)); } void mpi_bcast_(void *buf, int* count, int* datatype, int* root, int* comm, int* ierr) { - *ierr = MPI_Bcast(buf, *count, Datatype::f2c(*datatype), *root, Comm::f2c(*comm)); + *ierr = MPI_Bcast(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_reduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* root, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); sendbuf = static_cast( FORT_BOTTOM(sendbuf)); recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Reduce(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), *root, Comm::f2c(*comm)); + *ierr = MPI_Reduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_allreduce_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), Comm::f2c(*comm)); + *ierr = MPI_Allreduce(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); } void mpi_reduce_scatter_(void* sendbuf, void* recvbuf, int* recvcounts, int* datatype, int* op, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, Datatype::f2c(*datatype), - Op::f2c(*op), Comm::f2c(*comm)); + *ierr = MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, simgrid::smpi::Datatype::f2c(*datatype), + simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); } void mpi_scatter_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) { recvbuf = static_cast( FORT_IN_PLACE(recvbuf)); - *ierr = MPI_Scatter(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm)); + *ierr = MPI_Scatter(sendbuf, *sendcount, 
simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_scatterv_(void* sendbuf, int* sendcounts, int* displs, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, int* root, int* comm, int* ierr) { recvbuf = static_cast( FORT_IN_PLACE(recvbuf)); - *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, Datatype::f2c(*sendtype), - recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm)); + *ierr = MPI_Scatterv(sendbuf, sendcounts, displs, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, @@ -362,8 +362,8 @@ void mpi_gather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, in sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE; recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Gather(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, *recvcount, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm)); + *ierr = MPI_Gather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype, @@ -371,46 +371,46 @@ void mpi_gatherv_(void* sendbuf, int* sendcount, int* sendtype, sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); sendbuf = sendbuf!=MPI_IN_PLACE ? static_cast( FORT_BOTTOM(sendbuf)) : MPI_IN_PLACE; recvbuf = static_cast( FORT_BOTTOM(recvbuf)); - *ierr = MPI_Gatherv(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, recvcounts, displs, Datatype::f2c(*recvtype), *root, Comm::f2c(*comm)); + *ierr = MPI_Gatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), *root, simgrid::smpi::Comm::f2c(*comm)); } void mpi_allgather_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allgather(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, *recvcount, Datatype::f2c(*recvtype), Comm::f2c(*comm)); + *ierr = MPI_Allgather(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); } void mpi_allgatherv_(void* sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcounts,int* displs, int* recvtype, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Allgatherv(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, recvcounts, displs, Datatype::f2c(*recvtype), Comm::f2c(*comm)); + *ierr = MPI_Allgatherv(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, recvcounts, displs, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); } void mpi_scan_(void* sendbuf, void* recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr) { - *ierr = MPI_Scan(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), - Op::f2c(*op), Comm::f2c(*comm)); + *ierr = MPI_Scan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), + simgrid::smpi::Op::f2c(*op), simgrid::smpi::Comm::f2c(*comm)); } void mpi_alltoall_(void* 
sendbuf, int* sendcount, int* sendtype, void* recvbuf, int* recvcount, int* recvtype, int* comm, int* ierr) { - *ierr = MPI_Alltoall(sendbuf, *sendcount, Datatype::f2c(*sendtype), - recvbuf, *recvcount, Datatype::f2c(*recvtype), Comm::f2c(*comm)); + *ierr = MPI_Alltoall(sendbuf, *sendcount, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); } void mpi_alltoallv_(void* sendbuf, int* sendcounts, int* senddisps, int* sendtype, void* recvbuf, int* recvcounts, int* recvdisps, int* recvtype, int* comm, int* ierr) { - *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, Datatype::f2c(*sendtype), - recvbuf, recvcounts, recvdisps, Datatype::f2c(*recvtype), Comm::f2c(*comm)); + *ierr = MPI_Alltoallv(sendbuf, sendcounts, senddisps, simgrid::smpi::Datatype::f2c(*sendtype), + recvbuf, recvcounts, recvdisps, simgrid::smpi::Datatype::f2c(*recvtype), simgrid::smpi::Comm::f2c(*comm)); } void mpi_test_ (int * request, int *flag, MPI_Status * status, int* ierr){ - MPI_Request req = Request::f2c(*request); + MPI_Request req = simgrid::smpi::Request::f2c(*request); *ierr= MPI_Test(&req, flag, FORT_STATUS_IGNORE(status)); if(req==MPI_REQUEST_NULL){ - Request::free_f(*request); + simgrid::smpi::Request::free_f(*request); *request=MPI_FORTRAN_REQUEST_NULL; } } @@ -419,12 +419,12 @@ void mpi_testall_ (int* count, int * requests, int *flag, MPI_Status * statuses int i; MPI_Request* reqs = xbt_new(MPI_Request, *count); for(i = 0; i < *count; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr= MPI_Testall(*count, reqs, flag, FORT_STATUSES_IGNORE(statuses)); for(i = 0; i < *count; i++) { if(reqs[i]==MPI_REQUEST_NULL){ - Request::free_f(requests[i]); + simgrid::smpi::Request::free_f(requests[i]); requests[i]=MPI_FORTRAN_REQUEST_NULL; } } @@ -436,25 +436,25 @@ void mpi_get_processor_name_(char *name, int *resultlen, int* ierr){ } void mpi_get_count_(MPI_Status * status, int* datatype, int *count, int* ierr){ - *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), Datatype::f2c(*datatype), count); + *ierr = MPI_Get_count(FORT_STATUS_IGNORE(status), simgrid::smpi::Datatype::f2c(*datatype), count); } void mpi_attr_get_(int* comm, int* keyval, void* attr_value, int* flag, int* ierr ){ - *ierr = MPI_Attr_get(Comm::f2c(*comm), *keyval, attr_value, flag); + *ierr = MPI_Attr_get(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value, flag); } void mpi_type_extent_(int* datatype, MPI_Aint * extent, int* ierr){ - *ierr= MPI_Type_extent(Datatype::f2c(*datatype), extent); + *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent); } void mpi_type_commit_(int* datatype, int* ierr){ - MPI_Datatype tmp= Datatype::f2c(*datatype); + MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype); *ierr= MPI_Type_commit(&tmp); } void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){ MPI_Datatype tmp; - *ierr= MPI_Type_vector(*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp); + *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -462,7 +462,7 @@ void mpi_type_vector_(int* count, int* blocklen, int* stride, int* old_type, int void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_type, int* newtype, int* ierr){ MPI_Datatype tmp; - *ierr= MPI_Type_vector(*count, *blocklen, *stride, Datatype::f2c(*old_type), 
&tmp); + *ierr= MPI_Type_vector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -470,7 +470,7 @@ void mpi_type_create_vector_(int* count, int* blocklen, int* stride, int* old_ty void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){ MPI_Datatype tmp; - *ierr= MPI_Type_hvector (*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp); + *ierr= MPI_Type_hvector (*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -478,31 +478,31 @@ void mpi_type_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_typ void mpi_type_create_hvector_(int* count, int* blocklen, MPI_Aint* stride, int* old_type, int* newtype, int* ierr){ MPI_Datatype tmp; - *ierr= MPI_Type_hvector(*count, *blocklen, *stride, Datatype::f2c(*old_type), &tmp); + *ierr= MPI_Type_hvector(*count, *blocklen, *stride, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } } void mpi_type_free_(int* datatype, int* ierr){ - MPI_Datatype tmp= Datatype::f2c(*datatype); + MPI_Datatype tmp= simgrid::smpi::Datatype::f2c(*datatype); *ierr= MPI_Type_free (&tmp); if(*ierr == MPI_SUCCESS) { - F2C::free_f(*datatype); + simgrid::smpi::F2C::free_f(*datatype); } } void mpi_type_ub_(int* datatype, MPI_Aint * disp, int* ierr){ - *ierr= MPI_Type_ub(Datatype::f2c(*datatype), disp); + *ierr= MPI_Type_ub(simgrid::smpi::Datatype::f2c(*datatype), disp); } void mpi_type_lb_(int* datatype, MPI_Aint * extent, int* ierr){ - *ierr= MPI_Type_extent(Datatype::f2c(*datatype), extent); + *ierr= MPI_Type_extent(simgrid::smpi::Datatype::f2c(*datatype), extent); } void mpi_type_size_(int* datatype, int *size, int* ierr) { - *ierr = MPI_Type_size(Datatype::f2c(*datatype), size); + *ierr = MPI_Type_size(simgrid::smpi::Datatype::f2c(*datatype), size); } void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){ @@ -510,39 +510,39 @@ void mpi_error_string_(int* errorcode, char* string, int* resultlen, int* ierr){ } void mpi_win_fence_( int* assert, int* win, int* ierr){ - *ierr = MPI_Win_fence(* assert, Win::f2c(*win)); + *ierr = MPI_Win_fence(* assert, simgrid::smpi::Win::f2c(*win)); } void mpi_win_free_( int* win, int* ierr){ - MPI_Win tmp = Win::f2c(*win); + MPI_Win tmp = simgrid::smpi::Win::f2c(*win); *ierr = MPI_Win_free(&tmp); if(*ierr == MPI_SUCCESS) { - F2C::free_f(*win); + simgrid::smpi::F2C::free_f(*win); } } void mpi_win_create_( int *base, MPI_Aint* size, int* disp_unit, int* info, int* comm, int *win, int* ierr){ MPI_Win tmp; - *ierr = MPI_Win_create( static_cast(base), *size, *disp_unit, Info::f2c(*info), Comm::f2c(*comm),&tmp); + *ierr = MPI_Win_create( static_cast(base), *size, *disp_unit, simgrid::smpi::Info::f2c(*info), simgrid::smpi::Comm::f2c(*comm),&tmp); if(*ierr == MPI_SUCCESS) { *win = tmp->add_f(); } } void mpi_win_post_(int* group, int assert, int* win, int* ierr){ - *ierr = MPI_Win_post(Group::f2c(*group), assert, Win::f2c(*win)); + *ierr = MPI_Win_post(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win)); } void mpi_win_start_(int* group, int assert, int* win, int* ierr){ - *ierr = MPI_Win_start(Group::f2c(*group), assert, Win::f2c(*win)); + *ierr = MPI_Win_start(simgrid::smpi::Group::f2c(*group), assert, simgrid::smpi::Win::f2c(*win)); } void mpi_win_complete_(int* win, int* ierr){ - *ierr = MPI_Win_complete(Win::f2c(*win)); + 
*ierr = MPI_Win_complete(simgrid::smpi::Win::f2c(*win)); } void mpi_win_wait_(int* win, int* ierr){ - *ierr = MPI_Win_wait(Win::f2c(*win)); + *ierr = MPI_Win_wait(simgrid::smpi::Win::f2c(*win)); } void mpi_win_set_name_ (int* win, char * name, int* ierr, int size){ @@ -556,12 +556,12 @@ void mpi_win_set_name_ (int* win, char * name, int* ierr, int size){ char* tname = xbt_new(char,size+1); strncpy(tname, name, size); tname[size]='\0'; - *ierr = MPI_Win_set_name(Win::f2c(*win), tname); + *ierr = MPI_Win_set_name(simgrid::smpi::Win::f2c(*win), tname); xbt_free(tname); } void mpi_win_get_name_ (int* win, char * name, int* len, int* ierr){ - *ierr = MPI_Win_get_name(Win::f2c(*win),name,len); + *ierr = MPI_Win_get_name(simgrid::smpi::Win::f2c(*win),name,len); if(*len>0) name[*len]=' ';//blank padding, not \0 } @@ -596,7 +596,7 @@ void mpi_info_set_( int *info, char *key, char *value, int* ierr, unsigned int k strncpy(tvalue, value, valuelen); tvalue[valuelen]='\0'; - *ierr = MPI_Info_set( Info::f2c(*info), tkey, tvalue); + *ierr = MPI_Info_set( simgrid::smpi::Info::f2c(*info), tkey, tvalue); xbt_free(tkey); xbt_free(tvalue); } @@ -611,7 +611,7 @@ void mpi_info_get_ (int* info,char *key,int* valuelen, char *value, int *flag, i char* tkey = xbt_new(char,keylen+1); strncpy(tkey, key, keylen); tkey[keylen]='\0'; - *ierr = MPI_Info_get(Info::f2c(*info),tkey,*valuelen, value, flag); + *ierr = MPI_Info_get(simgrid::smpi::Info::f2c(*info),tkey,*valuelen, value, flag); xbt_free(tkey); if(*flag!=0){ int replace=0; @@ -626,29 +626,29 @@ void mpi_info_get_ (int* info,char *key,int* valuelen, char *value, int *flag, i } void mpi_info_free_(int* info, int* ierr){ - MPI_Info tmp = Info::f2c(*info); + MPI_Info tmp = simgrid::smpi::Info::f2c(*info); *ierr = MPI_Info_free(&tmp); if(*ierr == MPI_SUCCESS) { - F2C::free_f(*info); + simgrid::smpi::F2C::free_f(*info); } } void mpi_get_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){ - *ierr = MPI_Get( static_cast(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Win::f2c(*win)); + *ierr = MPI_Get( static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, + *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win)); } void mpi_accumulate_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* op, int* win, int* ierr){ - *ierr = MPI_Accumulate( static_cast(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Op::f2c(*op), Win::f2c(*win)); + *ierr = MPI_Accumulate( static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, + *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Op::f2c(*op), simgrid::smpi::Win::f2c(*win)); } void mpi_put_( int *origin_addr, int* origin_count, int* origin_datatype, int *target_rank, MPI_Aint* target_disp, int *target_count, int* tarsmpi_type_f2c, int* win, int* ierr){ - *ierr = MPI_Put( static_cast(origin_addr),*origin_count, Datatype::f2c(*origin_datatype),*target_rank, - *target_disp, *target_count,Datatype::f2c(*tarsmpi_type_f2c), Win::f2c(*win)); + *ierr = MPI_Put( 
static_cast(origin_addr),*origin_count, simgrid::smpi::Datatype::f2c(*origin_datatype),*target_rank, + *target_disp, *target_count, simgrid::smpi::Datatype::f2c(*tarsmpi_type_f2c), simgrid::smpi::Win::f2c(*win)); } //following are automatically generated, and have to be checked @@ -685,7 +685,7 @@ void mpi_get_address_ (void *location, MPI_Aint * address, int* ierr){ void mpi_type_dup_ (int* datatype, int* newdatatype, int* ierr){ MPI_Datatype tmp; - *ierr = MPI_Type_dup(Datatype::f2c(*datatype), &tmp); + *ierr = MPI_Type_dup(simgrid::smpi::Datatype::f2c(*datatype), &tmp); if(*ierr == MPI_SUCCESS) { *newdatatype = tmp->add_f(); } @@ -695,29 +695,29 @@ void mpi_type_set_name_ (int* datatype, char * name, int* ierr, int size){ char* tname = xbt_new(char, size+1); strncpy(tname, name, size); tname[size]='\0'; - *ierr = MPI_Type_set_name(Datatype::f2c(*datatype), tname); + *ierr = MPI_Type_set_name(simgrid::smpi::Datatype::f2c(*datatype), tname); xbt_free(tname); } void mpi_type_get_name_ (int* datatype, char * name, int* len, int* ierr){ - *ierr = MPI_Type_get_name(Datatype::f2c(*datatype),name,len); + *ierr = MPI_Type_get_name(simgrid::smpi::Datatype::f2c(*datatype),name,len); if(*len>0) name[*len]=' '; } void mpi_type_get_attr_ (int* type, int* type_keyval, void *attribute_val, int* flag, int* ierr){ - *ierr = MPI_Type_get_attr ( Datatype::f2c(*type), *type_keyval, attribute_val,flag); + *ierr = MPI_Type_get_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val,flag); } void mpi_type_set_attr_ (int* type, int* type_keyval, void *attribute_val, int* ierr){ - *ierr = MPI_Type_set_attr ( Datatype::f2c(*type), *type_keyval, attribute_val); + *ierr = MPI_Type_set_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval, attribute_val); } void mpi_type_delete_attr_ (int* type, int* type_keyval, int* ierr){ - *ierr = MPI_Type_delete_attr ( Datatype::f2c(*type), *type_keyval); + *ierr = MPI_Type_delete_attr ( simgrid::smpi::Datatype::f2c(*type), *type_keyval); } void mpi_type_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){ @@ -736,12 +736,12 @@ void mpi_pcontrol_ (int* level , int* ierr){ void mpi_type_get_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){ - *ierr = MPI_Type_get_extent(Datatype::f2c(*datatype), lb, extent); + *ierr = MPI_Type_get_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent); } void mpi_type_get_true_extent_ (int* datatype, MPI_Aint * lb, MPI_Aint * extent, int* ierr){ - *ierr = MPI_Type_get_true_extent(Datatype::f2c(*datatype), lb, extent); + *ierr = MPI_Type_get_true_extent(simgrid::smpi::Datatype::f2c(*datatype), lb, extent); } void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){ @@ -754,45 +754,45 @@ void mpi_op_create_ (void * function, int* commute, int* op, int* ierr){ } void mpi_op_free_ (int* op, int* ierr){ - MPI_Op tmp=Op::f2c(*op); + MPI_Op tmp= simgrid::smpi::Op::f2c(*op); *ierr = MPI_Op_free(& tmp); if(*ierr == MPI_SUCCESS) { - F2C::free_f(*op); + simgrid::smpi::F2C::free_f(*op); } } void mpi_group_free_ (int* group, int* ierr){ - MPI_Group tmp=Group::f2c(*group); + MPI_Group tmp = simgrid::smpi::Group::f2c(*group); *ierr = MPI_Group_free(&tmp); if(*ierr == MPI_SUCCESS) { - F2C::free_f(*group); + simgrid::smpi::F2C::free_f(*group); } } void mpi_group_size_ (int* group, int *size, int* ierr){ - *ierr = MPI_Group_size(Group::f2c(*group), size); + *ierr = MPI_Group_size(simgrid::smpi::Group::f2c(*group), size); } void mpi_group_rank_ (int* group, int *rank, 
int* ierr){ - *ierr = MPI_Group_rank(Group::f2c(*group), rank); + *ierr = MPI_Group_rank(simgrid::smpi::Group::f2c(*group), rank); } void mpi_group_translate_ranks_ (int* group1, int* n, int *ranks1, int* group2, int *ranks2, int* ierr) { - *ierr = MPI_Group_translate_ranks(Group::f2c(*group1), *n, ranks1, Group::f2c(*group2), ranks2); + *ierr = MPI_Group_translate_ranks(simgrid::smpi::Group::f2c(*group1), *n, ranks1, simgrid::smpi::Group::f2c(*group2), ranks2); } void mpi_group_compare_ (int* group1, int* group2, int *result, int* ierr){ - *ierr = MPI_Group_compare(Group::f2c(*group1), Group::f2c(*group2), result); + *ierr = MPI_Group_compare(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), result); } void mpi_group_union_ (int* group1, int* group2, int* newgroup, int* ierr){ MPI_Group tmp; - *ierr = MPI_Group_union(Group::f2c(*group1), Group::f2c(*group2), &tmp); + *ierr = MPI_Group_union(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -800,7 +800,7 @@ void mpi_group_union_ (int* group1, int* group2, int* newgroup, int* ierr){ void mpi_group_intersection_ (int* group1, int* group2, int* newgroup, int* ierr){ MPI_Group tmp; - *ierr = MPI_Group_intersection(Group::f2c(*group1), Group::f2c(*group2), &tmp); + *ierr = MPI_Group_intersection(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -808,7 +808,7 @@ void mpi_group_intersection_ (int* group1, int* group2, int* newgroup, int* ierr void mpi_group_difference_ (int* group1, int* group2, int* newgroup, int* ierr){ MPI_Group tmp; - *ierr = MPI_Group_difference(Group::f2c(*group1), Group::f2c(*group2), &tmp); + *ierr = MPI_Group_difference(simgrid::smpi::Group::f2c(*group1), simgrid::smpi::Group::f2c(*group2), &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -816,7 +816,7 @@ void mpi_group_difference_ (int* group1, int* group2, int* newgroup, int* ierr){ void mpi_group_excl_ (int* group, int* n, int *ranks, int* newgroup, int* ierr){ MPI_Group tmp; - *ierr = MPI_Group_excl(Group::f2c(*group), *n, ranks, &tmp); + *ierr = MPI_Group_excl(simgrid::smpi::Group::f2c(*group), *n, ranks, &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -825,7 +825,7 @@ void mpi_group_excl_ (int* group, int* n, int *ranks, int* newgroup, int* ierr){ void mpi_group_range_incl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr) { MPI_Group tmp; - *ierr = MPI_Group_range_incl(Group::f2c(*group), *n, ranges, &tmp); + *ierr = MPI_Group_range_incl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -834,7 +834,7 @@ void mpi_group_range_incl_ (int* group, int* n, int ranges[][3], int* newgroup, void mpi_group_range_excl_ (int* group, int* n, int ranges[][3], int* newgroup, int* ierr) { MPI_Group tmp; - *ierr = MPI_Group_range_excl(Group::f2c(*group), *n, ranges, &tmp); + *ierr = MPI_Group_range_excl(simgrid::smpi::Group::f2c(*group), *n, ranges, &tmp); if(*ierr == MPI_SUCCESS) { *newgroup = tmp->add_f(); } @@ -842,17 +842,17 @@ void mpi_group_range_excl_ (int* group, int* n, int ranges[][3], int* newgroup, void mpi_comm_get_attr_ (int* comm, int* comm_keyval, void *attribute_val, int *flag, int* ierr){ - *ierr = MPI_Comm_get_attr (Comm::f2c(*comm), *comm_keyval, attribute_val, flag); + *ierr = MPI_Comm_get_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val, 
flag); } void mpi_comm_set_attr_ (int* comm, int* comm_keyval, void *attribute_val, int* ierr){ - *ierr = MPI_Comm_set_attr ( Comm::f2c(*comm), *comm_keyval, attribute_val); + *ierr = MPI_Comm_set_attr ( simgrid::smpi::Comm::f2c(*comm), *comm_keyval, attribute_val); } void mpi_comm_delete_attr_ (int* comm, int* comm_keyval, int* ierr){ - *ierr = MPI_Comm_delete_attr (Comm::f2c(*comm), *comm_keyval); + *ierr = MPI_Comm_delete_attr (simgrid::smpi::Comm::f2c(*comm), *comm_keyval); } void mpi_comm_create_keyval_ (void* copy_fn, void* delete_fn, int* keyval, void* extra_state, int* ierr){ @@ -866,37 +866,37 @@ void mpi_comm_free_keyval_ (int* keyval, int* ierr) { } void mpi_comm_get_name_ (int* comm, char* name, int* len, int* ierr){ - *ierr = MPI_Comm_get_name(Comm::f2c(*comm), name, len); + *ierr = MPI_Comm_get_name(simgrid::smpi::Comm::f2c(*comm), name, len); if(*len>0) name[*len]=' '; } void mpi_comm_compare_ (int* comm1, int* comm2, int *result, int* ierr){ - *ierr = MPI_Comm_compare(Comm::f2c(*comm1), Comm::f2c(*comm2), result); + *ierr = MPI_Comm_compare(simgrid::smpi::Comm::f2c(*comm1), simgrid::smpi::Comm::f2c(*comm2), result); } void mpi_comm_disconnect_ (int* comm, int* ierr){ - MPI_Comm tmp=Comm::f2c(*comm); + MPI_Comm tmp = simgrid::smpi::Comm::f2c(*comm); *ierr = MPI_Comm_disconnect(&tmp); if(*ierr == MPI_SUCCESS) { - Comm::free_f(*comm); + simgrid::smpi::Comm::free_f(*comm); } } void mpi_request_free_ (int* request, int* ierr){ - MPI_Request tmp=Request::f2c(*request); + MPI_Request tmp=simgrid::smpi::Request::f2c(*request); *ierr = MPI_Request_free(&tmp); if(*ierr == MPI_SUCCESS) { - Request::free_f(*request); + simgrid::smpi::Request::free_f(*request); } } void mpi_sendrecv_replace_ (void *buf, int* count, int* datatype, int* dst, int* sendtag, int* src, int* recvtag, int* comm, MPI_Status* status, int* ierr) { - *ierr = MPI_Sendrecv_replace(buf, *count, Datatype::f2c(*datatype), *dst, *sendtag, *src, - *recvtag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); + *ierr = MPI_Sendrecv_replace(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dst, *sendtag, *src, + *recvtag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); } void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* status, int* ierr) @@ -906,11 +906,11 @@ void mpi_testany_ (int* count, int* requests, int *index, int *flag, MPI_Status* reqs = xbt_new(MPI_Request, *count); for(i = 0; i < *count; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr = MPI_Testany(*count, reqs, index, flag, FORT_STATUS_IGNORE(status)); if(*index!=MPI_UNDEFINED && reqs[*index]==MPI_REQUEST_NULL){ - Request::free_f(requests[*index]); + simgrid::smpi::Request::free_f(requests[*index]); requests[*index]=MPI_FORTRAN_REQUEST_NULL; } xbt_free(reqs); @@ -923,12 +923,12 @@ void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MP reqs = xbt_new(MPI_Request, *incount); for(i = 0; i < *incount; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); } *ierr = MPI_Waitsome(*incount, reqs, outcount, indices, status); for(i=0;i<*outcount;i++){ if(reqs[indices[i]]==MPI_REQUEST_NULL){ - Request::free_f(requests[indices[i]]); + simgrid::smpi::Request::free_f(requests[indices[i]]); requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL; } } @@ -937,91 +937,91 @@ void mpi_waitsome_ (int* incount, int* requests, int *outcount, int *indices, MP void mpi_reduce_local_ (void *inbuf, void *inoutbuf, int* 
count, int* datatype, int* op, int* ierr){ - *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op)); + *ierr = MPI_Reduce_local(inbuf, inoutbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op)); } void mpi_reduce_scatter_block_ (void *sendbuf, void *recvbuf, int* recvcount, int* datatype, int* op, int* comm, int* ierr) { sendbuf = static_cast( FORT_IN_PLACE(sendbuf)); - *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, Datatype::f2c(*datatype), Op::f2c(*op), - Comm::f2c(*comm)); + *ierr = MPI_Reduce_scatter_block(sendbuf, recvbuf, *recvcount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), + simgrid::smpi::Comm::f2c(*comm)); } void mpi_pack_size_ (int* incount, int* datatype, int* comm, int* size, int* ierr) { - *ierr = MPI_Pack_size(*incount, Datatype::f2c(*datatype), Comm::f2c(*comm), size); + *ierr = MPI_Pack_size(*incount, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Comm::f2c(*comm), size); } void mpi_cart_coords_ (int* comm, int* rank, int* maxdims, int* coords, int* ierr) { - *ierr = MPI_Cart_coords(Comm::f2c(*comm), *rank, *maxdims, coords); + *ierr = MPI_Cart_coords(simgrid::smpi::Comm::f2c(*comm), *rank, *maxdims, coords); } void mpi_cart_create_ (int* comm_old, int* ndims, int* dims, int* periods, int* reorder, int* comm_cart, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Cart_create(Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp); + *ierr = MPI_Cart_create(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, *reorder, &tmp); if(*ierr == MPI_SUCCESS) { *comm_cart = tmp->add_f(); } } void mpi_cart_get_ (int* comm, int* maxdims, int* dims, int* periods, int* coords, int* ierr) { - *ierr = MPI_Cart_get(Comm::f2c(*comm), *maxdims, dims, periods, coords); + *ierr = MPI_Cart_get(simgrid::smpi::Comm::f2c(*comm), *maxdims, dims, periods, coords); } void mpi_cart_map_ (int* comm_old, int* ndims, int* dims, int* periods, int* newrank, int* ierr) { - *ierr = MPI_Cart_map(Comm::f2c(*comm_old), *ndims, dims, periods, newrank); + *ierr = MPI_Cart_map(simgrid::smpi::Comm::f2c(*comm_old), *ndims, dims, periods, newrank); } void mpi_cart_rank_ (int* comm, int* coords, int* rank, int* ierr) { - *ierr = MPI_Cart_rank(Comm::f2c(*comm), coords, rank); + *ierr = MPI_Cart_rank(simgrid::smpi::Comm::f2c(*comm), coords, rank); } void mpi_cart_shift_ (int* comm, int* direction, int* displ, int* source, int* dest, int* ierr) { - *ierr = MPI_Cart_shift(Comm::f2c(*comm), *direction, *displ, source, dest); + *ierr = MPI_Cart_shift(simgrid::smpi::Comm::f2c(*comm), *direction, *displ, source, dest); } void mpi_cart_sub_ (int* comm, int* remain_dims, int* comm_new, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Cart_sub(Comm::f2c(*comm), remain_dims, &tmp); + *ierr = MPI_Cart_sub(simgrid::smpi::Comm::f2c(*comm), remain_dims, &tmp); if(*ierr == MPI_SUCCESS) { *comm_new = tmp->add_f(); } } void mpi_cartdim_get_ (int* comm, int* ndims, int* ierr) { - *ierr = MPI_Cartdim_get(Comm::f2c(*comm), ndims); + *ierr = MPI_Cartdim_get(simgrid::smpi::Comm::f2c(*comm), ndims); } void mpi_graph_create_ (int* comm_old, int* nnodes, int* index, int* edges, int* reorder, int* comm_graph, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Graph_create(Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp); + *ierr = MPI_Graph_create(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, *reorder, &tmp); if(*ierr == MPI_SUCCESS) { *comm_graph = tmp->add_f(); } } void mpi_graph_get_ (int* comm, int* maxindex, int* 
maxedges, int* index, int* edges, int* ierr) { - *ierr = MPI_Graph_get(Comm::f2c(*comm), *maxindex, *maxedges, index, edges); + *ierr = MPI_Graph_get(simgrid::smpi::Comm::f2c(*comm), *maxindex, *maxedges, index, edges); } void mpi_graph_map_ (int* comm_old, int* nnodes, int* index, int* edges, int* newrank, int* ierr) { - *ierr = MPI_Graph_map(Comm::f2c(*comm_old), *nnodes, index, edges, newrank); + *ierr = MPI_Graph_map(simgrid::smpi::Comm::f2c(*comm_old), *nnodes, index, edges, newrank); } void mpi_graph_neighbors_ (int* comm, int* rank, int* maxneighbors, int* neighbors, int* ierr) { - *ierr = MPI_Graph_neighbors(Comm::f2c(*comm), *rank, *maxneighbors, neighbors); + *ierr = MPI_Graph_neighbors(simgrid::smpi::Comm::f2c(*comm), *rank, *maxneighbors, neighbors); } void mpi_graph_neighbors_count_ (int* comm, int* rank, int* nneighbors, int* ierr) { - *ierr = MPI_Graph_neighbors_count(Comm::f2c(*comm), *rank, nneighbors); + *ierr = MPI_Graph_neighbors_count(simgrid::smpi::Comm::f2c(*comm), *rank, nneighbors); } void mpi_graphdims_get_ (int* comm, int* nnodes, int* nedges, int* ierr) { - *ierr = MPI_Graphdims_get(Comm::f2c(*comm), nnodes, nedges); + *ierr = MPI_Graphdims_get(simgrid::smpi::Comm::f2c(*comm), nnodes, nedges); } void mpi_topo_test_ (int* comm, int* top_type, int* ierr) { - *ierr = MPI_Topo_test(Comm::f2c(*comm), top_type); + *ierr = MPI_Topo_test(simgrid::smpi::Comm::f2c(*comm), top_type); } void mpi_error_class_ (int* errorcode, int* errorclass, int* ierr) { @@ -1037,31 +1037,31 @@ void mpi_errhandler_free_ (void* errhandler, int* ierr) { } void mpi_errhandler_get_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_get(Comm::f2c(*comm), static_cast(errhandler)); + *ierr = MPI_Errhandler_get(simgrid::smpi::Comm::f2c(*comm), static_cast(errhandler)); } void mpi_errhandler_set_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(Comm::f2c(*comm), *static_cast(errhandler)); + *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast(errhandler)); } void mpi_comm_set_errhandler_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(Comm::f2c(*comm), *static_cast(errhandler)); + *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), *static_cast(errhandler)); } void mpi_comm_get_errhandler_ (int* comm, void* errhandler, int* ierr) { - *ierr = MPI_Errhandler_set(Comm::f2c(*comm), static_cast(errhandler)); + *ierr = MPI_Errhandler_set(simgrid::smpi::Comm::f2c(*comm), static_cast(errhandler)); } void mpi_type_contiguous_ (int* count, int* old_type, int* newtype, int* ierr) { MPI_Datatype tmp; - *ierr = MPI_Type_contiguous(*count, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_contiguous(*count, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } } void mpi_cancel_ (int* request, int* ierr) { - MPI_Request tmp=Request::f2c(*request); + MPI_Request tmp=simgrid::smpi::Request::f2c(*request); *ierr = MPI_Cancel(&tmp); } @@ -1079,13 +1079,13 @@ void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, M reqs = xbt_new(MPI_Request, *incount); for(i = 0; i < *incount; i++) { - reqs[i] = Request::f2c(requests[i]); + reqs[i] = simgrid::smpi::Request::f2c(requests[i]); indices[i]=0; } *ierr = MPI_Testsome(*incount, reqs, outcount, indices, FORT_STATUSES_IGNORE(statuses)); for(i=0;i<*incount;i++){ if(indices[i] && reqs[indices[i]]==MPI_REQUEST_NULL){ - Request::free_f(requests[indices[i]]); + simgrid::smpi::Request::free_f(requests[indices[i]]); 
requests[indices[i]]=MPI_FORTRAN_REQUEST_NULL; } } @@ -1093,31 +1093,31 @@ void mpi_testsome_ (int* incount, int* requests, int* outcount, int* indices, M } void mpi_comm_test_inter_ (int* comm, int* flag, int* ierr) { - *ierr = MPI_Comm_test_inter(Comm::f2c(*comm), flag); + *ierr = MPI_Comm_test_inter(simgrid::smpi::Comm::f2c(*comm), flag); } void mpi_unpack_ (void* inbuf, int* insize, int* position, void* outbuf, int* outcount, int* type, int* comm, int* ierr) { - *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, Datatype::f2c(*type), Comm::f2c(*comm)); + *ierr = MPI_Unpack(inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*type), simgrid::smpi::Comm::f2c(*comm)); } void mpi_pack_external_size_ (char *datarep, int* incount, int* datatype, MPI_Aint *size, int* ierr){ - *ierr = MPI_Pack_external_size(datarep, *incount, Datatype::f2c(*datatype), size); + *ierr = MPI_Pack_external_size(datarep, *incount, simgrid::smpi::Datatype::f2c(*datatype), size); } void mpi_pack_external_ (char *datarep, void *inbuf, int* incount, int* datatype, void *outbuf, MPI_Aint* outcount, MPI_Aint *position, int* ierr){ - *ierr = MPI_Pack_external(datarep, inbuf, *incount, Datatype::f2c(*datatype), outbuf, *outcount, position); + *ierr = MPI_Pack_external(datarep, inbuf, *incount, simgrid::smpi::Datatype::f2c(*datatype), outbuf, *outcount, position); } void mpi_unpack_external_ ( char *datarep, void *inbuf, MPI_Aint* insize, MPI_Aint *position, void *outbuf, int* outcount, int* datatype, int* ierr){ - *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, Datatype::f2c(*datatype)); + *ierr = MPI_Unpack_external( datarep, inbuf, *insize, position, outbuf, *outcount, simgrid::smpi::Datatype::f2c(*datatype)); } void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) { MPI_Datatype tmp; - *ierr = MPI_Type_hindexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1125,7 +1125,7 @@ void mpi_type_hindexed_ (int* count, int* blocklens, MPI_Aint* indices, int* old void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, int* old_type, int* newtype, int* ierr){ MPI_Datatype tmp; - *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_create_hindexed(*count, blocklens, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1134,7 +1134,7 @@ void mpi_type_create_hindexed_(int* count, int* blocklens, MPI_Aint* indices, in void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* indices, int* old_type, int* newtype, int* ierr) { MPI_Datatype tmp; - *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_create_hindexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1142,7 +1142,7 @@ void mpi_type_create_hindexed_block_ (int* count, int* blocklength, MPI_Aint* in void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, int* newtype, int* ierr) { MPI_Datatype tmp; - *ierr = MPI_Type_indexed(*count, blocklens, indices, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_indexed(*count, blocklens, 
indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1151,7 +1151,7 @@ void mpi_type_indexed_ (int* count, int* blocklens, int* indices, int* old_type, void mpi_type_create_indexed_block_ (int* count, int* blocklength, int* indices, int* old_type, int*newtype, int* ierr){ MPI_Datatype tmp; - *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, Datatype::f2c(*old_type), &tmp); + *ierr = MPI_Type_create_indexed_block(*count, *blocklength, indices, simgrid::smpi::Datatype::f2c(*old_type), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1162,7 +1162,7 @@ void mpi_type_struct_ (int* count, int* blocklens, MPI_Aint* indices, int* old_t int i=0; MPI_Datatype* types = static_cast(xbt_malloc(*count*sizeof(MPI_Datatype))); for(i=0; i< *count; i++){ - types[i] = Datatype::f2c(old_types[i]); + types[i] = simgrid::smpi::Datatype::f2c(old_types[i]); } *ierr = MPI_Type_struct(*count, blocklens, indices, types, &tmp); if(*ierr == MPI_SUCCESS) { @@ -1176,7 +1176,7 @@ void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* int i=0; MPI_Datatype* types = static_cast(xbt_malloc(*count*sizeof(MPI_Datatype))); for(i=0; i< *count; i++){ - types[i] = Datatype::f2c(old_types[i]); + types[i] = simgrid::smpi::Datatype::f2c(old_types[i]); } *ierr = MPI_Type_create_struct(*count, blocklens, indices, types, &tmp); if(*ierr == MPI_SUCCESS) { @@ -1186,12 +1186,12 @@ void mpi_type_create_struct_(int* count, int* blocklens, MPI_Aint* indices, int* } void mpi_ssend_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* ierr) { - *ierr = MPI_Ssend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm)); + *ierr = MPI_Ssend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm)); } void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, int* comm, int* request, int* ierr) { MPI_Request tmp; - *ierr = MPI_Ssend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp); + *ierr = MPI_Ssend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *request = tmp->add_f(); } @@ -1200,7 +1200,7 @@ void mpi_ssend_init_ (void* buf, int* count, int* datatype, int* dest, int* tag, void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, int* remote_leader, int* tag, int* comm_out, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Intercomm_create(Comm::f2c(*local_comm), *local_leader,Comm::f2c(*peer_comm), *remote_leader, + *ierr = MPI_Intercomm_create(simgrid::smpi::Comm::f2c(*local_comm), *local_leader, simgrid::smpi::Comm::f2c(*peer_comm), *remote_leader, *tag, &tmp); if(*ierr == MPI_SUCCESS) { *comm_out = tmp->add_f(); @@ -1209,19 +1209,19 @@ void mpi_intercomm_create_ (int* local_comm, int *local_leader, int* peer_comm, void mpi_intercomm_merge_ (int* comm, int* high, int* comm_out, int* ierr) { MPI_Comm tmp; - *ierr = MPI_Intercomm_merge(Comm::f2c(*comm), *high, &tmp); + *ierr = MPI_Intercomm_merge(simgrid::smpi::Comm::f2c(*comm), *high, &tmp); if(*ierr == MPI_SUCCESS) { *comm_out = tmp->add_f(); } } void mpi_bsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* ierr) { - *ierr = MPI_Bsend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm)); + *ierr = MPI_Bsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, 
simgrid::smpi::Comm::f2c(*comm)); } void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { MPI_Request tmp; - *ierr = MPI_Bsend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp); + *ierr = MPI_Bsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *request = tmp->add_f(); } @@ -1229,7 +1229,7 @@ void mpi_bsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { MPI_Request tmp; - *ierr = MPI_Ibsend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp); + *ierr = MPI_Ibsend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *request = tmp->add_f(); } @@ -1237,39 +1237,39 @@ void mpi_ibsend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int void mpi_comm_remote_group_ (int* comm, int* group, int* ierr) { MPI_Group tmp; - *ierr = MPI_Comm_remote_group(Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_remote_group(simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *group = tmp->c2f(); } } void mpi_comm_remote_size_ (int* comm, int* size, int* ierr) { - *ierr = MPI_Comm_remote_size(Comm::f2c(*comm), size); + *ierr = MPI_Comm_remote_size(simgrid::smpi::Comm::f2c(*comm), size); } void mpi_issend_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { MPI_Request tmp; - *ierr = MPI_Issend(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp); + *ierr = MPI_Issend(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *request = tmp->add_f(); } } void mpi_probe_ (int* source, int* tag, int* comm, MPI_Status* status, int* ierr) { - *ierr = MPI_Probe(*source, *tag, Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); + *ierr = MPI_Probe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), FORT_STATUS_IGNORE(status)); } void mpi_attr_delete_ (int* comm, int* keyval, int* ierr) { - *ierr = MPI_Attr_delete(Comm::f2c(*comm), *keyval); + *ierr = MPI_Attr_delete(simgrid::smpi::Comm::f2c(*comm), *keyval); } void mpi_attr_put_ (int* comm, int* keyval, void* attr_value, int* ierr) { - *ierr = MPI_Attr_put(Comm::f2c(*comm), *keyval, attr_value); + *ierr = MPI_Attr_put(simgrid::smpi::Comm::f2c(*comm), *keyval, attr_value); } void mpi_rsend_init_ (void* buf, int* count, int* datatype, int *dest, int* tag, int* comm, int* request, int* ierr) { MPI_Request tmp; - *ierr = MPI_Rsend_init(buf, *count, Datatype::f2c(*datatype), *dest, *tag, Comm::f2c(*comm), &tmp); + *ierr = MPI_Rsend_init(buf, *count, simgrid::smpi::Datatype::f2c(*datatype), *dest, *tag, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *request = tmp->add_f(); } @@ -1288,11 +1288,11 @@ void mpi_test_cancelled_ (MPI_Status* status, int* flag, int* ierr) { } void mpi_pack_ (void* inbuf, int* incount, int* type, void* outbuf, int* outcount, int* position, int* comm, int* ierr) { - *ierr = MPI_Pack(inbuf, *incount, Datatype::f2c(*type), outbuf, *outcount, position, Comm::f2c(*comm)); + *ierr = MPI_Pack(inbuf, *incount, simgrid::smpi::Datatype::f2c(*type), outbuf, *outcount, position, simgrid::smpi::Comm::f2c(*comm)); } void mpi_get_elements_ (MPI_Status* status, int* 
datatype, int* elements, int* ierr) { - *ierr = MPI_Get_elements(status, Datatype::f2c(*datatype), elements); + *ierr = MPI_Get_elements(status, simgrid::smpi::Datatype::f2c(*datatype), elements); } void mpi_dims_create_ (int* nnodes, int* ndims, int* dims, int* ierr) { @@ -1300,19 +1300,19 @@ void mpi_dims_create_ (int* nnodes, int* ndims, int* dims, int* ierr) { } void mpi_iprobe_ (int* source, int* tag, int* comm, int* flag, MPI_Status* status, int* ierr) { - *ierr = MPI_Iprobe(*source, *tag, Comm::f2c(*comm), flag, status); + *ierr = MPI_Iprobe(*source, *tag, simgrid::smpi::Comm::f2c(*comm), flag, status); } void mpi_type_get_envelope_ ( int* datatype, int *num_integers, int *num_addresses, int *num_datatypes, int *combiner, int* ierr){ - *ierr = MPI_Type_get_envelope( Datatype::f2c(*datatype), num_integers, + *ierr = MPI_Type_get_envelope( simgrid::smpi::Datatype::f2c(*datatype), num_integers, num_addresses, num_datatypes, combiner); } void mpi_type_get_contents_ (int* datatype, int* max_integers, int* max_addresses, int* max_datatypes, int* array_of_integers, MPI_Aint* array_of_addresses, int* array_of_datatypes, int* ierr){ - *ierr = MPI_Type_get_contents(Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes, + *ierr = MPI_Type_get_contents(simgrid::smpi::Datatype::f2c(*datatype), *max_integers, *max_addresses,*max_datatypes, array_of_integers, array_of_addresses, reinterpret_cast(array_of_datatypes)); } @@ -1322,7 +1322,7 @@ void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gs MPI_Datatype tmp; *ierr = MPI_Type_create_darray(*size, *rank, *ndims, array_of_gsizes, array_of_distribs, array_of_dargs, array_of_psizes, - *order, Datatype::f2c(*oldtype), &tmp) ; + *order, simgrid::smpi::Datatype::f2c(*oldtype), &tmp) ; if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1330,7 +1330,7 @@ void mpi_type_create_darray_ (int* size, int* rank, int* ndims, int* array_of_gs void mpi_type_create_resized_ (int* oldtype,MPI_Aint* lb, MPI_Aint* extent, int*newtype, int* ierr){ MPI_Datatype tmp; - *ierr = MPI_Type_create_resized(Datatype::f2c(*oldtype),*lb, *extent, &tmp); + *ierr = MPI_Type_create_resized(simgrid::smpi::Datatype::f2c(*oldtype),*lb, *extent, &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1340,7 +1340,7 @@ void mpi_type_create_subarray_ (int* ndims,int *array_of_sizes, int *array_of_su int* order, int* oldtype, int*newtype, int* ierr){ MPI_Datatype tmp; *ierr = MPI_Type_create_subarray(*ndims,array_of_sizes, array_of_subsizes, array_of_starts, *order, - Datatype::f2c(*oldtype), &tmp); + simgrid::smpi::Datatype::f2c(*oldtype), &tmp); if(*ierr == MPI_SUCCESS) { *newtype = tmp->add_f(); } @@ -1357,24 +1357,24 @@ void mpi_type_match_size_ (int* typeclass,int* size,int* datatype, int* ierr){ void mpi_alltoallw_ ( void *sendbuf, int *sendcnts, int *sdispls, int* sendtypes, void *recvbuf, int *recvcnts, int *rdispls, int* recvtypes, int* comm, int* ierr){ *ierr = MPI_Alltoallw( sendbuf, sendcnts, sdispls, reinterpret_cast(sendtypes), recvbuf, recvcnts, rdispls, - reinterpret_cast(recvtypes), Comm::f2c(*comm)); + reinterpret_cast(recvtypes), simgrid::smpi::Comm::f2c(*comm)); } void mpi_exscan_ (void *sendbuf, void *recvbuf, int* count, int* datatype, int* op, int* comm, int* ierr){ - *ierr = MPI_Exscan(sendbuf, recvbuf, *count, Datatype::f2c(*datatype), Op::f2c(*op), Comm::f2c(*comm)); + *ierr = MPI_Exscan(sendbuf, recvbuf, *count, simgrid::smpi::Datatype::f2c(*datatype), simgrid::smpi::Op::f2c(*op), 
simgrid::smpi::Comm::f2c(*comm)); } void mpi_comm_set_name_ (int* comm, char* name, int* ierr, int size){ char* tname = xbt_new(char, size+1); strncpy(tname, name, size); tname[size]='\0'; - *ierr = MPI_Comm_set_name (Comm::f2c(*comm), tname); + *ierr = MPI_Comm_set_name (simgrid::smpi::Comm::f2c(*comm), tname); xbt_free(tname); } void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){ MPI_Comm tmp; - *ierr = MPI_Comm_dup_with_info(Comm::f2c(*comm),Info::f2c(*info),&tmp); + *ierr = MPI_Comm_dup_with_info(simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info),&tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } @@ -1382,19 +1382,19 @@ void mpi_comm_dup_with_info_ (int* comm, int* info, int* newcomm, int* ierr){ void mpi_comm_split_type_ (int* comm, int* split_type, int* key, int* info, int* newcomm, int* ierr){ MPI_Comm tmp; - *ierr = MPI_Comm_split_type(Comm::f2c(*comm), *split_type, *key, Info::f2c(*info), &tmp); + *ierr = MPI_Comm_split_type(simgrid::smpi::Comm::f2c(*comm), *split_type, *key, simgrid::smpi::Info::f2c(*info), &tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } } void mpi_comm_set_info_ (int* comm, int* info, int* ierr){ - *ierr = MPI_Comm_set_info (Comm::f2c(*comm), Info::f2c(*info)); + *ierr = MPI_Comm_set_info (simgrid::smpi::Comm::f2c(*comm), simgrid::smpi::Info::f2c(*info)); } void mpi_comm_get_info_ (int* comm, int* info, int* ierr){ MPI_Info tmp; - *ierr = MPI_Comm_get_info (Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_get_info (simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr==MPI_SUCCESS){ *info = tmp->c2f(); } @@ -1417,12 +1417,12 @@ void mpi_add_error_string_ ( int* errorcode, char *string, int* ierr){ } void mpi_comm_call_errhandler_ (int* comm,int* errorcode, int* ierr){ - *ierr = MPI_Comm_call_errhandler(Comm::f2c(*comm), *errorcode); + *ierr = MPI_Comm_call_errhandler(simgrid::smpi::Comm::f2c(*comm), *errorcode); } void mpi_info_dup_ (int* info, int* newinfo, int* ierr){ MPI_Info tmp; - *ierr = MPI_Info_dup(Info::f2c(*info), &tmp); + *ierr = MPI_Info_dup(simgrid::smpi::Info::f2c(*info), &tmp); if(*ierr==MPI_SUCCESS){ *newinfo= tmp->add_f(); } @@ -1438,7 +1438,7 @@ void mpi_info_get_valuelen_ ( int* info, char *key, int *valuelen, int *flag, in char* tkey = xbt_new(char, keylen+1); strncpy(tkey, key, keylen); tkey[keylen]='\0'; - *ierr = MPI_Info_get_valuelen( Info::f2c(*info), tkey, valuelen, flag); + *ierr = MPI_Info_get_valuelen( simgrid::smpi::Info::f2c(*info), tkey, valuelen, flag); xbt_free(tkey); } @@ -1452,16 +1452,16 @@ void mpi_info_delete_ (int* info, char *key, int* ierr, unsigned int keylen){ char* tkey = xbt_new(char, keylen+1); strncpy(tkey, key, keylen); tkey[keylen]='\0'; - *ierr = MPI_Info_delete(Info::f2c(*info), tkey); + *ierr = MPI_Info_delete(simgrid::smpi::Info::f2c(*info), tkey); xbt_free(tkey); } void mpi_info_get_nkeys_ ( int* info, int *nkeys, int* ierr){ - *ierr = MPI_Info_get_nkeys( Info::f2c(*info), nkeys); + *ierr = MPI_Info_get_nkeys( simgrid::smpi::Info::f2c(*info), nkeys); } void mpi_info_get_nthkey_ ( int* info, int* n, char *key, int* ierr, unsigned int keylen){ - *ierr = MPI_Info_get_nthkey( Info::f2c(*info), *n, key); + *ierr = MPI_Info_get_nthkey( simgrid::smpi::Info::f2c(*info), *n, key); unsigned int i = 0; for (i=strlen(key); i(info), *root, Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_connect( port_name, *reinterpret_cast(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } @@ -1538,7 +1538,7 @@ void mpi_close_port_ 
( char *port_name, int* ierr){ void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*newcomm, int* ierr){ MPI_Comm tmp; - *ierr = MPI_Comm_accept( port_name, *reinterpret_cast(info), *root, Comm::f2c(*comm), &tmp); + *ierr = MPI_Comm_accept( port_name, *reinterpret_cast(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp); if(*ierr == MPI_SUCCESS) { *newcomm = tmp->add_f(); } @@ -1547,7 +1547,7 @@ void mpi_comm_accept_ ( char *port_name, int* info, int* root, int* comm, int*ne void mpi_comm_spawn_ ( char *command, char *argv, int* maxprocs, int* info, int* root, int* comm, int* intercomm, int* array_of_errcodes, int* ierr){ MPI_Comm tmp; - *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast(info), *root, Comm::f2c(*comm), &tmp, + *ierr = MPI_Comm_spawn( command, nullptr, *maxprocs, *reinterpret_cast(info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes); if(*ierr == MPI_SUCCESS) { *intercomm = tmp->add_f(); @@ -1559,7 +1559,7 @@ void mpi_comm_spawn_multiple_ ( int* count, char *array_of_commands, char** arra int* comm, int* intercomm, int* array_of_errcodes, int* ierr){ MPI_Comm tmp; *ierr = MPI_Comm_spawn_multiple(* count, &array_of_commands, &array_of_argv, array_of_maxprocs, - reinterpret_cast(array_of_info), *root, Comm::f2c(*comm), &tmp, array_of_errcodes); + reinterpret_cast(array_of_info), *root, simgrid::smpi::Comm::f2c(*comm), &tmp, array_of_errcodes); if(*ierr == MPI_SUCCESS) { *intercomm = tmp->add_f(); } @@ -1578,23 +1578,23 @@ void mpi_file_close_ ( int* file, int* ierr){ } void mpi_file_delete_ ( char* filename, int* info, int* ierr){ - *ierr= MPI_File_delete(filename, Info::f2c(*info)); + *ierr= MPI_File_delete(filename, simgrid::smpi::Info::f2c(*info)); } void mpi_file_open_ ( int* comm, char* filename, int* amode, int* info, int* fh, int* ierr){ - *ierr= MPI_File_open(Comm::f2c(*comm), filename, *amode, Info::f2c(*info), reinterpret_cast(*fh)); + *ierr= MPI_File_open(simgrid::smpi::Comm::f2c(*comm), filename, *amode, simgrid::smpi::Info::f2c(*info), reinterpret_cast(*fh)); } void mpi_file_set_view_ ( int* fh, long long int* offset, int* etype, int* filetype, char* datarep, int* info, int* ierr){ - *ierr= MPI_File_set_view(reinterpret_cast(*fh) , reinterpret_cast(*offset), Datatype::f2c(*etype), Datatype::f2c(*filetype), datarep, Info::f2c(*info)); + *ierr= MPI_File_set_view(reinterpret_cast(*fh) , reinterpret_cast(*offset), simgrid::smpi::Datatype::f2c(*etype), simgrid::smpi::Datatype::f2c(*filetype), datarep, simgrid::smpi::Info::f2c(*info)); } void mpi_file_read_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){ - *ierr= MPI_File_read(reinterpret_cast(*fh), buf, *count, Datatype::f2c(*datatype), status); + *ierr= MPI_File_read(reinterpret_cast(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status); } void mpi_file_write_ ( int* fh, void* buf, int* count, int* datatype, MPI_Status* status, int* ierr){ - *ierr= MPI_File_write(reinterpret_cast(*fh), buf, *count, Datatype::f2c(*datatype), status); + *ierr= MPI_File_write(reinterpret_cast(*fh), buf, *count, simgrid::smpi::Datatype::f2c(*datatype), status); } } // extern "C" diff --git a/src/smpi/smpi_global.cpp b/src/smpi/smpi_global.cpp index 6878d5d5b0..8459d4e9ee 100644 --- a/src/smpi/smpi_global.cpp +++ b/src/smpi/smpi_global.cpp @@ -42,7 +42,7 @@ struct papi_process_data { #endif std::unordered_map location2speedup; -Process **process_data = nullptr; +simgrid::smpi::Process **process_data = nullptr; int process_count = 
0; int smpi_universe_size = 0; int* index_to_process_data = nullptr; @@ -66,13 +66,13 @@ int smpi_process_count() return process_count; } -Process* smpi_process() +simgrid::smpi::Process* smpi_process() { simgrid::MsgActorExt* msgExt = static_cast(SIMIX_process_self()->data); - return static_cast(msgExt->data); + return static_cast(msgExt->data); } -Process* smpi_process_remote(int index) +simgrid::smpi::Process* smpi_process_remote(int index) { return process_data[index_to_process_data[index]]; } @@ -82,7 +82,7 @@ MPI_Comm smpi_process_comm_self(){ } void smpi_process_init(int *argc, char ***argv){ - Process::init(argc, argv); + simgrid::smpi::Process::init(argc, argv); } int smpi_process_index(){ @@ -115,7 +115,7 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b XBT_DEBUG("Privatization : We are copying from a zone inside global memory... Saving data to temp buffer !"); smpi_switch_data_segment( - (static_cast((static_cast(comm->src_proc->data)->data))->index())); + (static_cast((static_cast(comm->src_proc->data)->data))->index())); tmpbuff = static_cast(xbt_malloc(buff_size)); memcpy(tmpbuff, buff, buff_size); } @@ -124,7 +124,7 @@ void smpi_comm_copy_buffer_callback(smx_activity_t synchro, void *buff, size_t b && ((char*)comm->dst_buff < smpi_start_data_exe + smpi_size_data_exe )){ XBT_DEBUG("Privatization : We are copying to a zone inside global memory - Switch data segment"); smpi_switch_data_segment( - (static_cast((static_cast(comm->dst_proc->data)->data))->index())); + (static_cast((static_cast(comm->dst_proc->data)->data))->index())); } memcpy(comm->dst_buff, tmpbuff, buff_size); @@ -272,15 +272,15 @@ void smpi_global_init() smpirun=1; } smpi_universe_size = process_count; - process_data = new Process*[process_count]; + process_data = new simgrid::smpi::Process*[process_count]; for (i = 0; i < process_count; i++) { - process_data[i] = new Process(i); + process_data[i] = new simgrid::smpi::Process(i); } //if the process was launched through smpirun script we generate a global mpi_comm_world //if not, we let MPI_COMM_NULL, and the comm world will be private to each mpi instance if(smpirun){ - group = new Group(process_count); - MPI_COMM_WORLD = new Comm(group, nullptr); + group = new simgrid::smpi::Group(process_count); + MPI_COMM_WORLD = new simgrid::smpi::Comm(group, nullptr); MPI_Attr_put(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE, reinterpret_cast(process_count)); msg_bar_t bar = MSG_barrier_init(process_count); @@ -304,10 +304,10 @@ void smpi_global_destroy() } for (int i = 0; i < count; i++) { if(process_data[i]->comm_self()!=MPI_COMM_NULL){ - Comm::destroy(process_data[i]->comm_self()); + simgrid::smpi::Comm::destroy(process_data[i]->comm_self()); } if(process_data[i]->comm_intra()!=MPI_COMM_NULL){ - Comm::destroy(process_data[i]->comm_intra()); + simgrid::smpi::Comm::destroy(process_data[i]->comm_intra()); } xbt_os_timer_free(process_data[i]->timer()); xbt_mutex_destroy(process_data[i]->mailboxes_mutex()); @@ -318,9 +318,9 @@ void smpi_global_destroy() if (MPI_COMM_WORLD != MPI_COMM_UNINITIALIZED){ MPI_COMM_WORLD->cleanup_smp(); - MPI_COMM_WORLD->cleanup_attr(); - if(Colls::smpi_coll_cleanup_callback!=nullptr) - Colls::smpi_coll_cleanup_callback(); + MPI_COMM_WORLD->cleanup_attr(); + if(simgrid::smpi::Colls::smpi_coll_cleanup_callback!=nullptr) + simgrid::smpi::Colls::smpi_coll_cleanup_callback(); delete MPI_COMM_WORLD; } @@ -347,7 +347,7 @@ void __attribute__ ((weak)) user_main_() int __attribute__ ((weak)) smpi_simulated_main_(int argc, char 
**argv) { - Process::init(&argc, &argv); + simgrid::smpi::Process::init(&argc, &argv); user_main_(); return 0; } @@ -396,8 +396,8 @@ static void smpi_init_logs(){ static void smpi_init_options(){ - Colls::set_collectives(); - Colls::smpi_coll_cleanup_callback=nullptr; + simgrid::smpi::Colls::set_collectives(); + simgrid::smpi::Colls::smpi_coll_cleanup_callback=nullptr; smpi_cpu_threshold = xbt_cfg_get_double("smpi/cpu-threshold"); smpi_host_speed = xbt_cfg_get_double("smpi/host-speed"); smpi_privatize_global_variables = xbt_cfg_get_boolean("smpi/privatize-global-variables"); diff --git a/src/smpi/smpi_group.cpp b/src/smpi/smpi_group.cpp index adb199d040..48f1118097 100644 --- a/src/smpi/smpi_group.cpp +++ b/src/smpi/smpi_group.cpp @@ -7,7 +7,8 @@ #include "private.h" XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_group, smpi, "Logging specific to SMPI (group)"); - Group mpi_MPI_GROUP_EMPTY; + +simgrid::smpi::Group mpi_MPI_GROUP_EMPTY; MPI_Group MPI_GROUP_EMPTY=&mpi_MPI_GROUP_EMPTY; namespace simgrid{ diff --git a/src/smpi/smpi_keyvals.hpp b/src/smpi/smpi_keyvals.hpp index 7f461ba621..c302a55667 100644 --- a/src/smpi/smpi_keyvals.hpp +++ b/src/smpi/smpi_keyvals.hpp @@ -11,10 +11,6 @@ #include #include -namespace simgrid{ -namespace smpi{ - - typedef struct smpi_delete_fn{ MPI_Comm_delete_attr_function *comm_delete_fn; MPI_Type_delete_attr_function *type_delete_fn; @@ -32,8 +28,12 @@ typedef struct s_smpi_key_elem { smpi_delete_fn delete_fn; int refcount; } s_smpi_mpi_key_elem_t; + typedef struct s_smpi_key_elem *smpi_key_elem; +namespace simgrid{ +namespace smpi{ + class Keyval{ private: std::unordered_map attributes_; diff --git a/src/smpi/smpi_pmpi.cpp b/src/smpi/smpi_pmpi.cpp index a265c3f8b4..3db40738cd 100644 --- a/src/smpi/smpi_pmpi.cpp +++ b/src/smpi/smpi_pmpi.cpp @@ -28,7 +28,7 @@ int PMPI_Init(int *argc, char ***argv) int already_init; MPI_Initialized(&already_init); if(already_init == 0){ - Process::init(argc, argv); + simgrid::smpi::Process::init(argc, argv); smpi_process()->mark_as_initialized(); int rank = smpi_process()->index(); TRACE_smpi_init(rank); @@ -151,7 +151,7 @@ int PMPI_Type_free(MPI_Datatype * datatype) if (*datatype == MPI_DATATYPE_NULL) { return MPI_ERR_ARG; } else { - Datatype::unref(*datatype); + simgrid::smpi::Datatype::unref(*datatype); return MPI_SUCCESS; } } @@ -237,10 +237,10 @@ int PMPI_Type_dup(MPI_Datatype datatype, MPI_Datatype *newtype){ if (datatype == MPI_DATATYPE_NULL) { retval=MPI_ERR_TYPE; } else { - *newtype = new Datatype(datatype, &retval); + *newtype = new simgrid::smpi::Datatype(datatype, &retval); //error when duplicating, free the new datatype if(retval!=MPI_SUCCESS){ - Datatype::unref(*newtype); + simgrid::smpi::Datatype::unref(*newtype); *newtype = MPI_DATATYPE_NULL; } } @@ -252,7 +252,7 @@ int PMPI_Op_create(MPI_User_function * function, int commute, MPI_Op * op) if (function == nullptr || op == nullptr) { return MPI_ERR_ARG; } else { - *op = new Op(function, (commute!=0)); + *op = new simgrid::smpi::Op(function, (commute!=0)); return MPI_SUCCESS; } } @@ -276,7 +276,7 @@ int PMPI_Group_free(MPI_Group * group) return MPI_ERR_ARG; } else { if(*group != MPI_COMM_WORLD->group() && *group != MPI_GROUP_EMPTY) - Group::unref(*group); + simgrid::smpi::Group::unref(*group); *group = MPI_GROUP_NULL; return MPI_SUCCESS; } @@ -531,7 +531,7 @@ int PMPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm * newcomm) return MPI_SUCCESS; }else{ group->ref(); - *newcomm = new Comm(group, nullptr); + *newcomm = new simgrid::smpi::Comm(group, nullptr); return 
MPI_SUCCESS; } } @@ -543,7 +543,7 @@ int PMPI_Comm_free(MPI_Comm * comm) } else if (*comm == MPI_COMM_NULL) { return MPI_ERR_COMM; } else { - Comm::destroy(*comm); + simgrid::smpi::Comm::destroy(*comm); *comm = MPI_COMM_NULL; return MPI_SUCCESS; } @@ -557,7 +557,7 @@ int PMPI_Comm_disconnect(MPI_Comm * comm) } else if (*comm == MPI_COMM_NULL) { return MPI_ERR_COMM; } else { - Comm::destroy(*comm); + simgrid::smpi::Comm::destroy(*comm); *comm = MPI_COMM_NULL; return MPI_SUCCESS; } @@ -612,7 +612,7 @@ int PMPI_Send_init(void *buf, int count, MPI_Datatype datatype, int dst, int tag } else if (dst == MPI_PROC_NULL) { retval = MPI_SUCCESS; } else { - *request = Request::send_init(buf, count, datatype, dst, tag, comm); + *request = simgrid::smpi::Request::send_init(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -635,7 +635,7 @@ int PMPI_Recv_init(void *buf, int count, MPI_Datatype datatype, int src, int tag } else if (src == MPI_PROC_NULL) { retval = MPI_SUCCESS; } else { - *request = Request::recv_init(buf, count, datatype, src, tag, comm); + *request = simgrid::smpi::Request::recv_init(buf, count, datatype, src, tag, comm); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -658,7 +658,7 @@ int PMPI_Ssend_init(void* buf, int count, MPI_Datatype datatype, int dst, int ta } else if (dst == MPI_PROC_NULL) { retval = MPI_SUCCESS; } else { - *request = Request::ssend_init(buf, count, datatype, dst, tag, comm); + *request = simgrid::smpi::Request::ssend_init(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -696,7 +696,7 @@ int PMPI_Startall(int count, MPI_Request * requests) } } if(retval != MPI_ERR_REQUEST) { - Request::startall(count, requests); + simgrid::smpi::Request::startall(count, requests); } } smpi_bench_begin(); @@ -711,7 +711,7 @@ int PMPI_Request_free(MPI_Request * request) if (*request == MPI_REQUEST_NULL) { retval = MPI_ERR_ARG; } else { - Request::unref(request); + simgrid::smpi::Request::unref(request); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -756,7 +756,7 @@ int PMPI_Irecv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MP extra->send_size = count*dt_size_send; TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); - *request = Request::irecv(buf, count, datatype, src, tag, comm); + *request = simgrid::smpi::Request::irecv(buf, count, datatype, src, tag, comm); retval = MPI_SUCCESS; TRACE_smpi_ptp_out(rank, src_traced, rank, __FUNCTION__); @@ -805,7 +805,7 @@ int PMPI_Isend(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MP TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); - *request = Request::isend(buf, count, datatype, dst, tag, comm); + *request = simgrid::smpi::Request::isend(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); @@ -853,7 +853,7 @@ int PMPI_Issend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, M TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); TRACE_smpi_send(rank, rank, dst_traced, tag, count*datatype->size()); - *request = Request::issend(buf, count, datatype, dst, tag, comm); + *request = simgrid::smpi::Request::issend(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); @@ -873,7 +873,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI if (comm == MPI_COMM_NULL) { 
retval = MPI_ERR_COMM; } else if (src == MPI_PROC_NULL) { - Status::empty(status); + simgrid::smpi::Status::empty(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; } else if (src!=MPI_ANY_SOURCE && (src >= comm->group()->size() || src <0)){ @@ -899,7 +899,7 @@ int PMPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI extra->send_size = count * dt_size_send; TRACE_smpi_ptp_in(rank, src_traced, rank, __FUNCTION__, extra); - Request::recv(buf, count, datatype, src, tag, comm, status); + simgrid::smpi::Request::recv(buf, count, datatype, src, tag, comm, status); retval = MPI_SUCCESS; // the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) @@ -953,7 +953,7 @@ int PMPI_Send(void *buf, int count, MPI_Datatype datatype, int dst, int tag, MPI TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); } - Request::send(buf, count, datatype, dst, tag, comm); + simgrid::smpi::Request::send(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); @@ -997,7 +997,7 @@ int PMPI_Ssend(void* buf, int count, MPI_Datatype datatype, int dst, int tag, MP TRACE_smpi_ptp_in(rank, rank, dst_traced, __FUNCTION__, extra); TRACE_smpi_send(rank, rank, dst_traced, tag,count*datatype->size()); - Request::ssend(buf, count, datatype, dst, tag, comm); + simgrid::smpi::Request::ssend(buf, count, datatype, dst, tag, comm); retval = MPI_SUCCESS; TRACE_smpi_ptp_out(rank, rank, dst_traced, __FUNCTION__); @@ -1019,7 +1019,7 @@ int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, } else if (!sendtype->is_valid() || !recvtype->is_valid()) { retval = MPI_ERR_TYPE; } else if (src == MPI_PROC_NULL || dst == MPI_PROC_NULL) { - Status::empty(status); + simgrid::smpi::Status::empty(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; }else if (dst >= comm->group()->size() || dst <0 || @@ -1054,7 +1054,7 @@ int PMPI_Sendrecv(void *sendbuf, int sendcount, MPI_Datatype sendtype, int dst, TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); TRACE_smpi_send(rank, rank, dst_traced, sendtag,sendcount*sendtype->size()); - Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm, + simgrid::smpi::Request::sendrecv(sendbuf, sendcount, sendtype, dst, sendtag, recvbuf, recvcount, recvtype, src, recvtag, comm, status); retval = MPI_SUCCESS; @@ -1079,7 +1079,7 @@ int PMPI_Sendrecv_replace(void* buf, int count, MPI_Datatype datatype, int dst, void* recvbuf = xbt_new0(char, size); retval = MPI_Sendrecv(buf, count, datatype, dst, sendtag, recvbuf, count, datatype, src, recvtag, comm, status); if(retval==MPI_SUCCESS){ - Datatype::copy(recvbuf, count, datatype, buf, count, datatype); + simgrid::smpi::Datatype::copy(recvbuf, count, datatype, buf, count, datatype); } xbt_free(recvbuf); @@ -1095,7 +1095,7 @@ int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status) retval = MPI_ERR_ARG; } else if (*request == MPI_REQUEST_NULL) { *flag= true; - Status::empty(status); + simgrid::smpi::Status::empty(status); retval = MPI_SUCCESS; } else { int rank = ((*request)->comm() != MPI_COMM_NULL) ? 
smpi_process()->index() : -1; @@ -1104,7 +1104,7 @@ int PMPI_Test(MPI_Request * request, int *flag, MPI_Status * status) extra->type = TRACING_TEST; TRACE_smpi_testing_in(rank, extra); - *flag = Request::test(request,status); + *flag = simgrid::smpi::Request::test(request,status); TRACE_smpi_testing_out(rank); retval = MPI_SUCCESS; @@ -1121,7 +1121,7 @@ int PMPI_Testany(int count, MPI_Request requests[], int *index, int *flag, MPI_S if (index == nullptr || flag == nullptr) { retval = MPI_ERR_ARG; } else { - *flag = Request::testany(count, requests, index, status); + *flag = simgrid::smpi::Request::testany(count, requests, index, status); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1136,7 +1136,7 @@ int PMPI_Testall(int count, MPI_Request* requests, int* flag, MPI_Status* status if (flag == nullptr) { retval = MPI_ERR_ARG; } else { - *flag = Request::testall(count, requests, statuses); + *flag = simgrid::smpi::Request::testall(count, requests, statuses); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1152,11 +1152,11 @@ int PMPI_Probe(int source, int tag, MPI_Comm comm, MPI_Status* status) { } else if (comm == MPI_COMM_NULL) { retval = MPI_ERR_COMM; } else if (source == MPI_PROC_NULL) { - Status::empty(status); + simgrid::smpi::Status::empty(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; } else { - Request::probe(source, tag, comm, status); + simgrid::smpi::Request::probe(source, tag, comm, status); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1173,11 +1173,11 @@ int PMPI_Iprobe(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* statu retval = MPI_ERR_COMM; } else if (source == MPI_PROC_NULL) { *flag=true; - Status::empty(status); + simgrid::smpi::Status::empty(status); status->MPI_SOURCE = MPI_PROC_NULL; retval = MPI_SUCCESS; } else { - Request::iprobe(source, tag, comm, flag, status); + simgrid::smpi::Request::iprobe(source, tag, comm, flag, status); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1190,7 +1190,7 @@ int PMPI_Wait(MPI_Request * request, MPI_Status * status) smpi_bench_end(); - Status::empty(status); + simgrid::smpi::Status::empty(status); if (request == nullptr) { retval = MPI_ERR_ARG; @@ -1209,7 +1209,7 @@ int PMPI_Wait(MPI_Request * request, MPI_Status * status) extra->type = TRACING_WAIT; TRACE_smpi_ptp_in(rank, src_traced, dst_traced, __FUNCTION__, extra); - Request::wait(request, status); + simgrid::smpi::Request::wait(request, status); retval = MPI_SUCCESS; //the src may not have been known at the beginning of the recv (MPI_ANY_SOURCE) @@ -1257,7 +1257,7 @@ int PMPI_Waitany(int count, MPI_Request requests[], int *index, MPI_Status * sta extra->send_size=count; TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); - *index = Request::waitany(count, requests, status); + *index = simgrid::smpi::Request::waitany(count, requests, status); if(*index!=MPI_UNDEFINED){ int src_traced = savedvals[*index].src; @@ -1307,7 +1307,7 @@ int PMPI_Waitall(int count, MPI_Request requests[], MPI_Status status[]) extra->send_size=count; TRACE_smpi_ptp_in(rank_traced, -1, -1, __FUNCTION__,extra); - int retval =Request::waitall(count, requests, status); + int retval = simgrid::smpi::Request::waitall(count, requests, status); for (int i = 0; i < count; i++) { if(savedvals[i].valid){ @@ -1338,7 +1338,7 @@ int PMPI_Waitsome(int incount, MPI_Request requests[], int *outcount, int *indic if (outcount == nullptr) { retval = MPI_ERR_ARG; } else { - *outcount = Request::waitsome(incount, requests, indices, status); + *outcount = 
simgrid::smpi::Request::waitsome(incount, requests, indices, status); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1353,7 +1353,7 @@ int PMPI_Testsome(int incount, MPI_Request requests[], int* outcount, int* indic if (outcount == nullptr) { retval = MPI_ERR_ARG; } else { - *outcount = Request::testsome(incount, requests, indices, status); + *outcount = simgrid::smpi::Request::testsome(incount, requests, indices, status); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -1386,7 +1386,7 @@ int PMPI_Bcast(void *buf, int count, MPI_Datatype datatype, int root, MPI_Comm c extra->send_size = count * dt_size_send; TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); if (comm->size() > 1) - Colls::bcast(buf, count, datatype, root, comm); + simgrid::smpi::Colls::bcast(buf, count, datatype, root, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); @@ -1409,7 +1409,7 @@ int PMPI_Barrier(MPI_Comm comm) extra->type = TRACING_BARRIER; TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - Colls::barrier(comm); + simgrid::smpi::Colls::barrier(comm); //Barrier can be used to synchronize RMA calls. Finish all requests from comm before. comm->finish_rma_calls(); @@ -1465,7 +1465,7 @@ int PMPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype,void *recvbu TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm); + simgrid::smpi::Colls::gather(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, root, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); @@ -1525,7 +1525,7 @@ int PMPI_Gatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recv } TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - retval = Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm); + retval = simgrid::smpi::Colls::gatherv(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcounts, displs, recvtype, root, comm); TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); } @@ -1571,7 +1571,7 @@ int PMPI_Allgather(void *sendbuf, int sendcount, MPI_Datatype sendtype, TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); + simgrid::smpi::Colls::allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, -1, __FUNCTION__); } @@ -1623,7 +1623,7 @@ int PMPI_Allgatherv(void *sendbuf, int sendcount, MPI_Datatype sendtype, TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); + simgrid::smpi::Colls::allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, -1, __FUNCTION__); } @@ -1671,7 +1671,7 @@ int PMPI_Scatter(void *sendbuf, int sendcount, MPI_Datatype sendtype, extra->recv_size = recvcount * dt_size_recv; TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); + simgrid::smpi::Colls::scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); } @@ -1724,7 +1724,7 @@ int PMPI_Scatterv(void *sendbuf, int 
*sendcounts, int *displs, extra->recv_size = recvcount * dt_size_recv; TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - retval = Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); + retval = simgrid::smpi::Colls::scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); } @@ -1758,7 +1758,7 @@ int PMPI_Reduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, TRACE_smpi_collective_in(rank, root_traced, __FUNCTION__, extra); - Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm); + simgrid::smpi::Colls::reduce(sendbuf, recvbuf, count, datatype, op, root, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, root_traced, __FUNCTION__); @@ -1799,7 +1799,7 @@ int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp char* sendtmpbuf = static_cast(sendbuf); if( sendbuf == MPI_IN_PLACE ) { sendtmpbuf = static_cast(xbt_malloc(count*datatype->get_extent())); - Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype); + simgrid::smpi::Datatype::copy(recvbuf, count, datatype,sendtmpbuf, count, datatype); } int rank = comm != MPI_COMM_NULL ? smpi_process()->index() : -1; instr_extra_data extra = xbt_new0(s_instr_extra_data_t, 1); @@ -1813,7 +1813,7 @@ int PMPI_Allreduce(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatyp TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm); + simgrid::smpi::Colls::allreduce(sendtmpbuf, recvbuf, count, datatype, op, comm); if( sendbuf == MPI_IN_PLACE ) xbt_free(sendtmpbuf); @@ -1851,7 +1851,7 @@ int PMPI_Scan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MP TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - retval = Colls::scan(sendbuf, recvbuf, count, datatype, op, comm); + retval = simgrid::smpi::Colls::scan(sendbuf, recvbuf, count, datatype, op, comm); TRACE_smpi_collective_out(rank, -1, __FUNCTION__); } @@ -1888,7 +1888,7 @@ int PMPI_Exscan(void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, } TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - retval = Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm); + retval = simgrid::smpi::Colls::exscan(sendtmpbuf, recvbuf, count, datatype, op, comm); TRACE_smpi_collective_out(rank, -1, __FUNCTION__); if (sendbuf == MPI_IN_PLACE) @@ -1939,7 +1939,7 @@ int PMPI_Reduce_scatter(void *sendbuf, void *recvbuf, int *recvcounts, MPI_Datat TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm); + simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm); retval = MPI_SUCCESS; TRACE_smpi_collective_out(rank, -1, __FUNCTION__); @@ -1992,7 +1992,7 @@ int PMPI_Reduce_scatter_block(void *sendbuf, void *recvbuf, int recvcount, int* recvcounts = static_cast(xbt_malloc(count * sizeof(int))); for (int i = 0; i < count; i++) recvcounts[i] = recvcount; - Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm); + simgrid::smpi::Colls::reduce_scatter(sendtmpbuf, recvbuf, recvcounts, datatype, op, comm); xbt_free(recvcounts); retval = MPI_SUCCESS; @@ -2045,7 +2045,7 @@ int PMPI_Alltoall(void* sendbuf, int sendcount, MPI_Datatype sendtype, void* rec TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - retval = Colls::alltoall(sendtmpbuf, sendtmpcount, 
sendtmptype, recvbuf, recvcount, recvtype, comm); + retval = simgrid::smpi::Colls::alltoall(sendtmpbuf, sendtmpcount, sendtmptype, recvbuf, recvcount, recvtype, comm); TRACE_smpi_collective_out(rank, -1, __FUNCTION__); @@ -2118,7 +2118,7 @@ int PMPI_Alltoallv(void* sendbuf, int* sendcounts, int* senddisps, MPI_Datatype } extra->num_processes = size; TRACE_smpi_collective_in(rank, -1, __FUNCTION__, extra); - retval = Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts, + retval = simgrid::smpi::Colls::alltoallv(sendtmpbuf, sendtmpcounts, sendtmpdisps, sendtmptype, recvbuf, recvcounts, recvdisps, recvtype, comm); TRACE_smpi_collective_out(rank, -1, __FUNCTION__); @@ -2158,7 +2158,7 @@ int PMPI_Get_count(MPI_Status * status, MPI_Datatype datatype, int *count) } else if (status->count % size != 0) { return MPI_UNDEFINED; } else { - *count = Status::get_count(status, datatype); + *count = simgrid::smpi::Status::get_count(status, datatype); return MPI_SUCCESS; } } @@ -2170,7 +2170,7 @@ int PMPI_Type_contiguous(int count, MPI_Datatype old_type, MPI_Datatype* new_typ } else if (count<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_contiguous(count, old_type, 0, new_type); + return simgrid::smpi::Datatype::create_contiguous(count, old_type, 0, new_type); } } @@ -2189,7 +2189,7 @@ int PMPI_Type_vector(int count, int blocklen, int stride, MPI_Datatype old_type, } else if (count<0 || blocklen<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_vector(count, blocklen, stride, old_type, new_type); + return simgrid::smpi::Datatype::create_vector(count, blocklen, stride, old_type, new_type); } } @@ -2199,7 +2199,7 @@ int PMPI_Type_hvector(int count, int blocklen, MPI_Aint stride, MPI_Datatype old } else if (count<0 || blocklen<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_hvector(count, blocklen, stride, old_type, new_type); + return simgrid::smpi::Datatype::create_hvector(count, blocklen, stride, old_type, new_type); } } @@ -2213,7 +2213,7 @@ int PMPI_Type_indexed(int count, int* blocklens, int* indices, MPI_Datatype old_ } else if (count<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_indexed(count, blocklens, indices, old_type, new_type); + return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type); } } @@ -2223,7 +2223,7 @@ int PMPI_Type_create_indexed(int count, int* blocklens, int* indices, MPI_Dataty } else if (count<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_indexed(count, blocklens, indices, old_type, new_type); + return simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type); } } @@ -2238,7 +2238,7 @@ int PMPI_Type_create_indexed_block(int count, int blocklength, int* indices, MPI int* blocklens=static_cast(xbt_malloc(blocklength*count*sizeof(int))); for (int i = 0; i < count; i++) blocklens[i]=blocklength; - int retval = Datatype::create_indexed(count, blocklens, indices, old_type, new_type); + int retval = simgrid::smpi::Datatype::create_indexed(count, blocklens, indices, old_type, new_type); xbt_free(blocklens); return retval; } @@ -2251,7 +2251,7 @@ int PMPI_Type_hindexed(int count, int* blocklens, MPI_Aint* indices, MPI_Datatyp } else if (count<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_hindexed(count, blocklens, indices, old_type, new_type); + return simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type); } } @@ -2270,7 +2270,7 @@ int PMPI_Type_create_hindexed_block(int 
count, int blocklength, MPI_Aint* indice int* blocklens=(int*)xbt_malloc(blocklength*count*sizeof(int)); for (int i = 0; i < count; i++) blocklens[i] = blocklength; - int retval = Datatype::create_hindexed(count, blocklens, indices, old_type, new_type); + int retval = simgrid::smpi::Datatype::create_hindexed(count, blocklens, indices, old_type, new_type); xbt_free(blocklens); return retval; } @@ -2280,7 +2280,7 @@ int PMPI_Type_struct(int count, int* blocklens, MPI_Aint* indices, MPI_Datatype* if (count<0){ return MPI_ERR_COUNT; } else { - return Datatype::create_struct(count, blocklens, indices, old_types, new_type); + return simgrid::smpi::Datatype::create_struct(count, blocklens, indices, old_types, new_type); } } @@ -2310,7 +2310,7 @@ int PMPI_Cart_create(MPI_Comm comm_old, int ndims, int* dims, int* periodic, int } else if (ndims < 0 || (ndims > 0 && (dims == nullptr || periodic == nullptr)) || comm_cart == nullptr) { return MPI_ERR_ARG; } else{ - Topo_Cart* topo = new Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart); + simgrid::smpi::Topo_Cart* topo = new simgrid::smpi::Topo_Cart(comm_old, ndims, dims, periodic, reorder, comm_cart); if(*comm_cart==MPI_COMM_NULL) delete topo; return MPI_SUCCESS; @@ -2400,7 +2400,7 @@ int PMPI_Dims_create(int nnodes, int ndims, int* dims) { if (ndims < 1 || nnodes < 1) { return MPI_ERR_DIMS; } - return Dims_create(nnodes, ndims, dims); + return simgrid::smpi::Topo_Cart::Dims_create(nnodes, ndims, dims); } int PMPI_Cart_sub(MPI_Comm comm, int* remain_dims, MPI_Comm* comm_new) { @@ -2430,7 +2430,7 @@ int PMPI_Type_create_resized(MPI_Datatype oldtype,MPI_Aint lb, MPI_Aint extent, MPI_Aint disps[3] = {lb, 0, lb + extent}; MPI_Datatype types[3] = {MPI_LB, oldtype, MPI_UB}; - *newtype = new Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types); + *newtype = new simgrid::smpi::Type_Struct(oldtype->size(), lb, lb + extent, DT_FLAG_DERIVED, 3, blocks, disps, types); (*newtype)->addflag(~DT_FLAG_COMMITED); return MPI_SUCCESS; @@ -2444,7 +2444,7 @@ int PMPI_Win_create( void *base, MPI_Aint size, int disp_unit, MPI_Info info, MP }else if ((base == nullptr && size != 0) || disp_unit <= 0 || size < 0 ){ retval= MPI_ERR_OTHER; }else{ - *win = new Win( base, size, disp_unit, info, comm); + *win = new simgrid::smpi::Win( base, size, disp_unit, info, comm); retval = MPI_SUCCESS; } smpi_bench_begin(); @@ -2761,7 +2761,7 @@ int PMPI_Type_get_name(MPI_Datatype datatype, char * name, int* len) } MPI_Datatype PMPI_Type_f2c(MPI_Fint datatype){ - return static_cast(F2C::f2c(datatype)); + return static_cast(simgrid::smpi::F2C::f2c(datatype)); } MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){ @@ -2769,7 +2769,7 @@ MPI_Fint PMPI_Type_c2f(MPI_Datatype datatype){ } MPI_Group PMPI_Group_f2c(MPI_Fint group){ - return Group::f2c(group); + return simgrid::smpi::Group::f2c(group); } MPI_Fint PMPI_Group_c2f(MPI_Group group){ @@ -2777,7 +2777,7 @@ MPI_Fint PMPI_Group_c2f(MPI_Group group){ } MPI_Request PMPI_Request_f2c(MPI_Fint request){ - return static_cast(Request::f2c(request)); + return static_cast(simgrid::smpi::Request::f2c(request)); } MPI_Fint PMPI_Request_c2f(MPI_Request request) { @@ -2785,7 +2785,7 @@ MPI_Fint PMPI_Request_c2f(MPI_Request request) { } MPI_Win PMPI_Win_f2c(MPI_Fint win){ - return static_cast(Win::f2c(win)); + return static_cast(simgrid::smpi::Win::f2c(win)); } MPI_Fint PMPI_Win_c2f(MPI_Win win){ @@ -2793,7 +2793,7 @@ MPI_Fint PMPI_Win_c2f(MPI_Win win){ } MPI_Op PMPI_Op_f2c(MPI_Fint op){ - return 
static_cast(Op::f2c(op)); + return static_cast(simgrid::smpi::Op::f2c(op)); } MPI_Fint PMPI_Op_c2f(MPI_Op op){ @@ -2801,7 +2801,7 @@ MPI_Fint PMPI_Op_c2f(MPI_Op op){ } MPI_Comm PMPI_Comm_f2c(MPI_Fint comm){ - return static_cast(Comm::f2c(comm)); + return static_cast(simgrid::smpi::Comm::f2c(comm)); } MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){ @@ -2809,7 +2809,7 @@ MPI_Fint PMPI_Comm_c2f(MPI_Comm comm){ } MPI_Info PMPI_Info_f2c(MPI_Fint info){ - return static_cast(Info::f2c(info)); + return static_cast(simgrid::smpi::Info::f2c(info)); } MPI_Fint PMPI_Info_c2f(MPI_Info info){ @@ -2819,11 +2819,11 @@ MPI_Fint PMPI_Info_c2f(MPI_Info info){ int PMPI_Keyval_create(MPI_Copy_function* copy_fn, MPI_Delete_function* delete_fn, int* keyval, void* extra_state) { smpi_copy_fn _copy_fn={copy_fn,nullptr,nullptr}; smpi_delete_fn _delete_fn={delete_fn,nullptr,nullptr}; - return Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); + return simgrid::smpi::Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); } int PMPI_Keyval_free(int* keyval) { - return Keyval::keyval_free(keyval); + return simgrid::smpi::Keyval::keyval_free(keyval); } int PMPI_Attr_delete(MPI_Comm comm, int keyval) { @@ -2833,7 +2833,7 @@ int PMPI_Attr_delete(MPI_Comm comm, int keyval) { else if (comm==MPI_COMM_NULL) return MPI_ERR_COMM; else - return comm->attr_delete(keyval); + return comm->attr_delete(keyval); } int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) { @@ -2871,7 +2871,7 @@ int PMPI_Attr_get(MPI_Comm comm, int keyval, void* attr_value, int* flag) { *static_cast(attr_value) = &one; return MPI_SUCCESS; default: - return comm->attr_get(keyval, attr_value, flag); + return comm->attr_get(keyval, attr_value, flag); } } @@ -2882,7 +2882,7 @@ int PMPI_Attr_put(MPI_Comm comm, int keyval, void* attr_value) { else if (comm==MPI_COMM_NULL) return MPI_ERR_COMM; else - return comm->attr_put(keyval, attr_value); + return comm->attr_put(keyval, attr_value); } int PMPI_Comm_get_attr (MPI_Comm comm, int comm_keyval, void *attribute_val, int *flag) @@ -2915,7 +2915,7 @@ int PMPI_Type_get_attr (MPI_Datatype type, int type_keyval, void *attribute_val, if (type==MPI_DATATYPE_NULL) return MPI_ERR_TYPE; else - return type->attr_get(type_keyval, attribute_val, flag); + return type->attr_get(type_keyval, attribute_val, flag); } int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val) @@ -2923,7 +2923,7 @@ int PMPI_Type_set_attr (MPI_Datatype type, int type_keyval, void *attribute_val) if (type==MPI_DATATYPE_NULL) return MPI_ERR_TYPE; else - return type->attr_put(type_keyval, attribute_val); + return type->attr_put(type_keyval, attribute_val); } int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval) @@ -2931,7 +2931,7 @@ int PMPI_Type_delete_attr (MPI_Datatype type, int type_keyval) if (type==MPI_DATATYPE_NULL) return MPI_ERR_TYPE; else - return type->attr_delete(type_keyval); + return type->attr_delete(type_keyval); } int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delete_attr_function* delete_fn, int* keyval, @@ -2939,11 +2939,11 @@ int PMPI_Type_create_keyval(MPI_Type_copy_attr_function* copy_fn, MPI_Type_delet { smpi_copy_fn _copy_fn={nullptr,copy_fn,nullptr}; smpi_delete_fn _delete_fn={nullptr,delete_fn,nullptr}; - return Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); + return simgrid::smpi::Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); } int PMPI_Type_free_keyval(int* keyval) { - return 
Keyval::keyval_free(keyval); + return simgrid::smpi::Keyval::keyval_free(keyval); } int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag) @@ -2969,7 +2969,7 @@ int PMPI_Win_get_attr (MPI_Win win, int keyval, void *attribute_val, int* flag) *flag = 1; return MPI_SUCCESS; default: - return win->attr_get(keyval, attribute_val, flag); + return win->attr_get(keyval, attribute_val, flag); } } @@ -2980,7 +2980,7 @@ int PMPI_Win_set_attr (MPI_Win win, int type_keyval, void *attribute_val) if (win==MPI_WIN_NULL) return MPI_ERR_TYPE; else - return win->attr_put(type_keyval, attribute_val); + return win->attr_put(type_keyval, attribute_val); } int PMPI_Win_delete_attr (MPI_Win win, int type_keyval) @@ -2988,7 +2988,7 @@ int PMPI_Win_delete_attr (MPI_Win win, int type_keyval) if (win==MPI_WIN_NULL) return MPI_ERR_TYPE; else - return win->attr_delete(type_keyval); + return win->attr_delete(type_keyval); } int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_attr_function* delete_fn, int* keyval, @@ -2996,17 +2996,17 @@ int PMPI_Win_create_keyval(MPI_Win_copy_attr_function* copy_fn, MPI_Win_delete_a { smpi_copy_fn _copy_fn={nullptr, nullptr, copy_fn}; smpi_delete_fn _delete_fn={nullptr, nullptr, delete_fn}; - return Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); + return simgrid::smpi::Keyval::keyval_create(_copy_fn, _delete_fn, keyval, extra_state); } int PMPI_Win_free_keyval(int* keyval) { - return Keyval::keyval_free(keyval); + return simgrid::smpi::Keyval::keyval_free(keyval); } int PMPI_Info_create( MPI_Info *info){ if (info == nullptr) return MPI_ERR_ARG; - *info = new Info(); + *info = new simgrid::smpi::Info(); return MPI_SUCCESS; } @@ -3020,7 +3020,7 @@ int PMPI_Info_set( MPI_Info info, char *key, char *value){ int PMPI_Info_free( MPI_Info *info){ if (info == nullptr || *info==nullptr) return MPI_ERR_ARG; - Info::unref(*info); + simgrid::smpi::Info::unref(*info); *info=MPI_INFO_NULL; return MPI_SUCCESS; } @@ -3037,7 +3037,7 @@ int PMPI_Info_get(MPI_Info info,char *key,int valuelen, char *value, int *flag){ int PMPI_Info_dup(MPI_Info info, MPI_Info *newinfo){ if (info == nullptr || newinfo==nullptr) return MPI_ERR_ARG; - *newinfo = new Info(info); + *newinfo = new simgrid::smpi::Info(info); return MPI_SUCCESS; } diff --git a/src/smpi/smpi_process.cpp b/src/smpi/smpi_process.cpp index 5fc634ea97..83c2219162 100644 --- a/src/smpi/smpi_process.cpp +++ b/src/smpi/smpi_process.cpp @@ -12,7 +12,7 @@ XBT_LOG_NEW_DEFAULT_SUBCATEGORY(smpi_process, smpi, "Logging specific to SMPI (kernel)"); //TODO : replace -extern Process **process_data; +extern simgrid::smpi::Process **process_data; extern int* index_to_process_data; #define MAILBOX_NAME_MAXLEN (5 + sizeof(int) * 2 + 1) diff --git a/src/smpi/smpi_status.cpp b/src/smpi/smpi_status.cpp index 2d6caf9144..7d42098522 100644 --- a/src/smpi/smpi_status.cpp +++ b/src/smpi/smpi_status.cpp @@ -7,6 +7,9 @@ #include "private.h" #include "src/simix/smx_private.h" +namespace simgrid{ +namespace smpi{ + void Status::empty(MPI_Status * status) { if(status != MPI_STATUS_IGNORE) { @@ -21,3 +24,6 @@ int Status::get_count(MPI_Status * status, MPI_Datatype datatype) { return status->count / datatype->size(); } + +} +} diff --git a/src/smpi/smpi_topo.cpp b/src/smpi/smpi_topo.cpp index 143efe0996..44a111bed2 100644 --- a/src/smpi/smpi_topo.cpp +++ b/src/smpi/smpi_topo.cpp @@ -258,7 +258,7 @@ int Topo_Cart::dim_get(int *ndims) { /* * This is a utility function, no need to have anything in the lower layer 
for this at all */ -int Dims_create(int nnodes, int ndims, int dims[]) +int Topo_Cart::Dims_create(int nnodes, int ndims, int dims[]) { /* Get # of free-to-be-assigned processes and # of free dimensions */ int freeprocs = nnodes; diff --git a/src/smpi/smpi_topo.hpp b/src/smpi/smpi_topo.hpp index 96f76098d8..7dd16ce74a 100644 --- a/src/smpi/smpi_topo.hpp +++ b/src/smpi/smpi_topo.hpp @@ -46,6 +46,7 @@ class Topo_Cart: public Topo { int rank(int* coords, int* rank); int shift(int direction, int disp, int *rank_source, int *rank_dest) ; int dim_get(int *ndims); + static int Dims_create(int nnodes, int ndims, int dims[]); }; @@ -74,11 +75,6 @@ class Topo_Dist_Graph: public Topo { ~Topo_Dist_Graph(); }; -/* - * This is a utility function, no need to have anything in the lower layer for this at all - */ -extern int Dims_create(int nnodes, int ndims, int dims[]); - } } -- 2.20.1
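
Note on the pattern above: every hunk replaces an unqualified SMPI name (Comm::f2c, Datatype::f2c, Request::..., Colls::..., Keyval::...) with its fully qualified simgrid::smpi:: spelling at the call site. The fragment below is a minimal, self-contained sketch of that style only; the Comm class, its f2c() converter and the mpi_comm_size_ wrapper are simplified stand-ins for illustration, not the real SimGrid declarations.

// sketch.cpp -- illustrative stand-in, not part of the patch
#include <cstdio>

namespace simgrid {
namespace smpi {
// Hypothetical mini Comm: just enough to show the fully qualified call style
// used throughout the smpi_f77.cpp and smpi_pmpi.cpp hunks above.
class Comm {
public:
  static Comm* f2c(int handle) {      // hypothetical f2c(): Fortran handle -> C++ object
    static Comm world;
    return handle == 0 ? &world : nullptr;
  }
  int size() const { return 1; }
};
} // namespace smpi
} // namespace simgrid

// Fortran-style wrapper written without any using-directive: every SMPI name
// is spelled out in full, mirroring the style of the hunks above.
extern "C" void mpi_comm_size_(int* comm, int* size, int* ierr) {
  *size = simgrid::smpi::Comm::f2c(*comm)->size();
  *ierr = 0;
}

int main() {
  int comm = 0, size = 0, ierr = 0;
  mpi_comm_size_(&comm, &size, &ierr);
  std::printf("size=%d ierr=%d\n", size, ierr);
  return 0;
}

A file-local namespace alias (namespace smpi = simgrid::smpi;) is an alternative way to keep such call sites short; the hunks here instead qualify every call in place.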